| code (string, lengths 3 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
from django.db import models
from django_extensions.db.fields import (CreationDateTimeField,
ModificationDateTimeField)
class TimestampAbstract(models.Model):
"""
Subclass this to get timestamp fields.
"""
created = CreationDateTimeField()
modified = ModificationDateTimeField()
class Meta:
abstract = True
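# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical concrete model; the `Article` name and its field are assumptions
# added only to show how the abstract base is meant to be subclassed.
class Article(TimestampAbstract):
    title = models.CharField(max_length=200)
    # `created` and `modified` are inherited and filled in automatically on save.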
|
theteam/scampcat.com
|
scampcat/common/models.py
|
Python
|
mit
| 383
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-sync-users-from-file
# Author : Adrian Casajus
########################################################################
"""
Sync users in Configuration with the cfg contents.
Usage:
dirac-admin-sync-users-from-file [options] ... UserCfg
Arguments:
UserCfg: Cfg FileName with Users as sections containing DN, Groups, and other properties as options
Example:
$ dirac-admin-sync-users-from-file file_users.cfg
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from diraccfg import CFG
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
__RCSID__ = "$Id$"
@DIRACScript()
def main():
Script.registerSwitch("t", "test", "Only test. Don't commit changes")
Script.parseCommandLine(ignoreErrors=True)
args = Script.getExtraCLICFGFiles()
if len(args) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
testOnly = False
errorList = []
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ("t", "test"):
testOnly = True
try:
usersCFG = CFG().loadFromFile(args[0])
except Exception as e:
errorList.append("file open", "Can't parse file %s: %s" % (args[0], str(e)))
errorCode = 1
else:
if not diracAdmin.csSyncUsersWithCFG(usersCFG):
errorList.append(("modify users", "Cannot sync with %s" % args[0]))
exitCode = 255
if not exitCode and not testOnly:
result = diracAdmin.csCommitChanges()
if not result['OK']:
errorList.append(("commit", result['Message']))
exitCode = 255
for error in errorList:
print("ERROR %s: %s" % error)
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_admin_sync_users_from_file.py
|
Python
|
gpl-3.0
| 1,909
|
# Built-in Default Functions for UnivMathSys
# Copyright (C) 2016 Zhang Chang-kai #
# Contact via: phy.zhangck@gmail.com #
# General Public License version 3.0 #
'''Module Elementary.build of UnivMathSys'''
def TrueFunc(*args):
return True
def FalseFunc(*args):
return False
# End of Module Elementary.build of UnivMathSys
|
Phy-David-Zhang/UnivMathSys
|
Elementary/build.py
|
Python
|
gpl-3.0
| 363
|
import socket
import sys
import rospy  # assumed ROS dependency; required by run() below
from std_msgs.msg import String  # assumed message type for the "referee" topic
class Client:
def __init__(self,host,port):
self.host = host
self.port = port
self.size = 1024
self.open_socket()
def open_socket(self):
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.connect((self.host, self.port))
except socket.error, (value,message):
if self.server:
self.server.close()
print "Could not open socket: " + message
sys.exit(0)
def run(self):
rospy.init_node('clientWifi', anonymous=True)
rospy.Subscriber("referee", String, sendMessage)
rospy.spin()
def sendMessage(self, data):
print "data from referee topic: ",data.data
#self.server.send(data.data)
#dataRecv = self.server.recv(self.size)
#self.server.close()
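# --- Illustrative usage sketch (not part of the original file) ---
# The host and port below are assumptions for demonstration only.
if __name__ == '__main__':
    client = Client('192.168.1.100', 50000)  # hypothetical robot host/port
    client.run()  # blocks, relaying messages published on the "referee" topic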
|
cletusw/goal-e
|
src/goale/scripts/client.py
|
Python
|
mit
| 886
|
from decimal import Decimal
import logging
import stripe
from django.conf import settings
from django.utils.translation import ugettext as _
from corehq import Domain
from corehq.apps.accounting.models import (
BillingAccount,
CreditLine,
Invoice,
PaymentRecord,
SoftwareProductType,
)
from corehq.apps.accounting.user_text import get_feature_name
from corehq.apps.accounting.utils import fmt_dollar_amount
from corehq.const import USER_DATE_FORMAT
from dimagi.utils.decorators.memoized import memoized
stripe.api_key = settings.STRIPE_PRIVATE_KEY
logger = logging.getLogger('accounting')
def get_or_create_stripe_customer(payment_method):
customer = None
if payment_method.customer_id is not None:
try:
customer = stripe.Customer.retrieve(payment_method.customer_id)
except stripe.InvalidRequestError:
pass
if customer is None:
customer = stripe.Customer.create(
description="{}'s cards".format(payment_method.web_user),
email=payment_method.web_user,
)
payment_method.customer_id = customer.id
payment_method.save()
return customer
class BaseStripePaymentHandler(object):
"""Handler for paying via Stripe's API
"""
receipt_email_template = None
receipt_email_template_plaintext = None
def __init__(self, payment_method, domain):
self.payment_method = payment_method
self.domain = domain
@property
def cost_item_name(self):
"""Returns a name for the cost item that's used in the logging messages.
"""
raise NotImplementedError("you must implement cost_item_name")
@property
@memoized
def core_product(self):
domain = Domain.get_by_name(self.domain)
return SoftwareProductType.get_type_by_domain(domain)
def create_charge(self, amount, card=None, customer=None):
"""Process the HTTPRequest used to make this payment
returns a dict to be used as the json response for the request.
"""
raise NotImplementedError("you must implement process_request")
def get_charge_amount(self, request):
"""Returns a Decimal of the amount to be charged.
"""
raise NotImplementedError("you must implement get_charge_amount")
def update_credits(self, payment_record):
"""Updates any relevant Credit lines
"""
raise NotImplementedError("you must implement update_credits")
def get_amount_in_cents(self, amount):
amt_cents = amount * Decimal('100')
return int(amt_cents.quantize(Decimal(10)))
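# Worked example (illustrative values, not from the original file):
#   get_amount_in_cents(Decimal('40.50'))
#   -> int(Decimal('4050.00').quantize(Decimal(10))) -> 4050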
def process_request(self, request):
customer = None
amount = self.get_charge_amount(request)
card = request.POST.get('stripeToken')
remove_card = request.POST.get('removeCard')
is_saved_card = request.POST.get('selectedCardType') == 'saved'
save_card = request.POST.get('saveCard') and not is_saved_card
generic_error = {
'error': {
'message': _(
"Something went wrong while processing your payment. "
"We're working quickly to resolve the issue. No charges "
"were issued. Please try again in a few hours."
),
},
}
try:
if remove_card:
customer = get_or_create_stripe_customer(self.payment_method)
customer.cards.retrieve(card).delete()
return {
'success': True,
'removedCard': card,
}
if save_card:
customer = get_or_create_stripe_customer(self.payment_method)
card = customer.cards.create(card=card)
customer.default_card = card
customer.save()
card = card
if is_saved_card:
customer = get_or_create_stripe_customer(self.payment_method)
charge = self.create_charge(amount, card=card, customer=customer)
payment_record = PaymentRecord.create_record(
self.payment_method, charge.id, amount
)
self.update_credits(payment_record)
try:
self.send_email(payment_record)
except Exception:
logger.error(
"[BILLING] Failed to send out an email receipt for "
"payment related to PaymentRecord No. %s. "
"Everything else succeeded."
% payment_record.id, exc_info=True
)
except stripe.error.CardError as e:
# card was declined
return e.json_body
except (
stripe.error.AuthenticationError,
stripe.error.InvalidRequestError,
stripe.error.APIConnectionError,
stripe.error.StripeError,
) as e:
logger.error(
"[BILLING] A payment for %(cost_item)s failed due "
"to a Stripe %(error_class)s: %(error_msg)s" % {
'error_class': e.__class__.__name__,
'cost_item': self.cost_item_name,
'error_msg': e.json_body['error']
}, exc_info=True)
return generic_error
except Exception as e:
logger.error(
"[BILLING] A payment for %(cost_item)s failed due "
"to: %(error_msg)s" % {
'cost_item': self.cost_item_name,
'error_msg': e,
}, exc_info=True)
return generic_error
return {
'success': True,
'card': card,
'wasSaved': save_card,
'changedBalance': amount,
}
def get_email_context(self):
return {
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
}
def send_email(self, payment_record):
additional_context = self.get_email_context()
from corehq.apps.accounting.tasks import send_purchase_receipt
send_purchase_receipt.delay(
payment_record, self.core_product, self.receipt_email_template,
self.receipt_email_template_plaintext, additional_context
)
class InvoiceStripePaymentHandler(BaseStripePaymentHandler):
receipt_email_template = 'accounting/invoice_receipt_email.html'
receipt_email_template_plaintext = 'accounting/invoice_receipt_email_plaintext.txt'
def __init__(self, payment_method, domain, invoice):
super(InvoiceStripePaymentHandler, self).__init__(payment_method, domain)
self.invoice = invoice
@property
def cost_item_name(self):
return _("Invoice #%s") % self.invoice.id
def create_charge(self, amount, card=None, customer=None):
return stripe.Charge.create(
card=card,
customer=customer,
amount=self.get_amount_in_cents(amount),
currency=settings.DEFAULT_CURRENCY,
description="Payment for Invoice %s" % self.invoice.invoice_number,
)
def get_charge_amount(self, request):
"""Returns a Decimal of the amount to be charged.
"""
if request.POST['paymentAmount'] == 'full':
return self.invoice.balance.quantize(Decimal(10) ** -2)
return Decimal(request.POST['customPaymentAmount'])
def update_credits(self, payment_record):
# record the credit to the account
CreditLine.add_credit(
payment_record.amount, account=self.invoice.subscription.account,
payment_record=payment_record,
)
CreditLine.add_credit(
-payment_record.amount,
account=self.invoice.subscription.account,
invoice=self.invoice,
)
self.invoice.update_balance()
self.invoice.save()
def get_email_context(self):
context = super(InvoiceStripePaymentHandler, self).get_email_context()
context.update({
'balance': fmt_dollar_amount(self.invoice.balance),
'is_paid': self.invoice.is_paid,
'date_due': self.invoice.date_due.strftime(USER_DATE_FORMAT) if self.invoice.date_due else 'None',
'invoice_num': self.invoice.invoice_number,
})
return context
class BulkStripePaymentHandler(BaseStripePaymentHandler):
receipt_email_template = 'accounting/bulk_payment_receipt_email.html'
receipt_email_template_plaintext = 'accounting/bulk_payment_receipt_email_plaintext.txt'
def __init__(self, payment_method, domain):
super(BulkStripePaymentHandler, self).__init__(payment_method, domain)
@property
def cost_item_name(self):
return _('Bulk Payment for project space %s' % self.domain)
def create_charge(self, amount, card=None, customer=None):
return stripe.Charge.create(
card=card,
customer=customer,
amount=self.get_amount_in_cents(amount),
currency=settings.DEFAULT_CURRENCY,
description=self.cost_item_name,
)
@property
def invoices(self):
return Invoice.objects.filter(
subscription__subscriber__domain=self.domain,
is_hidden=False,
)
@property
def balance(self):
return sum(invoice.balance for invoice in self.invoices)
def get_charge_amount(self, request):
if request.POST['paymentAmount'] == 'full':
return self.balance
return Decimal(request.POST['customPaymentAmount'])
def update_credits(self, payment_record):
amount = payment_record.amount
for invoice in self.invoices:
deduct_amount = min(amount, invoice.balance)
amount -= deduct_amount
if deduct_amount > 0:
# TODO - refactor duplicated functionality
CreditLine.add_credit(
deduct_amount, account=invoice.subscription.account,
payment_record=payment_record,
)
CreditLine.add_credit(
-deduct_amount,
account=invoice.subscription.account,
invoice=invoice,
)
invoice.update_balance()
invoice.save()
if amount:
account = BillingAccount.get_or_create_account_by_domain(self.domain)
CreditLine.add_credit(
amount, account=account,
payment_record=payment_record,
)
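# Worked example (amounts assumed, not from the original file): with two open
# invoices of balance 30 and 50 and payment_record.amount == 100, the loop
# credits 30 against the first invoice and 50 against the second; the remaining
# 20 is added as general credit on the domain's billing account.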
def get_email_context(self):
context = super(BulkStripePaymentHandler, self).get_email_context()
context.update({
'is_paid': all(invoice.is_paid for invoice in self.invoices),
'domain': self.domain,
'balance': self.balance,
})
return context
class CreditStripePaymentHandler(BaseStripePaymentHandler):
receipt_email_template = 'accounting/credit_receipt_email.html'
receipt_email_template_plaintext = 'accounting/credit_receipt_email_plaintext.txt'
def __init__(self, payment_method, domain, account, subscription=None,
product_type=None, feature_type=None):
super(CreditStripePaymentHandler, self).__init__(payment_method, domain)
self.product_type = product_type
self.feature_type = feature_type
self.account = account
self.subscription = subscription
@property
def cost_item_name(self):
return "%(credit_type)s Credit %(sub_or_account)s" % {
'credit_type': ("%s Product" % self.product_type
if self.product_type is not None
else "%s Feature" % self.feature_type),
'sub_or_account': ("Subscription %s" % self.subscription
if self.subscription is not None
else "Account %s" % self.account.id),
}
def get_charge_amount(self, request):
return Decimal(request.POST['amount'])
def create_charge(self, amount, card=None, customer=None):
return stripe.Charge.create(
card=card,
customer=customer,
amount=self.get_amount_in_cents(amount),
currency=settings.DEFAULT_CURRENCY,
description="Payment for %s" % self.cost_item_name,
)
def update_credits(self, payment_record):
self.credit_line = CreditLine.add_credit(
payment_record.amount, account=self.account, subscription=self.subscription,
product_type=self.product_type, feature_type=self.feature_type,
payment_record=payment_record,
)
def process_request(self, request):
response = super(CreditStripePaymentHandler, self).process_request(request)
if hasattr(self, 'credit_line'):
response.update({
'balance': fmt_dollar_amount(self.credit_line.balance),
})
return response
def get_email_context(self):
context = super(CreditStripePaymentHandler, self).get_email_context()
if self.product_type:
credit_name = _("%s Software Plan" % self.product_type)
else:
credit_name = get_feature_name(self.feature_type, self.core_product)
context.update({
'credit_name': credit_name,
})
return context
|
puttarajubr/commcare-hq
|
corehq/apps/accounting/payment_handlers.py
|
Python
|
bsd-3-clause
| 13,441
|
# Dormbase -- open-source dormitory database system
# Copyright (C) 2012 Alex Chernyakhovsky <achernya@mit.edu>
# Drew Dennison <dennison@mit.edu>
# Isaac Evans <ine@mit.edu>
# Luke O'Malley <omalley1@mit.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.shortcuts import redirect
def report(request):
# Base URL
url = 'https://insidemit-apps.mit.edu/apps/building_services/CreateResidRepairOrder.action?sapSystemId=PS1'
# Auto-Fill Building Number
if hasattr(settings, 'BUILDING'):
url += '&pageInfo.bldg1=' + settings.BUILDING + '&pageInfo.contactInfo.bldg=' + settings.BUILDING
# TODO: fill in user's room, phone number, check if they actually live here
return redirect(url)
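# Illustrative result (BUILDING value assumed, not from the original file):
# with settings.BUILDING = 'W20' the user is redirected to
# https://insidemit-apps.mit.edu/apps/building_services/CreateResidRepairOrder.action?sapSystemId=PS1&pageInfo.bldg1=W20&pageInfo.contactInfo.bldg=W20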
|
dormbase/dormbase
|
dormbase/facilities/views.py
|
Python
|
gpl-3.0
| 1,448
|
# -*- encoding: utf-8 -*-
"""Implements User UI."""
from robottelo.ui.base import Base, UINoSuchElementError
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class LdapAuthSource(Base):
"""Implements CRUD functions from UI."""
def navigate_to_entity(self):
"""Navigate to LDAP auth source entity page"""
Navigator(self.browser).go_to_ldap_auth()
def _search_locator(self):
"""Specify locator for LDAP auth source entity search procedure"""
return locators['ldapserver.ldap_servername']
def create(self, name=None, server=None, ldaps=False, port=None,
server_type=None, login_name=None, first_name=None,
surname=None, mail=None, photo=None, account_user=None,
account_passwd=None, account_basedn=None,
account_grpbasedn=None, ldap_filter=False, otf_register=True):
"""Create new ldap auth source from UI."""
if not self.wait_until_element(locators['ldapsource.new']):
return
self.click(locators['ldapsource.new'])
if self.wait_until_element(locators['ldapserver.name']):
self.field_update('ldapserver.name', name)
self.field_update('ldapserver.server', server)
if ldaps:
self.click(locators['ldapserver.ldaps'])
if port:
self.field_update('ldapserver.port', port)
self.select(locators['ldapserver.server_type'], server_type)
self.click(tab_locators['ldapserver.tab_account'])
if self.wait_until_element(locators['ldapserver.acc_user']) is None:
raise UINoSuchElementError(u'Could not select the account Tab.')
self.field_update('ldapserver.acc_user', account_user)
self.field_update('ldapserver.acc_passwd', account_passwd)
self.field_update('ldapserver.basedn', account_basedn)
self.field_update('ldapserver.group_basedn', account_grpbasedn)
if ldap_filter:
self.click(locators['ldapserver.ldap_filter'])
if otf_register:
self.click(locators['ldapserver.otf_register'])
self.click(tab_locators['ldapserver.tab_attributes'])
if self.wait_until_element(locators['ldapserver.loginname']) is None:
raise UINoSuchElementError(u'Could not select the attributes Tab.')
self.field_update('ldapserver.loginname', login_name)
self.field_update('ldapserver.firstname', first_name)
self.field_update('ldapserver.surname', surname)
self.field_update('ldapserver.mail', mail)
if photo:
self.field_update('ldapserver.photo', photo)
self.click(common_locators['submit'])
def search(self, name):
"""Searches existing ldap auth source from UI. It is necessary to use
custom search as we don't have both search bar and search button there.
"""
self.navigate_to_entity()
strategy1, value1 = self._search_locator()
return self.wait_until_element((strategy1, value1 % (name, name)))
def delete(self, name, really=True):
"""Deletes existing ldap auth source from UI."""
self.delete_entity(
name,
really,
locators['ldapserver.ldap_delete'],
)
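# Illustrative call sketch (every value below is an assumption, not from the original file):
# LdapAuthSource(browser).create(
#     name='corp-ldap', server='ldap.example.com', server_type='active_directory',
#     login_name='sAMAccountName', first_name='givenName', surname='sn', mail='mail',
#     account_user='CN=svc,DC=example,DC=com', account_passwd='secret',
#     account_basedn='DC=example,DC=com', account_grpbasedn='OU=Groups,DC=example,DC=com',
# )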
|
blrm/robottelo
|
robottelo/ui/ldapauthsource.py
|
Python
|
gpl-3.0
| 3,345
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from .managers import ChatMessageManager
class ChatMessage(models.Model):
"""A persisted chat message."""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
help_text="Message sender"
)
# -------------------------------------------------------------------------
# NOTE: Right now, 1-1 chat rooms are of the form `chat-<userid>-<userid>`.
# See the `set_chatmessage_recipients` post_save handler to see how
# recipients get saved.
# -------------------------------------------------------------------------
recipients = models.ManyToManyField(
settings.AUTH_USER_MODEL,
blank=True,
related_name='chatmessages_received',
help_text='Message recipient(s)'
)
room = models.CharField(max_length=256, default="", db_index=True)
text = models.TextField(default="")
read = models.BooleanField(default=False)
# NOTE: This is an md5 digest of the message's author + text + creation time.
# It's used as an initial ID for the message, which we need to know prior
# to the object's creation time (for read receipts)
digest = models.CharField(max_length=32, blank=True, default='', db_index=True)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.text
class Meta:
ordering = ['-created_on']
verbose_name = "Chat Message"
verbose_name_plural = "Chat Messages"
objects = ChatMessageManager()
@receiver(post_save, sender=ChatMessage, dispatch_uid='set-chatmessage-recipients')
def set_chatmessage_recipients(sender, instance, created, **kwargs):
"""After a message is saved, we try to set the Users who would have been
the message's recipients. This is not done in the websocket consumer,
because we don't have all the recipient info available.
Here's the strategy:
1. Person-to-person messages have a chat room of the form `chat-<id>-<id>`.
If that's the kind of message we get, we'll look up those users.
2. TODO: Otherwise, we'll pull all the messages from the related Chat Group
"""
User = get_user_model()
if instance and instance.room.startswith("chat-"):
ids = instance.room[len("chat-"):].split('-')
users = User.objects.filter(pk__in=ids)
users = users.exclude(pk=instance.user.id)
for user in users:
instance.recipients.add(user)
# TODO: do this for groups.
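# Illustrative walk-through (IDs assumed, not from the original file): a message
# saved with room == "chat-3-7" by the user with pk 3 yields ids ['3', '7'];
# the sender (pk 3) is excluded, so only the user with pk 7 is added to
# instance.recipients.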
class ChatGroup(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL) # Group "owner"
name = models.CharField(max_length=256, unique=True)
slug = models.SlugField(max_length=256, unique=True)
members = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name="chatgroup_member"
)
updated_on = models.DateTimeField(auto_now=True)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class Meta:
ordering = ['-created_on']
verbose_name = "Chat Group"
verbose_name_plural = "Chat Group"
@property
def members_count(self):
return self.members.all().count()
def get_absolute_url(self):
return reverse("chat:group-chat", args=[self.pk, self.slug])
|
izzyalonso/tndata_backend
|
tndata_backend/chat/models.py
|
Python
|
mit
| 3,508
|
# coding=utf-8
import argparse
import json
import logging
from retry import retry
import helpers
import requests
logger = logging.getLogger('to_sqlite')
handler = logging.FileHandler('to_sqlite.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.info('----- START -----')
@retry(tries=10, delay=20)
def send_to_aws(name, data_in):
data = {
'sensorId': name,
'ts': data_in['ts'],
'temperature': data_in['temperature'],
}
requests.post(helpers.STORAGE_ROOT_URL + 'addOne', data=json.dumps(data))
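# Illustrative call (sensor name and readings assumed, not from the original file):
#   send_to_aws('livingroom', {'ts': 1514764800, 'temperature': 21.5})
# posts {"sensorId": "livingroom", "ts": 1514764800, "temperature": 21.5}
# to helpers.STORAGE_ROOT_URL + 'addOne', retrying up to 10 times on failure.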
@helpers.exception(logger=logger)
def main():
parser = argparse.ArgumentParser(
description='Read temperature from stdin and send it to AWS.')
parser.add_argument('--name', type=str, help='Name of the sensor.')
args = parser.parse_args()
data_in = helpers.read_stdin()
send_to_aws(args.name, data_in)
# data_out = json.dumps(data_in)
# print(data_out.encode('utf-8'))
logger.info('----- END -----')
if __name__ == '__main__':
main()
|
termopetteri/raspberry-sensors
|
to_aws.py
|
Python
|
mit
| 1,164
|
"""
Test 'webhooks.apps' file
"""
from django.apps import AppConfig
from django.apps import apps as zenboard_apps
class TestWebhooksConfig:
"""
Test 'webhooks.apps.WebhooksConfig'
"""
def test_boards_app_config(self):
"""Test 'webhooks' module `AppConfig` instance"""
webhooks_app_config = zenboard_apps.get_app_config('webhooks')
assert isinstance(webhooks_app_config, AppConfig)
assert webhooks_app_config.name == 'webhooks'
|
pawelad/zenboard
|
tests/webhooks/test_apps.py
|
Python
|
apache-2.0
| 478
|
#!/usr/bin/env python
import numpy,sys,matplotlib.pyplot as plt,operator,pickle
#print plt.style.available
#plt.style.use('ggplot')
nrprim = 600*28800000
if len(sys.argv) < 3:
print "Specify at least one input and one output. First input is the one the output is relative to."
sys.exit()
filesin = sys.argv[1:-1]
fileout = sys.argv[-1]
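# Illustrative invocation (file names assumed, not from the original script):
#   python plot.1d.rel.py reference.txt variant1.raw variant2.pylist out.png
# Each variant curve is plotted as (reference - variant) / reference and the
# figure is written to out.png.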
data = [] #rows=files,column[0]=filename
for filename in filesin:
datatmp = [filename]
if filename.endswith('.txt'):
header = open(filename,'r')
for line in header:
newline = line.strip()
datatmp.append(float(newline.split()[-1]))
if filename.endswith('.raw'):
datatmp.extend(numpy.fromfile(filename, dtype='<f4').tolist())
if filename.endswith('.pylist'):
datatmp.extend(pickle.load(open(filename)))
data.append(datatmp)
datanew = []
for dataset in data[::-1]: #start at the end
datatmp = []
for index,val in enumerate(dataset):
if index == 0:
datatmp.append(val)
continue #do not modify filename
try:
datatmp.append((data[0][index]-val) / data[0][index])
except ZeroDivisionError:
datatmp.append(0)
datanew.append(datatmp)
#for datindex in range(1,len(data)): #start at the end
# for valindex in range(datindex):
# if valindex is 0:
# continue #do not modify filename
# try:
# data[datindex][valindex] = (data[0][valindex]-data[datindex][valindex]) / data[0][valindex]
# except ZeroDivisionError:
# val = 0
for dataset in datanew:
plt.plot(dataset[1:], label=dataset[0],alpha=0.5)
plt.ylabel('Yield')
#plt.ylabel('PG energy')
#plt.legend(loc=4,prop={'size':6})
plt.legend(prop={'size':10})
plt.savefig(fileout)
|
brenthuisman/phd_tools
|
plot.1d.rel.py
|
Python
|
lgpl-3.0
| 1,611
|
#!/usr/bin/env python3
import unittest
from scapy.layers.l2 import Ether
from scapy.layers.inet6 import IPv6
from framework import VppTestCase
class TestL2tp(VppTestCase):
""" L2TP Test Case """
@classmethod
def setUpClass(cls):
super(TestL2tp, cls).setUpClass()
cls.create_pg_interfaces(range(1))
cls.pg0.admin_up()
cls.pg0.config_ip6()
def test_l2tp_decap_local(self):
""" L2TP don't accept packets unless configured """
pkt = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6, nh=115))
self.pg0.add_stream(pkt)
self.pg_start()
# l2tp should not accept packets
err = self.statistics.get_counter(
'/err/l2tp-decap-local/l2tpv3 session not found')[0]
self.assertEqual(err, 0)
err_count = err
self.vapi.l2tpv3_create_tunnel(client_address=self.pg0.local_ip6,
our_address=self.pg0.remote_ip6)
self.pg0.add_stream(pkt)
self.pg_start()
# l2tp accepts packets
err = self.statistics.get_counter(
'/err/l2tp-decap-local/l2tpv3 session not found')[0]
self.assertEqual(err, 1)
err_count = err
|
vpp-dev/vpp
|
test/test_l2tp.py
|
Python
|
apache-2.0
| 1,305
|
# Generated by Django 2.0.3 on 2018-03-21 08:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('flow_rpt', '0007_remove_stage_comment'),
]
operations = [
migrations.AlterModelOptions(
name='flow',
options={'ordering': ['-created_time']},
),
]
|
cmos3511/cmos_linux
|
python/op/op_site/flow_rpt/migrations/0008_auto_20180321_1624.py
|
Python
|
gpl-3.0
| 355
|
class BaseItem(object):
def __init__(self):
super(BaseItem, self).__init__()
|
chrisbrake/PythonSandbox
|
character/items/item.py
|
Python
|
bsd-3-clause
| 90
|
from __future__ import unicode_literals
from django.contrib.admin.util import label_for_field
from .utils import AppTestCase
from .testapp.models import SimpleModel, ConcreteModel, AbstractModel
class AdminTests(AppTestCase):
"""
Test admin features
"""
def test_list_label(self):
# Ensure model data is correct
self.assertEqual(SimpleModel._parler_meta.root_model._meta.get_field_by_name('tr_title')[0].verbose_name, "Translated Title")
# Check that a field added to the admin list_display also receives the translated title
# This happens by TranslatedFieldDescriptor.short_description
self.assertEqual(label_for_field('tr_title', SimpleModel), "Translated Title")
def test_list_label_abc(self):
# Ensure model data is correct
self.assertEqual(ConcreteModel._parler_meta.root_model._meta.get_field_by_name('tr_title')[0].verbose_name, "Translated Title")
# See that the TranslatedFieldDescriptor of the concrete model properly routes to the proper model
self.assertEqual(label_for_field('tr_title', ConcreteModel), "Translated Title")
# See that the TranslatedFieldDescriptor of the abstract model handles the fallback properly.
self.assertEqual(label_for_field('tr_title', AbstractModel), "Tr title")
|
ellmetha/django-parler
|
parler/tests/test_admin.py
|
Python
|
apache-2.0
| 1,318
|
"""
Special models useful for complex compound models where control is needed over
which outputs from a source model are mapped to which inputs of a target model.
"""
# pylint: disable=invalid-name
from .core import FittableModel, Model
from astropy.units import Quantity
__all__ = ['Mapping', 'Identity', 'UnitsMapping']
class Mapping(FittableModel):
"""
Allows inputs to be reordered, duplicated or dropped.
Parameters
----------
mapping : tuple
A tuple of integers representing indices of the inputs to this model
to return and in what order to return them. See
:ref:`compound-model-mappings` for more details.
n_inputs : int
Number of inputs; if `None` (default) then ``max(mapping) + 1`` is
used (i.e. the highest input index used in the mapping).
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Raises
------
TypeError
Raised when the number of inputs is less than ``max(mapping)``.
Examples
--------
>>> from astropy.modeling.models import Polynomial2D, Shift, Mapping
>>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
>>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1)
>>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2)
>>> model(1, 2) # doctest: +FLOAT_CMP
(17.0, 14.2)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, mapping, n_inputs=None, name=None, meta=None):
self._inputs = ()
self._outputs = ()
if n_inputs is None:
self._n_inputs = max(mapping) + 1
else:
self._n_inputs = n_inputs
self._n_outputs = len(mapping)
super().__init__(name=name, meta=meta)
self.inputs = tuple('x' + str(idx) for idx in range(self._n_inputs))
self.outputs = tuple('x' + str(idx) for idx in range(self._n_outputs))
self._mapping = mapping
self._input_units_strict = {key: False for key in self._inputs}
self._input_units_allow_dimensionless = {key: False for key in self._inputs}
@property
def n_inputs(self):
return self._n_inputs
@property
def n_outputs(self):
return self._n_outputs
@property
def mapping(self):
"""Integers representing indices of the inputs."""
return self._mapping
def __repr__(self):
if self.name is None:
return f'<Mapping({self.mapping})>'
return f'<Mapping({self.mapping}, name={self.name!r})>'
def evaluate(self, *args):
if len(args) != self.n_inputs:
name = self.name if self.name is not None else "Mapping"
raise TypeError(f'{name} expects {self.n_inputs} inputs; got {len(args)}')
result = tuple(args[idx] for idx in self._mapping)
if self.n_outputs == 1:
return result[0]
return result
@property
def inverse(self):
"""
A `Mapping` representing the inverse of the current mapping.
Raises
------
`NotImplementedError`
An inverse does not exist for mappings that drop some of their inputs
(there is then no way to reconstruct the inputs that were dropped).
"""
try:
mapping = tuple(self.mapping.index(idx)
for idx in range(self.n_inputs))
except ValueError:
raise NotImplementedError(
"Mappings such as {} that drop one or more of their inputs "
"are not invertible at this time.".format(self.mapping))
inv = self.__class__(mapping)
inv._inputs = self._outputs
inv._outputs = self._inputs
inv._n_inputs = len(inv._inputs)
inv._n_outputs = len(inv._outputs)
return inv
class Identity(Mapping):
"""
Returns inputs unchanged.
This class is useful in compound models when some of the inputs must be
passed unchanged to the next model.
Parameters
----------
n_inputs : int
Specifies the number of inputs this identity model accepts.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Examples
--------
Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs::
>>> from astropy.modeling.models import (Polynomial1D, Shift, Scale,
... Identity)
>>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2)
>>> model(1,1) # doctest: +FLOAT_CMP
(2.4, 2.0)
>>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP
(1.0, 1.0)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, n_inputs, name=None, meta=None):
mapping = tuple(range(n_inputs))
super().__init__(mapping, name=name, meta=meta)
def __repr__(self):
if self.name is None:
return f'<Identity({self.n_inputs})>'
return f'<Identity({self.n_inputs}, name={self.name!r})>'
@property
def inverse(self):
"""
The inverse transformation.
In this case of `Identity`, ``self.inverse is self``.
"""
return self
class UnitsMapping(Model):
"""
Mapper that operates on the units of the input, first converting to
canonical units, then assigning new units without further conversion.
Used by Model.coerce_units to support units on otherwise unitless models
such as Polynomial1D.
Parameters
----------
mapping : tuple
A tuple of (input_unit, output_unit) pairs, one per input, matched to the
inputs by position. The first element of each pair is the unit that
the model will accept (specify ``dimensionless_unscaled``
to accept dimensionless input). The second element is the unit that the
model will return. Specify ``dimensionless_unscaled``
to return dimensionless Quantity, and `None` to return raw values without
Quantity.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : dict or bool, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like, optional
Free-form metadata to associate with this model.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D, UnitsMapping
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = UnitsMapping(((u.m, None),)) | poly
>>> model = model | UnitsMapping(((None, u.s),))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D, UnitsMapping
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = UnitsMapping(((u.m, None),), input_units_allow_dimensionless=True) | poly
>>> model = model | UnitsMapping(((None, u.s),))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
def __init__(
self,
mapping,
input_units_equivalencies=None,
input_units_allow_dimensionless=False,
name=None,
meta=None
):
self._mapping = mapping
none_mapping_count = len([m for m in mapping if m[-1] is None])
if none_mapping_count > 0 and none_mapping_count != len(mapping):
raise ValueError("If one return unit is None, then all must be None")
# These attributes are read and handled by Model
self._input_units_strict = True
self.input_units_equivalencies = input_units_equivalencies
self._input_units_allow_dimensionless = input_units_allow_dimensionless
super().__init__(name=name, meta=meta)
# Can't invoke this until after super().__init__, since
# we need self.inputs and self.outputs to be populated.
self._rebuild_units()
def _rebuild_units(self):
self._input_units = {input_name: input_unit for input_name, (input_unit, _) in zip(self.inputs, self.mapping)}
@property
def n_inputs(self):
return len(self._mapping)
@property
def n_outputs(self):
return len(self._mapping)
@property
def inputs(self):
return super().inputs
@inputs.setter
def inputs(self, value):
super(UnitsMapping, self.__class__).inputs.fset(self, value)
self._rebuild_units()
@property
def outputs(self):
return super().outputs
@outputs.setter
def outputs(self, value):
super(UnitsMapping, self.__class__).outputs.fset(self, value)
self._rebuild_units()
@property
def input_units(self):
return self._input_units
@property
def mapping(self):
return self._mapping
def evaluate(self, *args):
result = []
for arg, (_, return_unit) in zip(args, self.mapping):
if isinstance(arg, Quantity):
value = arg.value
else:
value = arg
if return_unit is None:
result.append(value)
else:
result.append(Quantity(value, return_unit, subok=True))
if self.n_outputs == 1:
return result[0]
else:
return tuple(result)
def __repr__(self):
if self.name is None:
return f"<UnitsMapping({self.mapping})>"
else:
return f"<UnitsMapping({self.mapping}, name={self.name!r})>"
|
dhomeier/astropy
|
astropy/modeling/mappings.py
|
Python
|
bsd-3-clause
| 10,827
|
import sys
PY3 = (sys.version_info[0] >= 3)
if PY3:
string_types = str,
else:
string_types = basestring,
from pybindgen.utils import any, mangle_name
import warnings
import traceback
from pybindgen.typehandlers.base import Parameter, ReturnValue, \
join_ctype_and_name, CodeGenerationError, \
param_type_matcher, return_type_matcher, CodegenErrorBase, \
DeclarationsScope, CodeBlock, NotSupportedError, ForwardWrapperBase, ReverseWrapperBase, \
TypeConfigurationError
from pybindgen.typehandlers.codesink import NullCodeSink, MemoryCodeSink
from pybindgen.cppattribute import CppInstanceAttributeGetter, CppInstanceAttributeSetter, \
CppStaticAttributeGetter, CppStaticAttributeSetter, \
PyGetSetDef, PyMetaclass
from pybindgen.pytypeobject import PyTypeObject, PyNumberMethods, PySequenceMethods
from pybindgen.cppcustomattribute import CppCustomInstanceAttributeGetter, CppCustomInstanceAttributeSetter
from pybindgen import settings
from pybindgen import utils
from pybindgen.cppclass_container import CppClassContainerTraits
from . import function
import collections
# Prepare for python 3.9
try:
collectionsCallable = collections.Callable
except AttributeError:
import collections.abc
collectionsCallable = collections.abc.Callable
try:
set
except NameError:
from sets import Set as set
def _type_no_ref(value_type):
if value_type.type_traits.type_is_reference:
return str(value_type.type_traits.target)
else:
return str(value_type.type_traits.ctype_no_modifiers)
def get_python_to_c_converter(value, root_module, code_sink):
if isinstance(value, CppClass):
val_converter = root_module.generate_python_to_c_type_converter(value.ThisClassReturn(value.full_name), code_sink)
val_name = value.full_name
elif isinstance(value, ReturnValue):
val_name = _type_no_ref(value)
if val_name != value.ctype:
value = ReturnValue.new(val_name)
val_converter = root_module.generate_python_to_c_type_converter(value, code_sink)
elif isinstance(value, Parameter):
val_name = _type_no_ref(value)
val_return_type = ReturnValue.new(val_name)
val_converter = root_module.generate_python_to_c_type_converter(val_return_type, code_sink)
else:
raise ValueError("Don't know how to convert %r" % (value,))
return val_converter, val_name
def get_c_to_python_converter(value, root_module, code_sink):
if isinstance(value, CppClass):
val_converter = root_module.generate_c_to_python_type_converter(value.ThisClassReturn(value.full_name), code_sink)
val_name = value.full_name
elif isinstance(value, ReturnValue):
val_converter = root_module.generate_c_to_python_type_converter(value, code_sink)
val_name = _type_no_ref(value)
elif isinstance(value, Parameter):
val_return_type = ReturnValue.new(value.ctype)
val_converter = root_module.generate_c_to_python_type_converter(val_return_type, code_sink)
val_name = _type_no_ref(value)
else:
raise ValueError("Don't know how to convert %s" % str(value))
return val_converter, val_name
class MemoryPolicy(object):
"""memory management policy for a C++ class or C/C++ struct"""
def __init__(self):
if type(self) is MemoryPolicy:
raise NotImplementedError("class is abstract")
def get_free_code(self, object_expression):
"""
Return a code statement to free an underlying C/C++ object.
"""
raise NotImplementedError
def get_pointer_type(self, class_full_name):
return "%s *" % (class_full_name,)
def get_pointer_to_void_name(self, object_name):
return "%s" % object_name
def get_instance_creation_function(self):
return default_instance_creation_function
def get_delete_code(self, cpp_class):
raise NotImplementedError
def get_pystruct_init_code(self, cpp_class, obj):
return ''
def register_ptr_parameter_and_return(self, cls, name):
class ThisClassPtrParameter(CppClassPtrParameter):
"""Register this C++ class as pass-by-pointer parameter"""
CTYPES = []
cpp_class = cls
cls.ThisClassPtrParameter = ThisClassPtrParameter
try:
param_type_matcher.register(name+'*', cls.ThisClassPtrParameter)
except ValueError:
pass
class ThisClassPtrReturn(CppClassPtrReturnValue):
"""Register this C++ class as pointer return"""
CTYPES = []
cpp_class = cls
cls.ThisClassPtrReturn = ThisClassPtrReturn
try:
return_type_matcher.register(name+'*', cls.ThisClassPtrReturn)
except ValueError:
pass
def register_ptr_alias_parameter_and_return(self, cls, alias):
cls.ThisClassPtrParameter.CTYPES.append(alias+'*')
try:
param_type_matcher.register(alias+'*', cls.ThisClassPtrParameter)
except ValueError: pass
cls.ThisClassPtrReturn.CTYPES.append(alias+'*')
try:
return_type_matcher.register(alias+'*', cls.ThisClassPtrReturn)
except ValueError: pass
class ReferenceCountingPolicy(MemoryPolicy):
def write_incref(self, code_block, obj_expr):
"""
Write code to increase the reference code of an object of this
class (the real C++ class, not the wrapper). Should only be
called if the class supports reference counting, as reported
by the attribute `CppClass.has_reference_counting`.
"""
raise NotImplementedError
def write_decref(self, code_block, obj_expr):
"""
Write code to decrease the reference code of an object of this
class (the real C++ class, not the wrapper). Should only be
called if the class supports reference counting, as reported
by the attribute `CppClass.has_reference_counting`.
"""
raise NotImplementedError
class ReferenceCountingMethodsPolicy(ReferenceCountingPolicy):
def __init__(self, incref_method, decref_method, peekref_method=None):
super(ReferenceCountingMethodsPolicy, self).__init__()
self.incref_method = incref_method
self.decref_method = decref_method
self.peekref_method = peekref_method
def write_incref(self, code_block, obj_expr):
code_block.write_code('%s->%s();' % (obj_expr, self.incref_method))
def write_decref(self, code_block, obj_expr):
code_block.write_code('%s->%s();' % (obj_expr, self.decref_method))
def get_delete_code(self, cpp_class):
delete_code = ("if (self->obj) {\n"
" %s *tmp = self->obj;\n"
" self->obj = NULL;\n"
" tmp->%s();\n"
"}"
% (cpp_class.full_name, self.decref_method))
return delete_code
def __repr__(self):
return 'cppclass.ReferenceCountingMethodsPolicy(incref_method=%r, decref_method=%r, peekref_method=%r)' \
% (self.incref_method, self.decref_method, self.peekref_method)
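# Illustrative configuration sketch (method names assumed, not from this file): a wrapped
# C++ class exposing Ref()/Unref()/GetReferenceCount() could be registered with
#   memory_policy=ReferenceCountingMethodsPolicy(incref_method='Ref',
#                                                decref_method='Unref',
#                                                peekref_method='GetReferenceCount')
# when constructing the corresponding CppClass.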
class ReferenceCountingFunctionsPolicy(ReferenceCountingPolicy):
def __init__(self, incref_function, decref_function, peekref_function=None):
super(ReferenceCountingFunctionsPolicy, self).__init__()
self.incref_function = incref_function
self.decref_function = decref_function
self.peekref_function = peekref_function
def write_incref(self, code_block, obj_expr):
code_block.write_code('%s(%s);' % (self.incref_function, obj_expr))
def write_decref(self, code_block, obj_expr):
code_block.write_code('%s(%s);' % (self.decref_function, obj_expr))
def get_delete_code(self, cpp_class):
delete_code = ("if (self->obj) {\n"
" %s *tmp = self->obj;\n"
" self->obj = NULL;\n"
" %s(tmp);\n"
"}"
% (cpp_class.full_name, self.decref_function))
return delete_code
def __repr__(self):
return 'cppclass.ReferenceCountingFunctionsPolicy(incref_function=%r, decref_function=%r, peekref_function=%r)' \
% (self.incref_function, self.decref_function, self.peekref_function)
class FreeFunctionPolicy(MemoryPolicy):
def __init__(self, free_function):
super(FreeFunctionPolicy, self).__init__()
self.free_function = free_function
def get_delete_code(self, cpp_class):
delete_code = ("if (self->obj) {\n"
" %s *tmp = self->obj;\n"
" self->obj = NULL;\n"
" %s(tmp);\n"
"}"
% (cpp_class.full_name, self.free_function))
return delete_code
def __repr__(self):
return 'cppclass.FreeFunctionPolicy(%r)' % self.free_function
class SmartPointerPolicy(MemoryPolicy):
pointer_template = None # class should fill this or create descriptor/getter
def default_instance_creation_function(cpp_class, code_block, lvalue,
parameters, construct_type_name):
"""
Default "instance creation function"; it is called whenever a new
C++ class instance needs to be created; this default
implementation uses a standard C++ new allocator.
:param cpp_class: the CppClass object whose instance is to be created
:param code_block: CodeBlock object on which the instance creation code should be generated
:param lvalue: lvalue expression that should hold the result in the end
:param parameters: stringified list of parameters
:param construct_type_name: actual name of type to be constructed (it is
not always the class name, sometimes it's
the python helper class)
"""
assert lvalue
assert not lvalue.startswith('None')
if cpp_class.incomplete_type:
raise CodeGenerationError("%s cannot be constructed (incomplete type)"
% cpp_class.full_name)
code_block.write_code(
"%s = new %s(%s);" % (lvalue, construct_type_name, parameters))
class CppHelperClass(object):
"""
Generates code for a C++ proxy subclass that takes care of
forwarding virtual methods from C++ to Python.
"""
def __init__(self, class_):
"""
:param class_: original CppClass wrapper object
"""
self.class_ = class_
self.name = class_.pystruct + "__PythonHelper"
self.virtual_parent_callers = {}
self.virtual_proxies = []
self.cannot_be_constructed = False
self.custom_methods = []
self.post_generation_code = []
self.virtual_methods = []
def add_virtual_method(self, method):
assert method.is_virtual
assert method.class_ is not None
for existing in self.virtual_methods:
if method.matches_signature(existing):
return # don't re-add already existing method
if isinstance(method, CppDummyMethod):
if method.is_pure_virtual:
self.cannot_be_constructed = True
else:
self.virtual_methods.append(method)
if not method.is_pure_virtual:
if settings._get_deprecated_virtuals():
vis = ['public', 'protected']
else:
vis = ['protected']
if method.visibility in vis:
parent_caller = CppVirtualMethodParentCaller(method)
#parent_caller.class_ = method.class_
parent_caller.helper_class = self
parent_caller.main_wrapper = method # XXX: need to explain this
self.add_virtual_parent_caller(parent_caller)
proxy = CppVirtualMethodProxy(method)
proxy.main_wrapper = method # XXX: need to explain this
self.add_virtual_proxy(proxy)
def add_virtual_parent_caller(self, parent_caller):
"""Add a new CppVirtualMethodParentCaller object to this helper class"""
assert isinstance(parent_caller, CppVirtualMethodParentCaller)
name = parent_caller.method_name
try:
overload = self.virtual_parent_callers[name]
except KeyError:
overload = CppOverloadedMethod(name)
## implicit conversions + virtual methods disabled
## temporarily until I can figure out how to fix the unit
## tests.
overload.enable_implicit_conversions = False
#overload.static_decl = False
overload.pystruct = self.class_.pystruct
self.virtual_parent_callers[name] = overload
assert self.class_ is not None
for existing in overload.wrappers:
if parent_caller.matches_signature(existing):
break # don't re-add already existing method
else:
overload.add(parent_caller)
def add_custom_method(self, declaration, body=None):
"""
Add a custom method to the helper class, given by a
declaration line and a body. The body can be None, in case
the whole method definition is included in the declaration
itself.
"""
self.custom_methods.append((declaration, body))
def add_post_generation_code(self, code):
"""
Add custom code to be included right after the helper class is generated.
"""
self.post_generation_code.append(code)
def add_virtual_proxy(self, virtual_proxy):
"""Add a new CppVirtualMethodProxy object to this class"""
assert isinstance(virtual_proxy, CppVirtualMethodProxy)
self.virtual_proxies.append(virtual_proxy)
def generate_forward_declarations(self, code_sink_param):
"""
Generate the proxy class (declaration only) to a given code sink
"""
code_sink = MemoryCodeSink()
if self._generate_forward_declarations(code_sink):
code_sink.flush_to(code_sink_param)
else:
self.cannot_be_constructed = True
def _generate_forward_declarations(self, code_sink):
"""
Generate the proxy class (declaration only) to a given code sink.
Returns True if all is well, False if a pure virtual method
was found that could not be generated.
"""
code_sink.writeln("class %s : public %s\n{\npublic:" %
(self.name, self.class_.full_name))
code_sink.indent()
code_sink.writeln("PyObject *m_pyself;")
if not self.class_.import_from_module:
## replicate the parent constructors in the helper class
implemented_constructor_signatures = []
for cons in self.class_.constructors:
## filter out duplicated constructors
signature = [param.ctype for param in cons.parameters]
if signature in implemented_constructor_signatures:
continue
implemented_constructor_signatures.append(signature)
params = [join_ctype_and_name(param.ctype, param.name)
for param in cons.parameters]
code_sink.writeln("%s(%s)" % (self.name, ', '.join(params)))
code_sink.indent()
code_sink.writeln(": %s(%s), m_pyself(NULL)\n{}" %
(self.class_.full_name,
', '.join([param.name for param in cons.parameters])))
code_sink.unindent()
code_sink.writeln()
## add the set_pyobj method
code_sink.writeln("""
void set_pyobj(PyObject *pyobj)
{
Py_XDECREF(m_pyself);
Py_INCREF(pyobj);
m_pyself = pyobj;
}
""")
## write a destructor
code_sink.writeln("virtual ~%s()\n{" % self.name)
code_sink.indent()
code_sink.writeln("Py_CLEAR(m_pyself);")
code_sink.unindent()
code_sink.writeln("}\n")
if not self.class_.import_from_module:
## write the parent callers (_name)
for parent_caller in self.virtual_parent_callers.values():
#parent_caller.class_ = self.class_
parent_caller.helper_class = self
parent_caller.reset_code_generation_state()
## test code generation
try:
try:
utils.call_with_error_handling(parent_caller.generate,
(NullCodeSink(),), {}, parent_caller)
except utils.SkipWrapper:
continue
finally:
parent_caller.reset_code_generation_state()
code_sink.writeln()
parent_caller.generate_class_declaration(code_sink)
for parent_caller_wrapper in parent_caller.wrappers:
parent_caller_wrapper.generate_parent_caller_method(code_sink)
## write the virtual proxies
for virtual_proxy in self.virtual_proxies:
#virtual_proxy.class_ = self.class_
virtual_proxy.helper_class = self
## test code generation
#virtual_proxy.class_ = self.class_
#virtual_proxy.helper_class = self
virtual_proxy.reset_code_generation_state()
try:
try:
utils.call_with_error_handling(virtual_proxy.generate,
(NullCodeSink(),), {}, virtual_proxy)
except utils.SkipWrapper:
if virtual_proxy.method.is_pure_virtual:
return False
continue
finally:
virtual_proxy.reset_code_generation_state()
code_sink.writeln()
virtual_proxy.generate_declaration(code_sink)
for custom_declaration, dummy in self.custom_methods:
code_sink.writeln(custom_declaration)
code_sink.unindent()
code_sink.writeln("};\n")
if not self.class_.import_from_module:
for code in self.post_generation_code:
code_sink.writeln(code)
code_sink.writeln()
return True
def generate(self, code_sink):
"""
Generate the proxy class (virtual method bodies only) to a given code sink.
returns pymethodef list of parent callers
"""
if self.class_.import_from_module:
return
## write the parent callers (_name)
method_defs = []
for name, parent_caller in self.virtual_parent_callers.items():
#parent_caller.class_ = self.class_
parent_caller.helper_class = self
code_sink.writeln()
## parent_caller.generate(code_sink)
try:
utils.call_with_error_handling(parent_caller.generate,
(code_sink,), {}, parent_caller)
except utils.SkipWrapper:
continue
if settings._get_deprecated_virtuals():
parent_caller_name = '_'+name
else:
parent_caller_name = name
method_defs.append(parent_caller.get_py_method_def(parent_caller_name))
## write the virtual proxies
for virtual_proxy in self.virtual_proxies:
#virtual_proxy.class_ = self.class_
virtual_proxy.helper_class = self
code_sink.writeln()
## virtual_proxy.generate(code_sink)
try:
utils.call_with_error_handling(virtual_proxy.generate,
(code_sink,), {}, virtual_proxy)
except utils.SkipWrapper:
assert not virtual_proxy.method.is_pure_virtual
continue
for dummy, custom_body in self.custom_methods:
if custom_body:
code_sink.writeln(custom_body)
return method_defs
class CppClass(object):
"""
A CppClass object takes care of generating the code for wrapping a C++ class
"""
def __init__(self, name, parent=None, incref_method=None, decref_method=None,
automatic_type_narrowing=None, allow_subclassing=None,
is_singleton=False, outer_class=None,
peekref_method=None,
template_parameters=(), custom_template_class_name=None,
incomplete_type=False, free_function=None,
incref_function=None, decref_function=None,
python_name=None, memory_policy=None,
foreign_cpp_namespace=None,
docstring=None,
custom_name=None,
import_from_module=None,
destructor_visibility='public'
):
"""
:param name: class name
:param parent: optional parent class wrapper, or list of
parents. Valid values are None, a CppClass
instance, or a list of CppClass instances.
:param incref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that increments the
reference count (may be inherited from parent
if not given)
:param decref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that decrements the
reference count (may be inherited from parent
if not given)
:param automatic_type_narrowing: if True, automatic return type
narrowing will be done on objects
of this class and its descendants
when returned by pointer from a
function or method.
:param allow_subclassing: if True, generated class wrappers will
allow subclassing in Python.
:param is_singleton: if True, the class is considered a singleton,
and so the python wrapper will never call the
C++ class destructor to free the value.
:param peekref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that returns the current reference count.
:param free_function: (deprecated in favour of memory_policy) name of C function used to deallocate class instances
:param incref_function: (deprecated in favour of memory_policy) same as incref_method, but as a function instead of method
:param decref_function: (deprecated in favour of memory_policy) same as decref_method, but as a function instead of method
:param python_name: name of the class as it will appear from
Python side. This parameter is DEPRECATED in favour of
custom_name.
:param memory_policy: memory management policy; if None, it
inherits from the parent class. Only root classes can have a
memory policy defined.
:type memory_policy: L{MemoryPolicy}
:param foreign_cpp_namespace: if set, the class is assumed to
belong to the given C++ namespace, regardless of the C++
namespace of the python module it will be added to. For
instance, this can be useful to wrap std classes, like
std::ofstream, without having to create an extra python
submodule.
:param docstring: None or a string containing the docstring
that will be generated for the class
:param custom_name: an alternative name to give to this class
at python-side; if omitted, the name of the class in the
python module will be the same name as the class in C++
(minus namespace).
:param import_from_module: if not None, the type is imported
from a foreign Python module with the given name.
"""
assert outer_class is None or isinstance(outer_class, CppClass)
self.incomplete_type = incomplete_type
self.outer_class = outer_class
self._module = None
self.name = name
self.docstring = docstring
self.mangled_name = None
self.mangled_full_name = None
self.template_parameters = template_parameters
self.container_traits = None
self.import_from_module = import_from_module
assert destructor_visibility in ['public', 'private', 'protected']
self.destructor_visibility = destructor_visibility
self.custom_name = custom_name
if custom_template_class_name:
warnings.warn("Use the custom_name parameter.",
DeprecationWarning, stacklevel=2)
self.custom_name = custom_template_class_name
if python_name:
warnings.warn("Use the custom_name parameter.",
DeprecationWarning, stacklevel=2)
self.custom_name = python_name
self.is_singleton = is_singleton
self.foreign_cpp_namespace = foreign_cpp_namespace
self.full_name = None # full name with C++ namespaces attached and template parameters
self.methods = collections.OrderedDict() # name => OverloadedMethod
self._dummy_methods = [] # methods that have parameter/retval binding problems
self.nonpublic_methods = []
self.constructors = [] # (name, wrapper) pairs
self.pytype = PyTypeObject()
self.slots = self.pytype.slots
self.helper_class = None
self.instance_creation_function = None
self.post_instance_creation_function = None
        ## set to True when we become aware that generating the helper
## class is not going to be possible
self.helper_class_disabled = False
self.cannot_be_constructed = '' # reason
self.has_trivial_constructor = False
self.has_copy_constructor = False
self.has_output_stream_operator = False
self._have_pure_virtual_methods = None
self._wrapper_registry = None
self.binary_comparison_operators = set()
self.binary_numeric_operators = dict()
self.inplace_numeric_operators = dict()
self.unary_numeric_operators = dict()
self.valid_sequence_methods = {"__len__" : "sq_length",
"__add__" : "sq_concat",
"__mul__" : "sq_repeat",
"__getitem__" : "sq_item",
"__getslice__" : "sq_slice",
"__setitem__" : "sq_ass_item",
"__setslice__" : "sq_ass_slice",
"__contains__" : "sq_contains",
"__iadd__" : "sq_inplace_concat",
"__imul__" : "sq_inplace_repeat"}
## list of CppClasses from which a value of this class can be
        ## implicitly generated; corresponds to an
        ## operator ThisClass(); in the other class.
self.implicitly_converts_from = []
## list of hook functions to call just prior to helper class
## code generation.
self.helper_class_hooks = []
self._pystruct = None #"***GIVE ME A NAME***"
self.metaclass_name = "***GIVE ME A NAME***"
self.pytypestruct = "***GIVE ME A NAME***"
self.instance_attributes = PyGetSetDef("%s__getsets" % self._pystruct)
self.static_attributes = PyGetSetDef("%s__getsets" % self.metaclass_name)
if isinstance(parent, list):
self.bases = list(parent)
self.parent = self.bases[0]
elif isinstance(parent, CppClass):
self.parent = parent
self.bases = [parent]
elif parent is None:
self.parent = None
self.bases = []
else:
raise TypeError("'parent' must be None, CppClass instance, or a list of CppClass instances")
if free_function:
warnings.warn("Use FreeFunctionPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = FreeFunctionPolicy(free_function)
elif incref_method:
warnings.warn("Use ReferenceCountingMethodsPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = ReferenceCountingMethodsPolicy(incref_method, decref_method, peekref_method)
elif incref_function:
warnings.warn("Use ReferenceCountingFunctionsPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = ReferenceCountingFunctionsPolicy(incref_function, decref_function)
if not self.bases:
assert memory_policy is None or isinstance(memory_policy, MemoryPolicy)
self.memory_policy = memory_policy
else:
for base in self.bases:
if base.memory_policy is not None:
self.memory_policy = base.memory_policy
assert memory_policy is None, \
"changing memory policy from parent (%s) to child (%s) class not permitted" \
% (base.name, self.name)
break
else:
self.memory_policy = memory_policy
if automatic_type_narrowing is None:
if not self.bases:
self.automatic_type_narrowing = settings.automatic_type_narrowing
else:
self.automatic_type_narrowing = self.parent.automatic_type_narrowing
else:
self.automatic_type_narrowing = automatic_type_narrowing
if allow_subclassing is None:
if self.parent is None:
self.allow_subclassing = settings.allow_subclassing
else:
self.allow_subclassing = self.parent.allow_subclassing
else:
if any([p.allow_subclassing for p in self.bases]) and not allow_subclassing:
raise ValueError("Cannot disable subclassing if a parent class allows it")
else:
self.allow_subclassing = allow_subclassing
if self.destructor_visibility not in ['public', 'protected']:
self.allow_subclassing = False
self.typeid_map_name = None
if name != 'dummy':
## register type handlers
class ThisClassParameter(CppClassParameter):
"""Register this C++ class as pass-by-value parameter"""
CTYPES = []
cpp_class = self
self.ThisClassParameter = ThisClassParameter
try:
param_type_matcher.register(name, self.ThisClassParameter)
except ValueError:
pass
class ThisClassRefParameter(CppClassRefParameter):
"""Register this C++ class as pass-by-reference parameter"""
CTYPES = []
cpp_class = self
self.ThisClassRefParameter = ThisClassRefParameter
try:
param_type_matcher.register(name+'&', self.ThisClassRefParameter)
except ValueError:
pass
class ThisClassReturn(CppClassReturnValue):
"""Register this C++ class as value return"""
CTYPES = []
cpp_class = self
self.ThisClassReturn = ThisClassReturn
self.ThisClassRefReturn = ThisClassReturn
try:
return_type_matcher.register(name, self.ThisClassReturn)
return_type_matcher.register(name, self.ThisClassRefReturn)
except ValueError:
pass
if self.memory_policy is not None:
self.memory_policy.register_ptr_parameter_and_return(self, name)
else: # Regular pointer
class ThisClassPtrParameter(CppClassPtrParameter):
"""Register this C++ class as pass-by-pointer parameter"""
CTYPES = []
cpp_class = self
self.ThisClassPtrParameter = ThisClassPtrParameter
try:
param_type_matcher.register(name+'*', self.ThisClassPtrParameter)
except ValueError:
pass
class ThisClassPtrReturn(CppClassPtrReturnValue):
"""Register this C++ class as pointer return"""
CTYPES = []
cpp_class = self
self.ThisClassPtrReturn = ThisClassPtrReturn
try:
return_type_matcher.register(name+'*', self.ThisClassPtrReturn)
except ValueError:
pass
class ThisClassRefReturn(CppClassRefReturnValue):
"""Register this C++ class as reference return"""
CTYPES = []
cpp_class = self
self.ThisClassRefReturn = ThisClassRefReturn
try:
return_type_matcher.register(name+'&', self.ThisClassRefReturn)
except ValueError:
pass
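    # Illustrative sketch of how a CppClass wrapper is normally obtained in a
    # binding script; the module and class names are hypothetical and scripts
    # usually go through Module.add_class rather than instantiating CppClass
    # directly:
    #
    #   mod = Module('shapes')
    #   shape = mod.add_class('Shape', allow_subclassing=True)
    #   circle = mod.add_class('Circle', parent=shape)
    #   registry = mod.add_class('Registry', is_singleton=True)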
def __repr__(self):
return "<pybindgen.CppClass %r>" % self.full_name
def add_container_traits(self, *args, **kwargs):
assert self.container_traits is None
self.container_traits = CppClassContainerTraits(self, *args, **kwargs)
def add_binary_comparison_operator(self, operator):
"""
Add support for a C++ binary comparison operator, such as == or <.
The binary operator is assumed to operate with both operands
of the type of the class, either by reference or by value.
:param operator: string indicating the name of the operator to
support, e.g. '=='
"""
operator = utils.ascii(operator)
if not isinstance(operator, string_types):
raise TypeError("expected operator name as string")
if operator not in ['==', '!=', '<', '<=', '>', '>=']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
self.binary_comparison_operators.add(operator)
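    # Illustrative sketch (hypothetical 'vec3' class wrapper returned by
    # Module.add_class): enabling rich comparisons so wrapped instances
    # support ==, != and ordering from Python:
    #
    #   vec3.add_binary_comparison_operator('==')
    #   vec3.add_binary_comparison_operator('!=')
    #   vec3.add_binary_comparison_operator('<')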
def add_binary_numeric_operator(self, operator, result_cppclass=None,
left_cppclass=None, right=None):
"""
Add support for a C++ binary numeric operator, such as +, -, \\*, or /.
:param operator: string indicating the name of the operator to
                         support, e.g. '+'
:param result_cppclass: the CppClass object of the result type, assumed to be this class if omitted
:param left_cppclass: the CppClass object of the left operand type, assumed to be this class if omitted
:param right: the type of the right parameter. Can be a
CppClass, Parameter, or param spec. Assumed to be this class
if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, string_types):
raise TypeError("expected operator name as string")
if operator not in ['+', '-', '*', '/']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.binary_numeric_operators[operator]
except KeyError:
l = []
self.binary_numeric_operators[operator] = l
if result_cppclass is None:
result_cppclass = self
if left_cppclass is None:
left_cppclass = self
if right is None:
right = self
elif isinstance(right, CppClass):
pass
else:
if isinstance(right, string_types):
right = utils.param(right, 'right')
try:
right = utils.eval_param(right, None)
except utils.SkipWrapper:
return
op = (result_cppclass, left_cppclass, right)
if op not in l:
l.append(op)
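    # Illustrative sketch (hypothetical 'vec3' wrapper): wrapping operator+
    # with both operands of this class, and operator* with a plain double on
    # the right-hand side; result and operand classes default to this class
    # when omitted:
    #
    #   vec3.add_binary_numeric_operator('+')
    #   vec3.add_binary_numeric_operator('*', right='double')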
def add_inplace_numeric_operator(self, operator, right=None):
"""
Add support for a C++ inplace numeric operator, such as +=, -=, \\*=, or /=.
:param operator: string indicating the name of the operator to
support, e.g. '+='
:param right: the type of the right parameter. Can be a
CppClass, Parameter, or param spec. Assumed to be this class
if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, string_types):
raise TypeError("expected operator name as string")
if operator not in ['+=', '-=', '*=', '/=']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.inplace_numeric_operators[operator]
except KeyError:
l = []
self.inplace_numeric_operators[operator] = l
if right is None:
right = self
else:
if isinstance(right, string_types):
right = utils.param(right, 'right')
try:
right = utils.eval_param(right, None)
except utils.SkipWrapper:
return
if right not in l:
l.append((self, self, right))
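    # Illustrative sketch (hypothetical 'vec3' wrapper): in-place operators
    # take this class as the right operand when 'right' is omitted:
    #
    #   vec3.add_inplace_numeric_operator('+=')
    #   vec3.add_inplace_numeric_operator('*=', right='double')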
def add_unary_numeric_operator(self, operator, result_cppclass=None, left_cppclass=None):
"""
        Add support for a C++ unary numeric operator; currently only - is supported.
:param operator: string indicating the name of the operator to
support, e.g. '-'
:param result_cppclass: the CppClass object of the result type, assumed to be this class if omitted
:param left_cppclass: the CppClass object of the left operand type, assumed to be this class if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, string_types):
raise TypeError("expected operator name as string")
if operator not in ['-']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.unary_numeric_operators[operator]
except KeyError:
l = []
self.unary_numeric_operators[operator] = l
if result_cppclass is None:
result_cppclass = self
if left_cppclass is None:
left_cppclass = self
op = (result_cppclass, left_cppclass)
if op not in l:
l.append(op)
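    # Illustrative sketch (hypothetical 'vec3' wrapper): unary negation is the
    # only unary operator currently accepted:
    #
    #   vec3.add_unary_numeric_operator('-')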
def add_class(self, *args, **kwargs):
"""
Add a nested class. See L{CppClass} for information about accepted parameters.
"""
assert 'outer_class' not in kwargs
kwargs['outer_class'] = self
return self.module.add_class(*args, **kwargs)
def add_enum(self, *args, **kwargs):
"""
Add a nested enum. See L{Enum} for information about accepted parameters.
"""
assert 'outer_class' not in kwargs
kwargs['outer_class'] = self
return self.module.add_enum(*args, **kwargs)
def get_mro(self):
"""
Get the method resolution order (MRO) of this class.
:return: an iterator that gives CppClass objects, from leaf to root class
"""
to_visit = [self]
visited = set()
while to_visit:
cls = to_visit.pop(0)
visited.add(cls)
yield cls
for base in cls.bases:
if base not in visited:
to_visit.append(base)
def get_all_methods(self):
"""Returns an iterator to iterate over all methods of the class"""
for overload in self.methods.values():
for method in overload.wrappers:
yield method
for method in self.nonpublic_methods:
yield method
def get_have_pure_virtual_methods(self):
"""
Returns True if the class has pure virtual methods with no
implementation (which would mean the type is not instantiable
directly, only through a helper class).
"""
if self._have_pure_virtual_methods is not None:
return self._have_pure_virtual_methods
mro = list(self.get_mro())
mro_reversed = list(mro)
mro_reversed.reverse()
self._have_pure_virtual_methods = False
for pos, cls in enumerate(mro_reversed):
for method in list(cls.get_all_methods()) + cls._dummy_methods:
if not isinstance(method, CppMethod):
continue
if method.is_pure_virtual:
## found a pure virtual method; now go see in the
## child classes, check if any of them implements
## this pure virtual method.
implemented = False
for child_cls in mro_reversed[pos+1:]:
for child_method in list(child_cls.get_all_methods()) + child_cls._dummy_methods:
if not isinstance(child_method, CppMethod):
continue
if not child_method.is_virtual:
continue
if not child_method.matches_signature(method):
continue
if not child_method.is_pure_virtual:
implemented = True
break
if implemented:
break
if not implemented:
self._have_pure_virtual_methods = True
return self._have_pure_virtual_methods
have_pure_virtual_methods = property(get_have_pure_virtual_methods)
def is_subclass(self, other):
"""Return True if this CppClass instance represents a class that is a
        subclass of another class represented by the CppClass object \\`other\\'."""
if not isinstance(other, CppClass):
raise TypeError
return other in self.get_mro()
def add_helper_class_hook(self, hook):
"""
Add a hook function to be called just prior to a helper class
being generated. The hook function applies to this class and
all subclasses. The hook function is called like this::
hook_function(helper_class)
"""
if not isinstance(hook, collectionsCallable):
raise TypeError("hook function must be callable")
self.helper_class_hooks.append(hook)
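    # Illustrative sketch of a helper-class hook (the hook body is
    # hypothetical); the hook receives the CppHelperClass instance just before
    # its code is generated:
    #
    #   def my_hook(helper_class):
    #       # inspect or tweak the helper class here
    #       print("about to generate helper class", helper_class.name)
    #
    #   cls.add_helper_class_hook(my_hook)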
def _get_all_helper_class_hooks(self):
"""
Returns a list of all helper class hook functions, including
the ones registered with parent classes. Parent hooks will
appear first in the list.
"""
l = []
for cls in self.get_mro():
l = cls.helper_class_hooks + l
return l
def set_instance_creation_function(self, instance_creation_function):
"""Set a custom function to be called to create instances of this
class and its subclasses.
:param instance_creation_function: instance creation function; see
default_instance_creation_function()
for signature and example.
"""
self.instance_creation_function = instance_creation_function
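    # Illustrative sketch of a custom instance creation function; its call
    # signature can be inferred from write_create_instance() below, but the
    # body here is a hypothetical example only:
    #
    #   def my_creation_function(cpp_class, code_block, lvalue, parameters, construct_type_name):
    #       code_block.write_code("%s = new %s(%s);" % (lvalue, construct_type_name, ", ".join(parameters)))
    #
    #   cls.set_instance_creation_function(my_creation_function)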
def set_post_instance_creation_function(self, post_instance_creation_function):
"""Set a custom function to be called to add code after an
instance is created (usually by the "instance creation
function") and registered with the Python runtime.
:param post_instance_creation_function: post instance creation function
"""
self.post_instance_creation_function = post_instance_creation_function
def get_instance_creation_function(self):
for cls in self.get_mro():
if cls.instance_creation_function is not None:
return cls.instance_creation_function
if cls.memory_policy is not None:
return cls.memory_policy.get_instance_creation_function()
return default_instance_creation_function
def get_post_instance_creation_function(self):
for cls in self.get_mro():
if cls.post_instance_creation_function is not None:
return cls.post_instance_creation_function
return None
def write_create_instance(self, code_block, lvalue, parameters, construct_type_name=None):
instance_creation_func = self.get_instance_creation_function()
if construct_type_name is None:
construct_type_name = self.get_construct_name()
instance_creation_func(self, code_block, lvalue, parameters, construct_type_name)
def write_post_instance_creation_code(self, code_block, lvalue, parameters, construct_type_name=None):
post_instance_creation_func = self.get_post_instance_creation_function()
if post_instance_creation_func is None:
return
if construct_type_name is None:
construct_type_name = self.get_construct_name()
post_instance_creation_func(self, code_block, lvalue, parameters, construct_type_name)
def get_pystruct(self):
if self._pystruct is None:
raise ValueError
return self._pystruct
pystruct = property(get_pystruct)
def get_construct_name(self):
"""Get a name usable for new %s construction, or raise
CodeGenerationError if none found"""
if self.cannot_be_constructed:
raise CodeGenerationError("%s cannot be constructed (%s)" % (self.full_name, self.cannot_be_constructed))
if self.have_pure_virtual_methods:
raise CodeGenerationError("%s cannot be constructed (class has pure virtual methods)" % self.full_name)
else:
return self.full_name
def implicitly_converts_to(self, other):
"""
Declares that values of this class can be implicitly converted
        to another class; corresponds to an operator AnotherClass();
special method.
"""
assert isinstance(other, CppClass)
other.implicitly_converts_from.append(self)
def get_all_implicit_conversions(self):
"""
Gets a new list of all other classes whose value can be implicitly
converted to a value of this class.
>>> Foo = CppClass("Foo")
>>> Bar = CppClass("Bar")
>>> Zbr = CppClass("Zbr")
>>> Bar.implicitly_converts_to(Foo)
>>> Zbr.implicitly_converts_to(Bar)
>>> l = Foo.get_all_implicit_conversions()
        >>> l.sort(key=lambda cls: cls.name)
>>> [cls.name for cls in l]
['Bar']
"""
return list(self.implicitly_converts_from)
# classes = []
# to_visit = list(self.implicitly_converts_from)
# while to_visit:
# source = to_visit.pop(0)
# if source in classes or source is self:
# continue
# classes.append(source)
# to_visit.extend(source.implicitly_converts_from)
# return classes
def _update_names(self):
prefix = settings.name_prefix.capitalize()
if self.outer_class is None:
if self.foreign_cpp_namespace:
self.full_name = self.foreign_cpp_namespace + '::' + self.name
else:
if self._module.cpp_namespace_prefix:
if self._module.cpp_namespace_prefix == '::':
self.full_name = '::' + self.name
else:
self.full_name = self._module.cpp_namespace_prefix + '::' + self.name
else:
self.full_name = self.name
else:
assert not self.foreign_cpp_namespace
self.full_name = '::'.join([self.outer_class.full_name, self.name])
def make_upper(s):
if s and s[0].islower():
return s[0].upper()+s[1:]
else:
return s
def mangle(name):
return mangle_name(name)
def flatten(name):
"make a name like::This look LikeThis"
return ''.join([make_upper(mangle(s)) for s in name.split('::')])
self.mangled_name = flatten(self.name)
self.mangled_full_name = flatten(self.full_name)
if self.template_parameters:
self.full_name += "< %s >" % (', '.join(self.template_parameters))
mangled_template_params = '__' + '_'.join([flatten(s) for s in self.template_parameters])
self.mangled_name += mangled_template_params
self.mangled_full_name += mangled_template_params
self._pystruct = "Py%s%s" % (prefix, self.mangled_full_name)
self.metaclass_name = "%sMeta" % self.mangled_full_name
self.pytypestruct = "Py%s%s_Type" % (prefix, self.mangled_full_name)
self.instance_attributes.cname = "%s__getsets" % self._pystruct
self.static_attributes.cname = "%s__getsets" % self.metaclass_name
## re-register the class type handlers, now with class full name
self.register_alias(self.full_name)
if self.get_type_narrowing_root() is self:
self.typeid_map_name = "%s__typeid_map" % self.pystruct
else:
self.typeid_map_name = None
def register_alias(self, alias):
"""Re-register the class with another base name, in addition to any
registrations that might have already been done."""
self.module.register_type(None, alias, self)
self.ThisClassParameter.CTYPES.append(alias)
try:
param_type_matcher.register(alias, self.ThisClassParameter)
except ValueError: pass
self.ThisClassRefParameter.CTYPES.append(alias+'&')
try:
param_type_matcher.register(alias+'&', self.ThisClassRefParameter)
except ValueError: pass
self.ThisClassReturn.CTYPES.append(alias)
try:
return_type_matcher.register(alias, self.ThisClassReturn)
except ValueError: pass
if self.memory_policy is not None:
self.memory_policy.register_ptr_alias_parameter_and_return(self, alias)
else:
self.ThisClassPtrParameter.CTYPES.append(alias+'*')
try:
param_type_matcher.register(alias+'*', self.ThisClassPtrParameter)
except ValueError: pass
self.ThisClassPtrReturn.CTYPES.append(alias+'*')
try:
return_type_matcher.register(alias+'*', self.ThisClassPtrReturn)
except ValueError: pass
self.ThisClassRefReturn.CTYPES.append(alias)
try:
return_type_matcher.register(alias+'&', self.ThisClassRefReturn)
except ValueError: pass
def get_module(self):
"""Get the Module object this class belongs to"""
return self._module
def set_module(self, module):
"""Set the Module object this class belongs to"""
self._module = module
self._update_names()
module = property(get_module, set_module)
def inherit_default_constructors(self):
"""inherit the default constructors from the parentclass according to C++
language rules"""
for base in self.bases:
for cons in base.constructors:
if len(cons.parameters) == 0:
self.add_constructor([], visibility=cons.visibility)
elif (len(cons.parameters) == 1
and isinstance(cons.parameters[0], self.parent.ThisClassRefParameter)):
self.add_constructor([self.ThisClassRefParameter(
self.full_name + "&",
"obj",
cons.parameters[0].direction)],
visibility=cons.visibility)
def get_helper_class(self):
"""gets the "helper class" for this class wrapper, creating it if necessary"""
for cls in self.get_mro():
if cls.helper_class_disabled:
return None
if not self.allow_subclassing:
return None
if self.helper_class is None:
if not self.is_singleton:
self.helper_class = CppHelperClass(self)
self.module.add_include('<typeinfo>')
return self.helper_class
def get_type_narrowing_root(self):
"""Find the root CppClass along the subtree of all parent classes that
        have automatic_type_narrowing=True. Note: multiple inheritance is
        not implemented"""
if not self.automatic_type_narrowing:
return None
root = self
while (root.parent is not None
and root.parent.automatic_type_narrowing):
root = root.parent
return root
def _register_typeid(self, module):
"""register this class with the typeid map root class"""
root = self.get_type_narrowing_root()
module.after_init.write_code("%s.register_wrapper(typeid(%s), &%s);"
% (root.typeid_map_name, self.full_name, self.pytypestruct))
def _generate_typeid_map(self, code_sink, module):
"""generate the typeid map and fill it with values"""
try:
module.declare_one_time_definition("TypeIDMap")
except KeyError:
pass
else:
code_sink.writeln('''
#include <map>
#include <string>
#include <typeinfo>
#if defined(__GNUC__) && __GNUC__ >= 3 && !defined(__clang__)
# include <cxxabi.h>
#endif
#define PBG_TYPEMAP_DEBUG 0
namespace pybindgen {
class TypeMap
{
std::map<std::string, PyTypeObject *> m_map;
public:
TypeMap() {}
void register_wrapper(const std::type_info &cpp_type_info, PyTypeObject *python_wrapper)
{
#if PBG_TYPEMAP_DEBUG
std::cerr << "register_wrapper(this=" << this << ", type_name=" << cpp_type_info.name()
<< ", python_wrapper=" << python_wrapper->tp_name << ")" << std::endl;
#endif
m_map[std::string(cpp_type_info.name())] = python_wrapper;
}
''')
if settings.gcc_rtti_abi_complete:
code_sink.writeln('''
PyTypeObject * lookup_wrapper(const std::type_info &cpp_type_info, PyTypeObject *fallback_wrapper)
{
#if PBG_TYPEMAP_DEBUG
std::cerr << "lookup_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ")" << std::endl;
#endif
PyTypeObject *python_wrapper = m_map[cpp_type_info.name()];
if (python_wrapper)
return python_wrapper;
else {
#if defined(__GNUC__) && __GNUC__ >= 3 && !defined(__clang__)
// Get closest (in the single inheritance tree provided by cxxabi.h)
// registered python wrapper.
const abi::__si_class_type_info *_typeinfo =
dynamic_cast<const abi::__si_class_type_info*> (&cpp_type_info);
#if PBG_TYPEMAP_DEBUG
std::cerr << " -> looking at C++ type " << _typeinfo->name() << std::endl;
#endif
while (_typeinfo && (python_wrapper = m_map[std::string(_typeinfo->name())]) == 0) {
_typeinfo = dynamic_cast<const abi::__si_class_type_info*> (_typeinfo->__base_type);
#if PBG_TYPEMAP_DEBUG
std::cerr << " -> looking at C++ type " << _typeinfo->name() << std::endl;
#endif
}
#if PBG_TYPEMAP_DEBUG
if (python_wrapper) {
std::cerr << " -> found match " << std::endl;
} else {
std::cerr << " -> return fallback wrapper" << std::endl;
}
#endif
return python_wrapper? python_wrapper : fallback_wrapper;
#else // non gcc 3+ compilers can only match against explicitly registered classes, not hidden subclasses
return fallback_wrapper;
#endif
}
}
};
}
''')
else:
code_sink.writeln('''
PyTypeObject * lookup_wrapper(const std::type_info &cpp_type_info, PyTypeObject *fallback_wrapper)
{
#if PBG_TYPEMAP_DEBUG
std::cerr << "lookup_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ")" << std::endl;
#endif
PyTypeObject *python_wrapper = m_map[cpp_type_info.name()];
return python_wrapper? python_wrapper : fallback_wrapper;
}
};
}
''')
if self.import_from_module:
code_sink.writeln("\nextern pybindgen::TypeMap *_%s;\n" % self.typeid_map_name)
code_sink.writeln("#define %s (*_%s)\n" % (self.typeid_map_name, self.typeid_map_name))
else:
code_sink.writeln("\nextern pybindgen::TypeMap %s;\n" % self.typeid_map_name)
def _add_method_obj(self, method):
"""
Add a method object to the class. For internal use.
:param method: a L{CppMethod} or L{Function} instance that can generate the method wrapper
"""
if isinstance(method, CppMethod):
name = method.mangled_name
elif isinstance(method, function.Function):
name = method.custom_name
assert isinstance(method.parameters[0], CppClassParameterBase)
assert method.parameters[0].cpp_class is self, \
"expected first parameter to be of class %s, but it is of class %s" % \
(self.full_name, method.parameters[0].cpp_class.full_name)
method.parameters[0].take_value_from_python_self = True
method.module = self.module
method.is_virtual = False
method.is_pure_virtual = False
method.self_parameter_pystruct = self.pystruct
method.visibility = 'public'
method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS
else:
raise TypeError
method.class_ = self
if method.visibility == 'protected' and not method.is_virtual:
helper_class = self.get_helper_class()
if helper_class is not None:
parent_caller = CppVirtualMethodParentCaller(method)
parent_caller.helper_class = helper_class
parent_caller.main_wrapper = method
helper_class.add_virtual_parent_caller(parent_caller)
elif method.visibility == 'public':
if name == '__call__': # needs special handling
method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS
try:
overload = self.methods[name]
except KeyError:
overload = CppOverloadedMethod(name)
overload.pystruct = self.pystruct
self.methods[name] = overload
## add it....
try:
utils.call_with_error_handling(overload.add, (method,), {}, method)
except utils.SkipWrapper:
return
# Grr! I hate C++. Overloading + inheritance = disaster!
# So I ended up coding something which C++ does not in
# fact support, but I feel bad to just throw away my good
# code due to a C++ fault, so I am leaving here the code
# disabled. Maybe some future C++ version will come along
# and fix this problem, who knows :P
if 0:
# due to a limitation of the pybindgen overloading
# strategy, we need to re-wrap for this class all
# methods with the same name and different signature
# from parent classes.
overload._compute_all_wrappers()
if isinstance(method, CppMethod):
mro = self.get_mro()
next(mro) # skip 'self'
for cls in mro:
try:
parent_overload = cls.methods[name]
except KeyError:
continue
parent_overload._compute_all_wrappers()
for parent_method in parent_overload.all_wrappers:
already_exists = False
for existing_method in overload.all_wrappers:
if existing_method.matches_signature(parent_method):
already_exists = True
break
if not already_exists:
new_method = parent_method.clone()
new_method.class_ = self
overload.add(new_method)
else:
self.nonpublic_methods.append(method)
if method.is_virtual:
self._have_pure_virtual_methods = None
helper_class = self.get_helper_class()
if helper_class is not None:
helper_class.add_virtual_method(method)
def add_method(self, *args, **kwargs):
"""
Add a method to the class. See the documentation for
L{CppMethod.__init__} for information on accepted parameters.
"""
## <compat>
if len(args) >= 1 and isinstance(args[0], CppMethod):
meth = args[0]
warnings.warn("add_method has changed API; see the API documentation",
DeprecationWarning, stacklevel=2)
if len(args) == 2:
meth.custom_name = args[1]
elif 'name' in kwargs:
assert len(args) == 1
meth.custom_name = kwargs['name']
else:
assert len(args) == 1
assert len(kwargs) == 0
elif len(args) >= 1 and isinstance(args[0], function.Function):
meth = args[0]
warnings.warn("add_method has changed API; see the API documentation",
DeprecationWarning, stacklevel=2)
if len(args) == 2:
meth.custom_name = args[1]
elif 'name' in kwargs:
assert len(args) == 1
meth.custom_name = kwargs['name']
else:
assert len(args) == 1
assert len(kwargs) == 0
## </compat>
else:
try:
meth = CppMethod(*args, **kwargs)
except utils.SkipWrapper:
if kwargs.get('is_virtual', False):
## if the method was supposed to be virtual, this
## is a very important fact that needs to be
## recorded in the class, even if the method is
## not wrapped.
method = CppDummyMethod(*args, **kwargs)
method.class_ = self
self._dummy_methods.append(method)
self._have_pure_virtual_methods = None
helper_class = self.get_helper_class()
if helper_class is not None:
helper_class.add_virtual_method(method)
if helper_class.cannot_be_constructed:
self.helper_class = None
self.helper_class_disabled = True
return None
self._add_method_obj(meth)
return meth
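    # Illustrative sketch (hypothetical 'queue' wrapper): arguments mirror
    # CppMethod.__init__, i.e. method name, return type, and a list of
    # parameter specs built with pybindgen's param() helper:
    #
    #   queue.add_method('Push', 'void', [param('int', 'value')])
    #   queue.add_method('Size', 'int', [], is_const=True)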
def add_function_as_method(self, *args, **kwargs):
"""
Add a function as method of the class. See the documentation for
L{Function.__init__} for information on accepted parameters.
TODO: explain the implicit first function parameter
"""
try:
meth = function.Function(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_method_obj(meth)
return meth
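    # Illustrative sketch addressing the TODO above: the wrapped free function
    # must take the object as its first parameter, and that parameter is filled
    # in from the Python 'self'; names below are hypothetical:
    #
    #   // C++: double QueueAverage(Queue *q);
    #   queue.add_function_as_method('QueueAverage', 'double',
    #                                [param('Queue *', 'q', transfer_ownership=False)],
    #                                custom_name='average')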
def add_custom_method_wrapper(self, *args, **kwargs):
"""
Adds a custom method wrapper. See L{CustomCppMethodWrapper} for more information.
"""
try:
meth = CustomCppMethodWrapper(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_method_obj(meth)
return meth
def set_helper_class_disabled(self, flag=True):
self.helper_class_disabled = flag
if flag:
self.helper_class = None
def set_cannot_be_constructed(self, reason):
assert isinstance(reason, string_types)
self.cannot_be_constructed = reason
def _add_constructor_obj(self, wrapper):
"""
Add a constructor to the class.
:param wrapper: a CppConstructor instance
"""
assert isinstance(wrapper, CppConstructor)
wrapper.set_class(self)
self.constructors.append(wrapper)
if not wrapper.parameters:
            self.has_trivial_constructor = True # FIXME: I don't remember what this is used for anymore; maybe remove
if len(wrapper.parameters) == 1 and isinstance(wrapper.parameters[0], (CppClassRefParameter, CppClassParameter)) \
and wrapper.parameters[0].cpp_class is self and wrapper.visibility == 'public':
self.has_copy_constructor = True
def add_output_stream_operator(self):
"""
Add str() support based on C++ output stream operator.
        Calling this method enables wrapping of an operator function that is assumed to be defined::
            std::ostream & operator << (std::ostream &, MyClass const &);
        The wrapper will be registered as the Python str() operator,
        and will call the C++ operator function to convert the value
        to a string.
"""
self.has_output_stream_operator = True
self.module.add_include("<ostream>")
self.module.add_include("<sstream>")
def add_constructor(self, *args, **kwargs):
"""
Add a constructor to the class. See the documentation for
L{CppConstructor.__init__} for information on accepted parameters.
"""
## <compat>
if len(args) == 1 and isinstance(args[0], CppConstructor):
warnings.warn("add_constructor has changed API; see the API documentation",
DeprecationWarning, stacklevel=2)
constructor = args[0]
elif len(args) == 1 and isinstance(args[0], function.Function):
warnings.warn("add_constructor has changed API; see the API documentation",
DeprecationWarning, stacklevel=2)
func = args[0]
constructor = CppFunctionAsConstructor(func.function_name, func.parameters)
constructor.module = self.module
## </compat>
else:
try:
constructor = CppConstructor(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_constructor_obj(constructor)
return constructor
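    # Illustrative sketch (hypothetical 'vec3' wrapper): a default constructor
    # and one taking three doubles, using pybindgen's param() helper:
    #
    #   vec3.add_constructor([])
    #   vec3.add_constructor([param('double', 'x'), param('double', 'y'), param('double', 'z')])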
def add_copy_constructor(self):
"""
Utility method to add a 'copy constructor' method to this class.
"""
try:
constructor = CppConstructor([self.ThisClassRefParameter("const %s &" % self.full_name,
'ctor_arg')])
except utils.SkipWrapper:
return None
self._add_constructor_obj(constructor)
return constructor
def add_function_as_constructor(self, *args, **kwargs):
"""
        Wrap a function that behaves as a constructor for the class. See the documentation for
L{CppFunctionAsConstructor.__init__} for information on accepted parameters.
"""
try:
constructor = CppFunctionAsConstructor(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_constructor_obj(constructor)
return constructor
def add_static_attribute(self, name, value_type, is_const=False):
"""
:param value_type: a ReturnValue object
:param name: attribute name (i.e. the name of the class member variable)
:param is_const: True if the attribute is const, i.e. cannot be modified
"""
## backward compatibility check
if isinstance(value_type, string_types) and isinstance(name, ReturnValue):
warnings.warn("add_static_attribute has changed API; see the API documentation (but trying to correct...)",
DeprecationWarning, stacklevel=2)
value_type, name = name, value_type
try:
value_type = utils.eval_retval(value_type, None)
except utils.SkipWrapper:
return
assert isinstance(value_type, ReturnValue)
getter = CppStaticAttributeGetter(value_type, self, name)
getter.stack_where_defined = traceback.extract_stack()
if is_const:
setter = None
else:
setter = CppStaticAttributeSetter(value_type, self, name)
setter.stack_where_defined = traceback.extract_stack()
self.static_attributes.add_attribute(name, getter, setter)
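    # Illustrative sketch (hypothetical 'config' wrapper): exposing a static
    # member variable; with is_const=True it is read-only from Python:
    #
    #   config.add_static_attribute('MAX_CLIENTS', 'int', is_const=True)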
def add_custom_instance_attribute(self, name, value_type, getter, is_const=False, setter=None, custom_name=None,
getter_template_parameters=[],
setter_template_parameters=[]):
"""
:param value_type: a ReturnValue object
:param name: attribute name (i.e. the name of the class member variable)
:param is_const: True if the attribute is const, i.e. cannot be modified
:param getter: None, or name of a method of this class used to get the value
:param setter: None, or name of a method of this class used to set the value
:param getter_template_parameters: optional list of template parameters for getter function
:param setter_template_parameters: optional list of template parameters for setter function
"""
## backward compatibility check
if isinstance(value_type, string_types) and isinstance(name, ReturnValue):
warnings.warn("add_custom_instance_attribute has changed API; see the API documentation (but trying to correct...)",
DeprecationWarning, stacklevel=2)
value_type, name = name, value_type
try:
value_type = utils.eval_retval(value_type, None)
except utils.SkipWrapper:
return
assert isinstance(value_type, ReturnValue)
getter_wrapper = CppCustomInstanceAttributeGetter(value_type, self, name, getter=getter,
template_parameters = getter_template_parameters)
getter_wrapper.stack_where_defined = traceback.extract_stack()
if is_const:
setter_wrapper = None
assert setter is None
else:
setter_wrapper = CppCustomInstanceAttributeSetter(value_type, self, name, setter=setter,
template_parameters = setter_template_parameters)
setter_wrapper.stack_where_defined = traceback.extract_stack()
self.instance_attributes.add_attribute(name, getter_wrapper, setter_wrapper, custom_name)
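    # Illustrative sketch (hypothetical 'node' wrapper): exposing a Python
    # attribute backed by getter/setter methods of the C++ class:
    #
    #   node.add_custom_instance_attribute('name', 'std::string',
    #                                      getter='GetName', setter='SetName')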
def add_instance_attribute(self, name, value_type, is_const=False,
getter=None, setter=None, custom_name=None):
"""
:param value_type: a ReturnValue object
:param name: attribute name (i.e. the name of the class member variable)
:param is_const: True if the attribute is const, i.e. cannot be modified
:param getter: None, or name of a method of this class used to get the value
:param setter: None, or name of a method of this class used to set the value
"""
## backward compatibility check
if isinstance(value_type, string_types) and isinstance(name, ReturnValue):
warnings.warn("add_static_attribute has changed API; see the API documentation (but trying to correct...)",
DeprecationWarning, stacklevel=2)
value_type, name = name, value_type
try:
value_type = utils.eval_retval(value_type, None)
except utils.SkipWrapper:
return
assert isinstance(value_type, ReturnValue)
getter_wrapper = CppInstanceAttributeGetter(value_type, self, name, getter=getter)
getter_wrapper.stack_where_defined = traceback.extract_stack()
if is_const:
setter_wrapper = None
assert setter is None
else:
setter_wrapper = CppInstanceAttributeSetter(value_type, self, name, setter=setter)
setter_wrapper.stack_where_defined = traceback.extract_stack()
self.instance_attributes.add_attribute(name, getter_wrapper, setter_wrapper, custom_name)
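    # Illustrative sketch (hypothetical 'node' wrapper): exposing plain public
    # member variables as Python attributes:
    #
    #   node.add_instance_attribute('weight', 'double')
    #   node.add_instance_attribute('id', 'uint32_t', is_const=True)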
def _inherit_helper_class_parent_virtuals(self):
"""
Given a class containing a helper class, add all virtual
        methods from all parent classes of this class.
"""
mro = self.get_mro()
next(mro) # skip 'self'
for cls in mro:
for method in cls.get_all_methods():
if not method.is_virtual:
continue
method = method.clone()
self.helper_class.add_virtual_method(method)
def _get_wrapper_registry(self):
# there is one wrapper registry object per root class only,
# which is used for all subclasses.
if self.parent is None:
if self._wrapper_registry is None:
self._wrapper_registry = settings.wrapper_registry(self.pystruct)
return self._wrapper_registry
else:
return self.parent._get_wrapper_registry()
wrapper_registry = property(_get_wrapper_registry)
def generate_forward_declarations(self, code_sink, module):
"""
Generates forward declarations for the instance and type
structures.
"""
if self.memory_policy is not None:
pointer_type = self.memory_policy.get_pointer_type(self.full_name)
else:
pointer_type = self.full_name + " *"
if self.allow_subclassing:
code_sink.writeln('''
typedef struct {
PyObject_HEAD
%sobj;
PyObject *inst_dict;
PyBindGenWrapperFlags flags:8;
} %s;
''' % (pointer_type, self.pystruct))
else:
code_sink.writeln('''
typedef struct {
PyObject_HEAD
%sobj;
PyBindGenWrapperFlags flags:8;
} %s;
''' % (pointer_type, self.pystruct))
code_sink.writeln()
if self.import_from_module:
code_sink.writeln('extern PyTypeObject *_%s;' % (self.pytypestruct,))
code_sink.writeln('#define %s (*_%s)' % (self.pytypestruct, self.pytypestruct))
else:
code_sink.writeln('extern PyTypeObject %s;' % (self.pytypestruct,))
if not self.static_attributes.empty():
code_sink.writeln('extern PyTypeObject Py%s_Type;' % (self.metaclass_name,))
code_sink.writeln()
if self.helper_class is not None:
self._inherit_helper_class_parent_virtuals()
for hook in self._get_all_helper_class_hooks():
hook(self.helper_class)
self.helper_class.generate_forward_declarations(code_sink)
if self.helper_class.cannot_be_constructed:
self.helper_class = None
self.helper_class_disabled = True
if self.have_pure_virtual_methods and self.helper_class is None:
self.cannot_be_constructed = "have pure virtual methods but no helper class"
if self.typeid_map_name is not None:
self._generate_typeid_map(code_sink, module)
if self.container_traits is not None:
self.container_traits.generate_forward_declarations(code_sink, module)
if self.parent is None:
self.wrapper_registry.generate_forward_declarations(code_sink, module, self.import_from_module)
def get_python_name(self):
if self.template_parameters:
if self.custom_name is None:
class_python_name = self.mangled_name
else:
class_python_name = self.custom_name
else:
if self.custom_name is None:
class_python_name = self.name
else:
class_python_name = self.custom_name
return class_python_name
def _generate_import_from_module(self, code_sink, module):
if module.parent is None:
error_retcode = "MOD_ERROR"
else:
error_retcode = "NULL"
# TODO: skip this step if the requested typestructure is never used
if ' named ' in self.import_from_module:
module_name, type_name = self.import_from_module.split(" named ")
else:
module_name, type_name = self.import_from_module, self.name
code_sink.writeln("PyTypeObject *_%s;" % self.pytypestruct)
module.after_init.write_code("/* Import the %r class from module %r */" % (self.full_name, self.import_from_module))
module.after_init.write_code("{"); module.after_init.indent()
module.after_init.write_code("PyObject *module = PyImport_ImportModule((char*) \"%s\");" % module_name)
module.after_init.write_code(
"if (module == NULL) {\n"
" return %s;\n"
"}" % (error_retcode,))
module.after_init.write_code("_%s = (PyTypeObject*) PyObject_GetAttrString(module, (char*) \"%s\");\n"
% (self.pytypestruct, self.get_python_name()))
module.after_init.write_code("if (PyErr_Occurred()) PyErr_Clear();")
if self.typeid_map_name is not None:
code_sink.writeln("pybindgen::TypeMap *_%s;" % self.typeid_map_name)
module.after_init.write_code("/* Import the %r class type map from module %r */" % (self.full_name, self.import_from_module))
module.after_init.write_code("PyObject *_cobj = PyObject_GetAttrString(module, (char*) \"_%s\");"
% (self.typeid_map_name))
module.after_init.write_code("if (_cobj == NULL) {\n"
" _%s = new pybindgen::TypeMap;\n"
" PyErr_Clear();\n"
"} else {\n"
" _%s = reinterpret_cast<pybindgen::TypeMap*> (PyCObject_AsVoidPtr (_cobj));\n"
" Py_DECREF(_cobj);\n"
"}"
% (self.typeid_map_name, self.typeid_map_name))
if self.parent is None:
self.wrapper_registry.generate_import(code_sink, module.after_init, "module")
module.after_init.unindent(); module.after_init.write_code("}")
if self.helper_class is not None:
self.helper_class.generate(code_sink)
def generate(self, code_sink, module):
"""Generates the class to a code sink"""
if self.import_from_module:
self._generate_import_from_module(code_sink, module)
return # .......................... RETURN
if self.typeid_map_name is not None:
code_sink.writeln("\npybindgen::TypeMap %s;\n" % self.typeid_map_name)
module.after_init.write_code("PyModule_AddObject(m, (char *) \"_%s\", PyCObject_FromVoidPtr(&%s, NULL));"
% (self.typeid_map_name, self.typeid_map_name))
if self.automatic_type_narrowing:
self._register_typeid(module)
if self.parent is None:
self.wrapper_registry.generate(code_sink, module)
if self.helper_class is not None:
parent_caller_methods = self.helper_class.generate(code_sink)
else:
parent_caller_methods = []
## generate getsets
instance_getsets = self.instance_attributes.generate(code_sink)
self.slots.setdefault("tp_getset", instance_getsets)
static_getsets = self.static_attributes.generate(code_sink)
## --- register the class type in the module ---
module.after_init.write_code("/* Register the '%s' class */" % self.full_name)
## generate a metaclass if needed
if static_getsets == '0':
metaclass = None
else:
if self.parent is None:
parent_typestruct = 'PyBaseObject_Type'
else:
parent_typestruct = self.parent.pytypestruct
metaclass = PyMetaclass(self.metaclass_name,
"Py_TYPE(&%s)" % parent_typestruct,
self.static_attributes)
metaclass.generate(code_sink, module)
if self.parent is not None:
assert isinstance(self.parent, CppClass)
module.after_init.write_code('%s.tp_base = &%s;' %
(self.pytypestruct, self.parent.pytypestruct))
if len(self.bases) > 1:
module.after_init.write_code('%s.tp_bases = PyTuple_New(%i);' % (self.pytypestruct, len(self.bases),))
for basenum, base in enumerate(self.bases):
module.after_init.write_code(' Py_INCREF((PyObject *) &%s);' % (base.pytypestruct,))
module.after_init.write_code(' PyTuple_SET_ITEM(%s.tp_bases, %i, (PyObject *) &%s);'
% (self.pytypestruct, basenum, base.pytypestruct))
if metaclass is not None:
module.after_init.write_code('Py_TYPE(&%s) = &%s;' %
(self.pytypestruct, metaclass.pytypestruct))
module.after_init.write_error_check('PyType_Ready(&%s)'
% (self.pytypestruct,))
class_python_name = self.get_python_name()
if self.outer_class is None:
module.after_init.write_code(
'PyModule_AddObject(m, (char *) \"%s\", (PyObject *) &%s);' % (
class_python_name, self.pytypestruct))
else:
module.after_init.write_code(
'PyDict_SetItemString((PyObject*) %s.tp_dict, (char *) \"%s\", (PyObject *) &%s);' % (
self.outer_class.pytypestruct, class_python_name, self.pytypestruct))
have_constructor = self._generate_constructor(code_sink)
self._generate_methods(code_sink, parent_caller_methods)
if self.allow_subclassing:
self._generate_gc_methods(code_sink)
self._generate_destructor(code_sink, have_constructor)
if self.has_output_stream_operator:
self._generate_str(code_sink)
#self._generate_tp_hash(code_sink)
#self._generate_tp_compare(code_sink)
#if self.slots.get("tp_hash", "NULL") == "NULL":
# self.slots["tp_hash"] = self._generate_tp_hash(code_sink)
if self.slots.get("tp_richcompare", "NULL") == "NULL" and self.binary_comparison_operators:
self.slots["tp_richcompare"] = self._generate_tp_richcompare(code_sink)
if self.binary_numeric_operators or self.inplace_numeric_operators:
self.slots["tp_as_number"] = self._generate_number_methods(code_sink)
if self.have_sequence_methods():
self.slots["tp_as_sequence"] = self._generate_sequence_methods(code_sink)
if self.container_traits is not None:
self.container_traits.generate(code_sink, module)
self._generate_type_structure(code_sink, self.docstring)
def _generate_number_methods(self, code_sink):
number_methods_var_name = "%s__py_number_methods" % (self.mangled_full_name,)
pynumbermethods = PyNumberMethods()
pynumbermethods.slots['variable'] = number_methods_var_name
# iterate over all types and request generation of the
        # conversion functions for that type (so that those functions
# are not generated in the middle of one of the wrappers we
# are about to generate)
root_module = self.module.get_root()
for dummy_op_symbol, op_types in self.binary_numeric_operators.items():
for (retval, left, right) in op_types:
get_c_to_python_converter(retval, root_module, code_sink)
get_python_to_c_converter(left, root_module, code_sink)
get_python_to_c_converter(right, root_module, code_sink)
for dummy_op_symbol, op_types in self.inplace_numeric_operators.items():
for (retval, left, right) in op_types:
get_python_to_c_converter(left, root_module, code_sink)
get_python_to_c_converter(right, root_module, code_sink)
get_c_to_python_converter(retval, root_module, code_sink)
for dummy_op_symbol, op_types in self.unary_numeric_operators.items():
for (retval, left) in op_types:
get_c_to_python_converter(retval, root_module, code_sink)
get_python_to_c_converter(left, root_module, code_sink)
def try_wrap_operator(op_symbol, slot_name):
if op_symbol in self.binary_numeric_operators:
op_types = self.binary_numeric_operators[op_symbol]
elif op_symbol in self.inplace_numeric_operators:
op_types = self.inplace_numeric_operators[op_symbol]
else:
return
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pynumbermethods.slots[slot_name] = wrapper_name
code_sink.writeln(("static PyObject*\n"
"%s (PyObject *py_left, PyObject *py_right)\n"
"{") % wrapper_name)
code_sink.indent()
for (retval, left, right) in op_types:
retval_converter, retval_name = get_c_to_python_converter(retval, root_module, code_sink)
left_converter, left_name = get_python_to_c_converter(left, root_module, code_sink)
right_converter, right_name = get_python_to_c_converter(right, root_module, code_sink)
code_sink.writeln("{")
code_sink.indent()
code_sink.writeln("%s left;" % left_name)
code_sink.writeln("%s right;" % right_name)
code_sink.writeln("if (%s(py_left, &left) && %s(py_right, &right)) {" % (left_converter, right_converter))
code_sink.indent()
code_sink.writeln("%s result = (left %s right);" % (retval_name, op_symbol))
code_sink.writeln("return %s(&result);" % retval_converter)
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("PyErr_Clear();")
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("Py_INCREF(Py_NotImplemented);")
code_sink.writeln("return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}")
def try_wrap_unary_operator(op_symbol, slot_name):
if op_symbol in self.unary_numeric_operators:
op_types = self.unary_numeric_operators[op_symbol]
else:
return
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pynumbermethods.slots[slot_name] = wrapper_name
code_sink.writeln(("static PyObject*\n"
"%s (PyObject *py_self)\n"
"{") % wrapper_name)
code_sink.indent()
for (retval, left) in op_types:
retval_converter, retval_name = get_c_to_python_converter(retval, root_module, code_sink)
left_converter, left_name = get_python_to_c_converter(left, root_module, code_sink)
code_sink.writeln("{")
code_sink.indent()
code_sink.writeln("%s self;" % left_name)
code_sink.writeln("if (%s(py_self, &self)) {" % (left_converter))
code_sink.indent()
code_sink.writeln("%s result = %s(self);" % (retval_name, op_symbol))
code_sink.writeln("return %s(&result);" % retval_converter)
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("PyErr_Clear();")
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("Py_INCREF(Py_NotImplemented);")
code_sink.writeln("return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}")
try_wrap_operator('+', 'nb_add')
try_wrap_operator('-', 'nb_subtract')
try_wrap_operator('*', 'nb_multiply')
try_wrap_operator('/', 'nb_divide')
try_wrap_operator('+=', 'nb_inplace_add')
try_wrap_operator('-=', 'nb_inplace_subtract')
try_wrap_operator('*=', 'nb_inplace_multiply')
try_wrap_operator('/=', 'nb_inplace_divide')
try_wrap_unary_operator('-', 'nb_negative')
pynumbermethods.generate(code_sink)
return '&' + number_methods_var_name
def _generate_sequence_methods(self, code_sink):
sequence_methods_var_name = "%s__py_sequence_methods" % (self.mangled_full_name,)
pysequencemethods = PySequenceMethods()
pysequencemethods.slots['variable'] = sequence_methods_var_name
root_module = self.module.get_root()
self_converter = root_module.generate_python_to_c_type_converter(self.ThisClassReturn(self.full_name), code_sink)
def try_wrap_sequence_method(py_name, slot_name):
if py_name in self.methods:
numwraps = len(self.methods[py_name].wrappers)
some_wrapper_is_function = max([isinstance(x, function.Function) for x in self.methods[py_name].wrappers])
meth_wrapper_actual_name = self.methods[py_name].wrapper_actual_name
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pysequencemethods.slots[slot_name] = wrapper_name
if py_name == "__len__" and (numwraps > 1 or some_wrapper_is_function):
template = pysequencemethods.FUNCTION_TEMPLATES[slot_name + "_ARGS"]
else:
template = pysequencemethods.FUNCTION_TEMPLATES[slot_name]
code_sink.writeln(template % {'wrapper_name' : wrapper_name,
'py_struct' : self._pystruct,
'method_name' : meth_wrapper_actual_name})
return
for py_name in self.valid_sequence_methods:
slot_name = self.valid_sequence_methods[py_name]
try_wrap_sequence_method(py_name, slot_name)
pysequencemethods.generate(code_sink)
return '&' + sequence_methods_var_name
def have_sequence_methods(self):
"""Determine if this object has sequence methods registered."""
for x in self.valid_sequence_methods:
if x in self.methods:
return True
return False
def _generate_type_structure(self, code_sink, docstring):
"""generate the type structure"""
self.slots.setdefault("tp_basicsize",
"sizeof(%s)" % (self.pystruct,))
tp_flags = set(['Py_TPFLAGS_DEFAULT'])
if self.allow_subclassing:
tp_flags.add("Py_TPFLAGS_HAVE_GC")
tp_flags.add("Py_TPFLAGS_BASETYPE")
self.slots.setdefault("tp_dictoffset",
"offsetof(%s, inst_dict)" % self.pystruct)
else:
self.slots.setdefault("tp_dictoffset", "0")
if self.binary_numeric_operators:
tp_flags.add("Py_TPFLAGS_CHECKTYPES")
self.slots.setdefault("tp_flags", '|'.join(sorted(tp_flags)))
if docstring is None:
docstring = self.generate_docstring()
self.slots.setdefault("tp_doc", (docstring is None and 'NULL'
or "\"%s\"" % (docstring,)))
dict_ = self.slots
dict_.setdefault("typestruct", self.pytypestruct)
if self.outer_class is None:
mod_path = self._module.get_module_path()
mod_path.append(self.mangled_name)
dict_.setdefault("tp_name", '.'.join(mod_path))
else:
dict_.setdefault("tp_name", '%s.%s' % (self.outer_class.slots['tp_name'], self.name))
## tp_call support
try:
call_method = self.methods['__call__']
except KeyError:
pass
else:
if call_method.wrapper_actual_name:
dict_.setdefault("tp_call", call_method.wrapper_actual_name)
self.pytype.generate(code_sink)
def generate_docstring(self):
name = self.get_python_name()
return "\\n".join(sorted([c.generate_docstring(name) for c in self.constructors],
key=len, reverse=True))
def _generate_constructor(self, code_sink):
"""generate the constructor, if any"""
have_constructor = True
if self.constructors and ((not self.cannot_be_constructed) or self.helper_class is not None
and not self.helper_class.cannot_be_constructed):
code_sink.writeln()
overload = CppOverloadedConstructor(None)
self.constructors_overload = overload
overload.pystruct = self.pystruct
for constructor in self.constructors:
try:
overload.add(constructor)
except CodegenErrorBase:
continue
if overload.wrappers:
try:
overload.generate(code_sink)
except utils.SkipWrapper:
constructor = None
have_constructor = False
else:
constructor = overload.wrapper_actual_name
code_sink.writeln()
else:
constructor = None
have_constructor = False
else:
## In C++, and unlike Python, constructors with
            ## parameters are not automatically inherited by
## subclasses. We must generate a 'no constructor'
## tp_init to prevent this type from inheriting a
## tp_init that will allocate an instance of the
## parent class instead of this class.
code_sink.writeln()
wrapper = CppNoConstructor(self.cannot_be_constructed)
wrapper.generate(code_sink, self)
constructor = wrapper.wrapper_actual_name
have_constructor = False
code_sink.writeln()
self.slots.setdefault("tp_init", (constructor is None and "NULL"
or constructor))
return have_constructor
def _generate_copy_method(self, code_sink):
construct_name = self.get_construct_name()
copy_wrapper_name = '_wrap_%s__copy__' % self.pystruct
code_sink.writeln('''
static PyObject*\n%s(%s *self, PyObject *PYBINDGEN_UNUSED(_args))
{
''' % (copy_wrapper_name, self.pystruct))
code_sink.indent()
declarations = DeclarationsScope()
code_block = CodeBlock("return NULL;", declarations)
py_copy = declarations.declare_variable("%s*" % self.pystruct, "py_copy")
self.write_allocate_pystruct(code_block, py_copy)
code_block.write_code("%s->obj = new %s(*self->obj);" % (py_copy, construct_name))
if self.allow_subclassing:
code_block.write_code("%s->inst_dict = NULL;" % py_copy)
code_block.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % py_copy)
self.wrapper_registry.write_register_new_wrapper(code_block, py_copy, "%s->obj" % py_copy)
code_block.write_code("return (PyObject*) %s;" % py_copy)
declarations.get_code_sink().flush_to(code_sink)
code_block.write_cleanup()
code_block.sink.flush_to(code_sink)
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln()
return copy_wrapper_name
def _generate_MI_parent_methods(self, code_sink):
methods = {}
mro = self.get_mro()
next(mro)
for base in mro:
for method_name, parent_overload in base.methods.items():
# skip methods registered via special type slots, not method table
if method_name in (['__call__'] + list(self.valid_sequence_methods)):
continue
try:
overload = methods[method_name]
except KeyError:
overload = CppOverloadedMethod(method_name)
overload.pystruct = self.pystruct
methods[method_name] = overload
for parent_wrapper in parent_overload.wrappers:
if parent_wrapper.visibility != 'public':
continue
# the method may have been re-defined as private in our class
private = False
for leaf_wrapper in self.nonpublic_methods:
if leaf_wrapper.matches_signature(parent_wrapper):
private = True
break
if private:
continue
# the method may have already been wrapped in our class
already_wrapped = False
try:
overload = self.methods[method_name]
except KeyError:
pass
else:
for leaf_wrapper in overload.wrappers:
if leaf_wrapper.matches_signature(parent_wrapper):
already_wrapped = True
break
if already_wrapped:
continue
wrapper = parent_wrapper.clone()
wrapper.original_class = base
wrapper.class_ = self
overload.add(wrapper)
method_defs = []
for method_name, overload in methods.items():
if not overload.wrappers:
continue
classes = []
for wrapper in overload.wrappers:
if wrapper.original_class not in classes:
classes.append(wrapper.original_class)
if len(classes) > 1:
continue # overloading with multiple base classes is just too confusing
try:
utils.call_with_error_handling(overload.generate, (code_sink,), {}, overload)
except utils.SkipWrapper:
continue
code_sink.writeln()
method_defs.append(overload.get_py_method_def(method_name))
return method_defs
def _generate_methods(self, code_sink, parent_caller_methods):
"""generate the method wrappers"""
method_defs = []
for meth_name, overload in self.methods.items():
code_sink.writeln()
#overload.generate(code_sink)
try:
utils.call_with_error_handling(overload.generate, (code_sink,), {}, overload)
except utils.SkipWrapper:
continue
# skip methods registered via special type slots, not method table
if meth_name not in (['__call__'] + list(self.valid_sequence_methods)):
method_defs.append(overload.get_py_method_def(meth_name))
code_sink.writeln()
method_defs.extend(parent_caller_methods)
if len(self.bases) > 1: # https://bugs.launchpad.net/pybindgen/+bug/563786
method_defs.extend(self._generate_MI_parent_methods(code_sink))
if self.has_copy_constructor:
try:
copy_wrapper_name = utils.call_with_error_handling(self._generate_copy_method, (code_sink,), {}, self)
except utils.SkipWrapper:
pass
else:
method_defs.append('{(char *) "__copy__", (PyCFunction) %s, METH_NOARGS, NULL},' % copy_wrapper_name)
## generate the method table
code_sink.writeln("static PyMethodDef %s_methods[] = {" % (self.pystruct,))
code_sink.indent()
for methdef in method_defs:
code_sink.writeln(methdef)
code_sink.writeln("{NULL, NULL, 0, NULL}")
code_sink.unindent()
code_sink.writeln("};")
self.slots.setdefault("tp_methods", "%s_methods" % (self.pystruct,))
def _get_delete_code(self):
if self.is_singleton:
delete_code = ''
else:
if self.memory_policy is not None:
delete_code = self.memory_policy.get_delete_code(self)
else:
if self.incomplete_type:
raise CodeGenerationError("Cannot finish generating class %s: "
"type is incomplete, but no free/unref_function defined"
% self.full_name)
if self.destructor_visibility == 'public':
delete_code = (" %s *tmp = self->obj;\n"
" self->obj = NULL;\n"
" if (!(self->flags&PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED)) {\n"
" delete tmp;\n"
" }" % (self.full_name,))
else:
delete_code = (" self->obj = NULL;\n")
return delete_code
def _generate_gc_methods(self, code_sink):
"""Generate tp_clear and tp_traverse"""
## --- tp_clear ---
tp_clear_function_name = "%s__tp_clear" % (self.pystruct,)
self.slots.setdefault("tp_clear", tp_clear_function_name )
delete_code = self._get_delete_code()
code_sink.writeln(r'''
static void
%s(%s *self)
{
Py_CLEAR(self->inst_dict);
%s
}
''' % (tp_clear_function_name, self.pystruct, delete_code))
## --- tp_traverse ---
tp_traverse_function_name = "%s__tp_traverse" % (self.pystruct,)
self.slots.setdefault("tp_traverse", tp_traverse_function_name )
if self.helper_class is None:
visit_self = ''
else:
if not isinstance(self.memory_policy, ReferenceCountingMethodsPolicy) or self.memory_policy.peekref_method is None:
peekref_code = ''
else:
peekref_code = " && self->obj->%s() == 1" % self.memory_policy.peekref_method
visit_self = '''
if (self->obj && typeid(*self->obj).name() == typeid(%s).name() %s)
Py_VISIT((PyObject *) self);
''' % (self.helper_class.name, peekref_code)
code_sink.writeln(r'''
static int
%s(%s *self, visitproc visit, void *arg)
{
Py_VISIT(self->inst_dict);
%s
return 0;
}
''' % (tp_traverse_function_name, self.pystruct, visit_self))
def _generate_str(self, code_sink):
"""Generate a tp_str function and register it in the type"""
tp_str_function_name = "_wrap_%s__tp_str" % (self.pystruct,)
self.slots.setdefault("tp_str", tp_str_function_name )
code_sink.writeln('''
static PyObject *
%s(%s *self)
{
std::ostringstream oss;
oss << *self->obj;
return PyUnicode_FromString(oss.str ().c_str ());
}
''' % (tp_str_function_name, self.pystruct))
def _generate_tp_hash(self, code_sink):
"""generates a tp_hash function, which returns a hash of the self->obj pointer"""
tp_hash_function_name = "_wrap_%s__tp_hash" % (self.pystruct,)
self.slots.setdefault("tp_hash", tp_hash_function_name )
code_sink.writeln('''
static long
%s(%s *self)
{
return (long) self->obj;
}
''' % (tp_hash_function_name, self.pystruct))
return tp_hash_function_name
def _generate_tp_compare(self, code_sink):
"""generates a tp_compare function, which compares the ->obj pointers"""
tp_compare_function_name = "_wrap_%s__tp_compare" % (self.pystruct,)
self.slots.setdefault("tp_compare", tp_compare_function_name )
code_sink.writeln('''
static int
%s(%s *self, %s *other)
{
if (self->obj == other->obj) return 0;
if (self->obj > other->obj) return -1;
return 1;
}
''' % (tp_compare_function_name, self.pystruct, self.pystruct))
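    # Added note (hedged): the tp_hash/tp_compare pair above is based purely on
    # the wrapped pointer, so two Python wrappers hash and compare equal exactly
    # when they refer to the same underlying C++ instance, which keeps dict/set
    # use of wrappers consistent with identity of the wrapped object.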
def _generate_destructor(self, code_sink, have_constructor):
"""Generate a tp_dealloc function and register it in the type"""
## don't generate destructor if overridden by user
if "tp_dealloc" in self.slots:
return
tp_dealloc_function_name = "_wrap_%s__tp_dealloc" % (self.pystruct,)
code_sink.writeln(r'''
static void
%s(%s *self)
{''' % (tp_dealloc_function_name, self.pystruct))
code_sink.indent()
code_block = CodeBlock("PyErr_Print(); return;", DeclarationsScope())
if self.memory_policy is not None:
self.wrapper_registry.write_unregister_wrapper(code_block, 'self', self.memory_policy.get_pointer_to_void_name('self->obj'))
else:
self.wrapper_registry.write_unregister_wrapper(code_block, 'self', 'self->obj')
if self.allow_subclassing:
code_block.write_code("%s(self);" % self.slots["tp_clear"])
else:
code_block.write_code(self._get_delete_code())
code_block.write_code('Py_TYPE(self)->tp_free((PyObject*)self);')
code_block.write_cleanup()
code_block.declarations.get_code_sink().flush_to(code_sink)
code_block.sink.flush_to(code_sink)
code_sink.unindent()
code_sink.writeln('}\n')
self.slots.setdefault("tp_dealloc", tp_dealloc_function_name )
def _generate_tp_richcompare(self, code_sink):
tp_richcompare_function_name = "_wrap_%s__tp_richcompare" % (self.pystruct,)
code_sink.writeln("static PyObject*\n%s (%s *self, %s *other, int opid)"
% (tp_richcompare_function_name, self.pystruct, self.pystruct))
code_sink.writeln("{")
code_sink.indent()
code_sink.writeln("""
if (!PyObject_IsInstance((PyObject*) other, (PyObject*) &%s)) {
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}""" % self.pytypestruct)
code_sink.writeln("switch (opid)\n{")
def wrap_operator(name, opid_code):
code_sink.writeln("case %s:" % opid_code)
code_sink.indent()
if name in self.binary_comparison_operators:
code_sink.writeln("if (*self->obj %(OP)s *other->obj) {\n"
" Py_INCREF(Py_True);\n"
" return Py_True;\n"
"} else {\n"
" Py_INCREF(Py_False);\n"
" return Py_False;\n"
"}" % dict(OP=name))
else:
code_sink.writeln("Py_INCREF(Py_NotImplemented);\n"
"return Py_NotImplemented;")
code_sink.unindent()
wrap_operator('<', 'Py_LT')
wrap_operator('<=', 'Py_LE')
wrap_operator('==', 'Py_EQ')
wrap_operator('!=', 'Py_NE')
wrap_operator('>=', 'Py_GE')
wrap_operator('>', 'Py_GT')
code_sink.writeln("} /* closes switch (opid) */")
code_sink.writeln("Py_INCREF(Py_NotImplemented);\n"
"return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}\n")
return tp_richcompare_function_name
def generate_typedef(self, module, alias):
"""
Generates the appropriate Module code to register the class
with a new name in that module (typedef alias).
"""
module.after_init.write_code(
'PyModule_AddObject(m, (char *) \"%s\", (PyObject *) &%s);' % (
alias, self.pytypestruct))
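    # Added usage sketch (hedged): from a binding script a typedef alias is
    # typically requested with something like
    #   cls = mod.add_class('MyClass')
    #   mod.add_typedef(cls, 'MyAlias')
    # which eventually reaches generate_typedef() so the same PyTypeObject is
    # also exposed under the alias name; the exact user-facing call is an
    # assumption here, only the PyModule_AddObject registration above is certain.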
def write_allocate_pystruct(self, code_block, lvalue, wrapper_type=None):
"""
Generates code to allocate a python wrapper structure, using
        PyObject_New or PyObject_GC_New, plus some additional structure
initialization that may be needed.
"""
if self.allow_subclassing:
new_func = 'PyObject_GC_New'
else:
new_func = 'PyObject_New'
if wrapper_type is None:
wrapper_type = '&'+self.pytypestruct
code_block.write_code("%s = %s(%s, %s);" %
(lvalue, new_func, self.pystruct, wrapper_type))
if self.allow_subclassing:
code_block.write_code(
"%s->inst_dict = NULL;" % (lvalue,))
if self.memory_policy is not None:
code_block.write_code(self.memory_policy.get_pystruct_init_code(self, lvalue))
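# Added illustration (hedged): for a subclassable class whose pystruct/pytypestruct
# are "PyMyClass"/"PyMyClass_Type" (placeholder names), write_allocate_pystruct()
# emits C along the lines of
#     py_obj = PyObject_GC_New(PyMyClass, &PyMyClass_Type);
#     py_obj->inst_dict = NULL;
# followed by any memory-policy specific initialization code.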
# from pybindgen.cppclass_typehandlers import CppClassParameter, CppClassRefParameter, \
# CppClassReturnValue, CppClassRefReturnValue, CppClassPtrParameter, CppClassPtrReturnValue, CppClassParameterBase, \
# CppClassSharedPtrParameter, CppClassSharedPtrReturnValue
#from pybindgen.function import function
from pybindgen.cppmethod import CppMethod, CppConstructor, CppNoConstructor, CppFunctionAsConstructor, \
CppOverloadedMethod, CppOverloadedConstructor, \
CppVirtualMethodParentCaller, CppVirtualMethodProxy, CustomCppMethodWrapper, \
CppDummyMethod
def common_shared_object_return(value, py_name, cpp_class, code_block,
type_traits, caller_owns_return,
reference_existing_object, type_is_pointer, caller_manages_return=True,
free_after_copy=False):
if type_is_pointer:
value_value = '(*%s)' % value
value_ptr = value
else:
value_ptr = '(&%s)' % value
value_value = value
def write_create_new_wrapper():
"""Code path that creates a new wrapper for the returned object"""
## Find out what Python wrapper to use, in case
## automatic_type_narrowing is active and we are not forced to
## make a copy of the object
if (cpp_class.automatic_type_narrowing
and (caller_owns_return or isinstance(cpp_class.memory_policy,
ReferenceCountingPolicy))):
typeid_map_name = cpp_class.get_type_narrowing_root().typeid_map_name
wrapper_type = code_block.declare_variable(
'PyTypeObject*', 'wrapper_type', '0')
code_block.write_code(
'%s = %s.lookup_wrapper(typeid(%s), &%s);'
% (wrapper_type, typeid_map_name, value_value, cpp_class.pytypestruct))
else:
wrapper_type = '&'+cpp_class.pytypestruct
## Create the Python wrapper object
cpp_class.write_allocate_pystruct(code_block, py_name, wrapper_type)
if cpp_class.allow_subclassing:
code_block.write_code(
"%s->inst_dict = NULL;" % (py_name,))
## Assign the C++ value to the Python wrapper
if caller_owns_return:
if type_traits.target_is_const:
code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr))
else:
code_block.write_code("%s->obj = %s;" % (py_name, value_ptr))
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
else:
if not isinstance(cpp_class.memory_policy, ReferenceCountingPolicy):
if reference_existing_object:
if type_traits.target_is_const:
code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr))
else:
code_block.write_code("%s->obj = %s;" % (py_name, value_ptr))
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED;" % (py_name,))
else:
if caller_manages_return:
# The PyObject creates its own copy
if not cpp_class.has_copy_constructor:
raise CodeGenerationError("Class {0} cannot be copied".format(cpp_class.full_name))
cpp_class.write_create_instance(code_block,
"%s->obj" % py_name,
value_value)
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
cpp_class.write_post_instance_creation_code(code_block,
"%s->obj" % py_name,
value_value)
else:
if type_traits.target_is_const:
code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr))
else:
code_block.write_code("%s->obj = %s;" % (py_name, value_ptr))
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED;" % (py_name,))
else:
if caller_manages_return:
## The PyObject gets a new reference to the same obj
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
cpp_class.memory_policy.write_incref(code_block, value_ptr)
if type_traits.target_is_const:
code_block.write_code("%s->obj = (%s*) (%s);" %
(py_name, cpp_class.full_name, value_ptr))
else:
code_block.write_code("%s->obj = %s;" % (py_name, value_ptr))
else:
if type_traits.target_is_const:
code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr))
else:
code_block.write_code("%s->obj = %s;" % (py_name, value_ptr))
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED;" % (py_name,))
## closes def write_create_new_wrapper():
if cpp_class.helper_class is None:
try:
if cpp_class.memory_policy is not None:
cpp_class.wrapper_registry.write_lookup_wrapper(
code_block, cpp_class.pystruct, py_name, cpp_class.memory_policy.get_pointer_to_void_name(value_ptr))
else:
cpp_class.wrapper_registry.write_lookup_wrapper(
code_block, cpp_class.pystruct, py_name, value_ptr)
except NotSupportedError:
write_create_new_wrapper()
if cpp_class.memory_policy is not None:
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, cpp_class.memory_policy.get_pointer_to_void_name("%s->obj" % py_name))
else:
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, "%s->obj" % py_name)
else:
code_block.write_code("if (%s == NULL) {" % py_name)
code_block.indent()
write_create_new_wrapper()
if cpp_class.memory_policy is not None:
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, cpp_class.memory_policy.get_pointer_to_void_name("%s->obj" % py_name))
else:
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, "%s->obj" % py_name)
code_block.unindent()
# If we are already referencing the existing python wrapper,
# we do not need a reference to the C++ object as well.
if caller_owns_return and \
isinstance(cpp_class.memory_policy, ReferenceCountingPolicy):
code_block.write_code("} else {")
code_block.indent()
cpp_class.memory_policy.write_decref(code_block, value_ptr)
code_block.unindent()
code_block.write_code("}")
else:
code_block.write_code("}")
else:
# since there is a helper class, check if this C++ object is an instance of that class
# http://stackoverflow.com/questions/579887/how-expensive-is-rtti/1468564#1468564
code_block.write_code("if (typeid(%s).name() == typeid(%s).name())\n{"
% (value_value, cpp_class.helper_class.name))
code_block.indent()
# yes, this is an instance of the helper class; we can get
# the existing python wrapper directly from the helper
# class...
if type_traits.target_is_const:
const_cast_value = "const_cast<%s *>(%s) " % (cpp_class.full_name, value_ptr)
else:
const_cast_value = value_ptr
code_block.write_code(
"%s = reinterpret_cast< %s* >(reinterpret_cast< %s* >(%s)->m_pyself);"
% (py_name, cpp_class.pystruct,
cpp_class.helper_class.name, const_cast_value))
code_block.write_code("%s->obj = %s;" % (py_name, const_cast_value))
# We are already referencing the existing python wrapper,
# so we do not need a reference to the C++ object as well.
if caller_owns_return and \
isinstance(cpp_class.memory_policy, ReferenceCountingPolicy):
cpp_class.memory_policy.write_decref(code_block, value_ptr)
code_block.write_code("Py_INCREF(%s);" % py_name)
code_block.unindent()
code_block.write_code("} else {") # if (typeid(*(%s)) == typeid(%s)) { ...
code_block.indent()
# no, this is not an instance of the helper class, we may
# need to create a new wrapper, or reference existing one
# if the wrapper registry tells us there is one already.
# first check in the wrapper registry...
try:
if cpp_class.memory_policy is not None:
cpp_class.wrapper_registry.write_lookup_wrapper(
code_block, cpp_class.pystruct, py_name, cpp_class.memory_policy.get_pointer_to_void_name(value_ptr))
else:
cpp_class.wrapper_registry.write_lookup_wrapper(
code_block, cpp_class.pystruct, py_name, value_ptr)
except NotSupportedError:
write_create_new_wrapper()
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, "%s->obj" % py_name)
else:
code_block.write_code("if (%s == NULL) {" % py_name)
code_block.indent()
# wrapper registry told us there is no wrapper for
# this instance => need to create new one
write_create_new_wrapper()
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, "%s->obj" % py_name)
code_block.unindent()
# handle ownership rules...
if caller_owns_return and \
isinstance(cpp_class.memory_policy, ReferenceCountingPolicy):
code_block.write_code("} else {")
code_block.indent()
# If we are already referencing the existing python wrapper,
# we do not need a reference to the C++ object as well.
cpp_class.memory_policy.write_decref(code_block, value_ptr)
code_block.unindent()
code_block.write_code("}")
else:
code_block.write_code("}")
code_block.unindent()
code_block.write_code("}") # closes: if (typeid(*(%s)) == typeid(%s)) { ... } else { ...
class CppClassParameterBase(Parameter):
"Base class for all C++ Class parameter handlers"
CTYPES = []
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
DIRECTIONS = [Parameter.DIRECTION_IN]
def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, is_const=False, default_value=None):
"""
:param ctype: C type, normally 'MyClass*'
:param name: parameter name
"""
if ctype == self.cpp_class.name:
ctype = self.cpp_class.full_name
super(CppClassParameterBase, self).__init__(
ctype, name, direction, is_const, default_value)
## name of the PyFoo * variable used in parameter parsing
self.py_name = None
        ## if True, this parameter is 'fake', and instead of being
## passed a parameter from python it is assumed to be the
## 'self' parameter of a method wrapper
self.take_value_from_python_self = False
class CppClassReturnValueBase(ReturnValue):
"Class return handlers -- base class"
CTYPES = []
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
def __init__(self, ctype, is_const=False):
super(CppClassReturnValueBase, self).__init__(ctype, is_const=is_const)
## name of the PyFoo * variable used in return value building
self.py_name = None
class CppClassParameter(CppClassParameterBase):
"""
Class parameter "by-value" handler
"""
CTYPES = []
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
DIRECTIONS = [Parameter.DIRECTION_IN]
def convert_python_to_c(self, wrapper):
"parses python args to get C++ value"
#assert isinstance(wrapper, ForwardWrapperBase)
#assert isinstance(self.cpp_class, cppclass.CppClass)
if self.take_value_from_python_self:
self.py_name = 'self'
wrapper.call_params.append(
'*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name))
else:
implicit_conversion_sources = self.cpp_class.get_all_implicit_conversions()
if not implicit_conversion_sources:
if self.default_value is not None:
self.cpp_class.get_construct_name() # raises an exception if the class cannot be constructed
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', self.name, 'NULL')
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=True)
wrapper.call_params.append(
'(%s ? (*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct, self.py_name, self.default_value))
else:
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', self.name)
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name)
wrapper.call_params.append(
'*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name))
else:
if self.default_value is None:
self.py_name = wrapper.declarations.declare_variable(
'PyObject*', self.name)
tmp_value_variable = wrapper.declarations.declare_variable(
self.cpp_class.full_name, self.name)
wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name)
else:
self.py_name = wrapper.declarations.declare_variable(
'PyObject*', self.name, 'NULL')
tmp_value_variable = wrapper.declarations.declare_variable(
self.cpp_class.full_name, self.name)
wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name, optional=True)
if self.default_value is None:
wrapper.before_call.write_code("if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
" %s = *((%s *) %s)->obj;" %
(self.py_name, self.cpp_class.pytypestruct,
tmp_value_variable,
self.cpp_class.pystruct, self.py_name))
else:
wrapper.before_call.write_code(
"if (%s == NULL) {\n"
" %s = %s;" %
(self.py_name, tmp_value_variable, self.default_value))
wrapper.before_call.write_code(
"} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
" %s = *((%s *) %s)->obj;" %
(self.py_name, self.cpp_class.pytypestruct,
tmp_value_variable,
self.cpp_class.pystruct, self.py_name))
for conversion_source in implicit_conversion_sources:
wrapper.before_call.write_code("} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
" %s = *((%s *) %s)->obj;" %
(self.py_name, conversion_source.pytypestruct,
tmp_value_variable,
conversion_source.pystruct, self.py_name))
wrapper.before_call.write_code("} else {\n")
wrapper.before_call.indent()
possible_type_names = ", ".join([cls.name for cls in [self.cpp_class] + implicit_conversion_sources])
wrapper.before_call.write_code("PyErr_Format(PyExc_TypeError, \"parameter must an instance of one of the types (%s), not %%s\", Py_TYPE(%s)->tp_name);" % (possible_type_names, self.py_name))
wrapper.before_call.write_error_return()
wrapper.before_call.unindent()
wrapper.before_call.write_code("}")
wrapper.call_params.append(tmp_value_variable)
def convert_c_to_python(self, wrapper):
'''Write some code before calling the Python method.'''
assert isinstance(wrapper, ReverseWrapperBase)
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name)
if self.cpp_class.allow_subclassing:
wrapper.before_call.write_code(
"%s->inst_dict = NULL;" % (self.py_name,))
wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,))
self.cpp_class.write_create_instance(wrapper.before_call,
"%s->obj" % self.py_name,
self.value)
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name,
"%s->obj" % self.py_name)
self.cpp_class.write_post_instance_creation_code(wrapper.before_call,
"%s->obj" % self.py_name,
self.value)
wrapper.build_params.add_parameter("N", [self.py_name])
class CppClassRefParameter(CppClassParameterBase):
"Class& handlers"
CTYPES = []
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
DIRECTIONS = [Parameter.DIRECTION_IN,
Parameter.DIRECTION_OUT,
Parameter.DIRECTION_INOUT]
def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, is_const=False,
default_value=None, default_value_type=None):
"""
:param ctype: C type, normally 'MyClass*'
:param name: parameter name
"""
if ctype == self.cpp_class.name:
ctype = self.cpp_class.full_name
super(CppClassRefParameter, self).__init__(
ctype, name, direction, is_const, default_value)
self.default_value_type = default_value_type
def convert_python_to_c(self, wrapper):
"parses python args to get C++ value"
#assert isinstance(wrapper, ForwardWrapperBase)
#assert isinstance(self.cpp_class, cppclass.CppClass)
if self.direction == Parameter.DIRECTION_IN:
if self.take_value_from_python_self:
self.py_name = 'self'
wrapper.call_params.append(
'*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name))
else:
implicit_conversion_sources = self.cpp_class.get_all_implicit_conversions()
if not (implicit_conversion_sources and self.type_traits.target_is_const):
if self.default_value is not None:
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', self.name, 'NULL')
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=True)
if self.default_value_type is not None:
default_value_name = wrapper.declarations.declare_variable(
self.default_value_type, "%s_default" % self.name,
self.default_value)
wrapper.call_params.append(
'(%s ? (*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct,
self.py_name, default_value_name))
else:
self.cpp_class.get_construct_name() # raises an exception if the class cannot be constructed
wrapper.call_params.append(
'(%s ? (*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct,
self.py_name, self.default_value))
else:
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', self.name)
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name)
wrapper.call_params.append(
'*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name))
else:
if self.default_value is not None:
warnings.warn("with implicit conversions, default value "
"in C++ class reference parameters is ignored.")
self.py_name = wrapper.declarations.declare_variable(
'PyObject*', self.name)
tmp_value_variable = wrapper.declarations.declare_variable(
self.cpp_class.full_name, self.name)
wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name)
wrapper.before_call.write_code("if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
" %s = *((%s *) %s)->obj;" %
(self.py_name, self.cpp_class.pytypestruct,
tmp_value_variable,
self.cpp_class.pystruct, self.py_name))
for conversion_source in implicit_conversion_sources:
wrapper.before_call.write_code("} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
" %s = *((%s *) %s)->obj;" %
(self.py_name, conversion_source.pytypestruct,
tmp_value_variable,
conversion_source.pystruct, self.py_name))
wrapper.before_call.write_code("} else {\n")
wrapper.before_call.indent()
possible_type_names = ", ".join([cls.name for cls in [self.cpp_class] + implicit_conversion_sources])
wrapper.before_call.write_code("PyErr_Format(PyExc_TypeError, \"parameter must an instance of one of the types (%s), not %%s\", Py_TYPE(%s)->tp_name);" % (possible_type_names, self.py_name))
wrapper.before_call.write_error_return()
wrapper.before_call.unindent()
wrapper.before_call.write_code("}")
wrapper.call_params.append(tmp_value_variable)
elif self.direction == Parameter.DIRECTION_OUT:
assert not self.take_value_from_python_self
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', self.name)
self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name)
if self.cpp_class.allow_subclassing:
wrapper.after_call.write_code(
"%s->inst_dict = NULL;" % (self.py_name,))
wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,))
self.cpp_class.write_create_instance(wrapper.before_call,
"%s->obj" % self.py_name,
'')
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name,
"%s->obj" % self.py_name)
self.cpp_class.write_post_instance_creation_code(wrapper.before_call,
"%s->obj" % self.py_name,
'')
wrapper.call_params.append('*%s->obj' % (self.py_name,))
wrapper.build_params.add_parameter("N", [self.py_name])
## well, personally I think inout here doesn't make much sense
## (it's just plain confusing), but might as well support it..
## C++ class reference inout parameters allow "inplace"
## modifications, i.e. the object is not explicitly returned
## but is instead modified by the callee.
elif self.direction == Parameter.DIRECTION_INOUT:
assert not self.take_value_from_python_self
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', self.name)
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name)
wrapper.call_params.append(
'*%s->obj' % (self.py_name))
def convert_c_to_python(self, wrapper):
'''Write some code before calling the Python method.'''
assert isinstance(wrapper, ReverseWrapperBase)
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name)
if self.cpp_class.allow_subclassing:
wrapper.before_call.write_code(
"%s->inst_dict = NULL;" % (self.py_name,))
wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,))
if self.direction == Parameter.DIRECTION_IN:
if not self.cpp_class.has_copy_constructor:
raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
self.cpp_class.write_create_instance(wrapper.before_call,
"%s->obj" % self.py_name,
self.value)
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name,
"%s->obj" % self.py_name)
self.cpp_class.write_post_instance_creation_code(wrapper.before_call,
"%s->obj" % self.py_name,
self.value)
wrapper.build_params.add_parameter("N", [self.py_name])
else:
## out/inout case:
## the callee receives a "temporary wrapper", which loses
## the ->obj pointer after the python call; this is so
## that the python code directly manipulates the object
## received as parameter, instead of a copy.
if self.type_traits.target_is_const:
value = "(%s*) (&(%s))" % (self.cpp_class.full_name, self.value)
else:
value = "&(%s)" % self.value
wrapper.before_call.write_code(
"%s->obj = %s;" % (self.py_name, value))
wrapper.build_params.add_parameter("O", [self.py_name])
wrapper.before_call.add_cleanup_code("Py_DECREF(%s);" % self.py_name)
if self.cpp_class.has_copy_constructor:
## if after the call we notice the callee kept a reference
## to the pyobject, we then swap pywrapper->obj for a copy
## of the original object. Else the ->obj pointer is
## simply erased (we never owned this object in the first
## place).
wrapper.after_call.write_code(
"if (Py_REFCNT(%s) == 1)\n"
" %s->obj = NULL;\n"
"else{\n" % (self.py_name, self.py_name))
wrapper.after_call.indent()
self.cpp_class.write_create_instance(wrapper.after_call,
"%s->obj" % self.py_name,
self.value)
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, self.py_name,
"%s->obj" % self.py_name)
self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
"%s->obj" % self.py_name,
self.value)
wrapper.after_call.unindent()
wrapper.after_call.write_code('}')
else:
## it's not safe for the python wrapper to keep a
## pointer to the object anymore; just set it to NULL.
wrapper.after_call.write_code("%s->obj = NULL;" % (self.py_name,))
class CppClassReturnValue(CppClassReturnValueBase):
"Class return handlers"
CTYPES = []
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
REQUIRES_ASSIGNMENT_CONSTRUCTOR = True
def __init__(self, ctype, is_const=False):
"""override to fix the ctype parameter with namespace information"""
if ctype == self.cpp_class.name:
ctype = self.cpp_class.full_name
super(CppClassReturnValue, self).__init__(ctype, is_const=is_const)
def get_c_error_return(self): # only used in reverse wrappers
"""See ReturnValue.get_c_error_return"""
if self.type_traits.type_is_reference:
raise NotSupportedError
return "return %s();" % (self.cpp_class.full_name,)
def convert_c_to_python(self, wrapper):
"""see ReturnValue.convert_c_to_python"""
py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
self.py_name = py_name
self.cpp_class.write_allocate_pystruct(wrapper.after_call, self.py_name)
if self.cpp_class.allow_subclassing:
wrapper.after_call.write_code(
"%s->inst_dict = NULL;" % (py_name,))
wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
if not self.cpp_class.has_copy_constructor:
raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
self.cpp_class.write_create_instance(wrapper.after_call,
"%s->obj" % py_name,
self.value)
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, py_name,
"%s->obj" % py_name)
self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
"%s->obj" % py_name,
self.value)
#...
wrapper.build_params.add_parameter("N", [py_name], prepend=True)
def convert_python_to_c(self, wrapper):
"""see ReturnValue.convert_python_to_c"""
if self.type_traits.type_is_reference:
raise NotSupportedError
name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name)
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+name])
if self.REQUIRES_ASSIGNMENT_CONSTRUCTOR:
wrapper.after_call.write_code('%s %s = *%s->obj;' %
(self.cpp_class.full_name, self.value, name))
else:
wrapper.after_call.write_code('%s = *%s->obj;' % (self.value, name))
class CppClassRefReturnValue(CppClassReturnValueBase):
"Class return handlers"
CTYPES = []
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
REQUIRES_ASSIGNMENT_CONSTRUCTOR = True
def __init__(self, ctype, is_const=False, caller_owns_return=False, reference_existing_object=None,
return_internal_reference=None, caller_manages_return=True):
#override to fix the ctype parameter with namespace information
if ctype == self.cpp_class.name:
ctype = self.cpp_class.full_name
super(CppClassRefReturnValue, self).__init__(ctype, is_const=is_const)
self.reference_existing_object = reference_existing_object
self.return_internal_reference = return_internal_reference
if self.return_internal_reference:
assert self.reference_existing_object is None
self.reference_existing_object = True
self.caller_owns_return = caller_owns_return
self.caller_manages_return = caller_manages_return
def get_c_error_return(self): # only used in reverse wrappers
"""See ReturnValue.get_c_error_return"""
if (
self.type_traits.type_is_reference
and not self.type_traits.target_is_const
):
raise NotSupportedError("non-const reference return not supported")
return "{static %s __err; return __err;}" % (self.cpp_class.full_name,)
def convert_c_to_python(self, wrapper):
"""see ReturnValue.convert_c_to_python"""
py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
self.py_name = py_name
if self.reference_existing_object or self.caller_owns_return or not self.caller_manages_return:
common_shared_object_return(self.value, py_name, self.cpp_class, wrapper.after_call,
self.type_traits, self.caller_owns_return,
self.reference_existing_object,
type_is_pointer=False,
caller_manages_return=self.caller_manages_return)
else:
self.cpp_class.write_allocate_pystruct(wrapper.after_call, py_name)
wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
if not self.cpp_class.has_copy_constructor:
raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
self.cpp_class.write_create_instance(wrapper.after_call,
"%s->obj" % py_name,
self.value)
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, py_name,
"%s->obj" % py_name)
self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
"%s->obj" % py_name,
self.value)
#...
wrapper.build_params.add_parameter("N", [py_name], prepend=True)
def convert_python_to_c(self, wrapper):
"""see ReturnValue.convert_python_to_c"""
if (
self.type_traits.type_is_reference
and not self.type_traits.target_is_const
):
raise NotSupportedError("non-const reference return not supported")
name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name)
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+name])
if self.REQUIRES_ASSIGNMENT_CONSTRUCTOR:
wrapper.after_call.write_code('%s %s = *%s->obj;' %
(self.cpp_class.full_name, self.value, name))
else:
wrapper.after_call.write_code('%s = *%s->obj;' % (self.value, name))
class CppClassPtrParameter(CppClassParameterBase):
"Class* handlers"
CTYPES = []
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
DIRECTIONS = [Parameter.DIRECTION_IN,
Parameter.DIRECTION_OUT,
Parameter.DIRECTION_INOUT]
SUPPORTS_TRANSFORMATIONS = True
def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, transfer_ownership=None, custodian=None, is_const=False,
null_ok=False, default_value=None):
"""
Type handler for a pointer-to-class parameter (MyClass*)
:param ctype: C type, normally 'MyClass*'
:param name: parameter name
:param transfer_ownership: if True, the callee becomes
responsible for freeing the object. If False, the
caller remains responsible for the object. In
either case, the original object pointer is passed,
not a copy. In case transfer_ownership=True, it is
invalid to perform operations on the object after
the call (calling any method will cause a null
pointer dereference and crash the program).
:param custodian: if given, points to an object (custodian)
that keeps the python wrapper for the
parameter alive. Possible values are:
- None: no object is custodian;
- -1: the return value object;
- 0: the instance of the method in which
the ReturnValue is being used will become the
custodian;
- integer > 0: parameter number, starting at 1
(i.e. not counting the self/this parameter),
whose object will be used as custodian.
:param is_const: if true, the parameter has a const attached to the leftmost
:param null_ok: if true, None is accepted and mapped into a C NULL pointer
:param default_value: default parameter value (as C expression
            string); probably the only default value that makes sense
            here is 'NULL'.
.. note::
Only arguments which are instances of C++ classes
wrapped by PyBindGen can be used as custodians.
"""
if ctype == self.cpp_class.name:
ctype = self.cpp_class.full_name
super(CppClassPtrParameter, self).__init__(
ctype, name, direction, is_const, default_value)
if transfer_ownership is None and self.type_traits.target_is_const:
transfer_ownership = False
self.custodian = custodian
self.transfer_ownership = transfer_ownership
self.null_ok = null_ok
if transfer_ownership is None:
raise TypeConfigurationError("Missing transfer_ownership option")
def convert_python_to_c(self, wrapper):
"parses python args to get C++ value"
#assert isinstance(wrapper, ForwardWrapperBase)
#assert isinstance(self.cpp_class, cppclass.CppClass)
if self.take_value_from_python_self:
self.py_name = 'self'
value_ptr = 'self->obj'
else:
self.py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', self.name,
initializer=(self.default_value and 'NULL' or None))
value_ptr = wrapper.declarations.declare_variable("%s*" % self.cpp_class.full_name,
"%s_ptr" % self.name)
if self.null_ok:
num = wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name, optional=bool(self.default_value))
wrapper.before_call.write_error_check(
"%s && ((PyObject *) %s != Py_None) && !PyObject_IsInstance((PyObject *) %s, (PyObject *) &%s)"
% (self.py_name, self.py_name, self.py_name, self.cpp_class.pytypestruct),
'PyErr_SetString(PyExc_TypeError, "Parameter %i must be of type %s");' % (num, self.cpp_class.name))
wrapper.before_call.write_code("if (%(PYNAME)s) {\n"
" if ((PyObject *) %(PYNAME)s == Py_None)\n"
" %(VALUE)s = NULL;\n"
" else\n"
" %(VALUE)s = %(PYNAME)s->obj;\n"
"} else {\n"
" %(VALUE)s = NULL;\n"
"}" % dict(PYNAME=self.py_name, VALUE=value_ptr))
else:
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=bool(self.default_value))
wrapper.before_call.write_code("%s = (%s ? %s->obj : NULL);" % (value_ptr, self.py_name, self.py_name))
value = self.transformation.transform(self, wrapper.declarations, wrapper.before_call, value_ptr)
wrapper.call_params.append(value)
if self.transfer_ownership:
if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy):
# if we transfer ownership, in the end we no longer own the object, so clear our pointer
wrapper.after_call.write_code('if (%s) {' % self.py_name)
wrapper.after_call.indent()
if self.cpp_class.memory_policy is not None:
self.cpp_class.wrapper_registry.write_unregister_wrapper(wrapper.after_call,
'%s' % self.py_name,
self.cpp_class.memory_policy.get_pointer_to_void_name('%s->obj' % self.py_name))
else:
self.cpp_class.wrapper_registry.write_unregister_wrapper(wrapper.after_call,
'%s' % self.py_name,
'%s->obj' % self.py_name)
wrapper.after_call.write_code('%s->obj = NULL;' % self.py_name)
wrapper.after_call.unindent()
wrapper.after_call.write_code('}')
else:
wrapper.before_call.write_code("if (%s) {" % self.py_name)
wrapper.before_call.indent()
self.cpp_class.memory_policy.write_incref(wrapper.before_call, "%s->obj" % self.py_name)
wrapper.before_call.unindent()
wrapper.before_call.write_code("}")
def convert_c_to_python(self, wrapper):
"""foo"""
## Value transformations
value = self.transformation.untransform(
self, wrapper.declarations, wrapper.after_call, self.value)
## declare wrapper variable
py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
self.py_name = py_name
def write_create_new_wrapper():
"""Code path that creates a new wrapper for the parameter"""
## Find out what Python wrapper to use, in case
## automatic_type_narrowing is active and we are not forced to
## make a copy of the object
if (self.cpp_class.automatic_type_narrowing
and (self.transfer_ownership or isinstance(self.cpp_class.memory_policy,
ReferenceCountingPolicy))):
typeid_map_name = self.cpp_class.get_type_narrowing_root().typeid_map_name
wrapper_type = wrapper.declarations.declare_variable(
'PyTypeObject*', 'wrapper_type', '0')
wrapper.before_call.write_code(
'%s = %s.lookup_wrapper(typeid(*%s), &%s);'
% (wrapper_type, typeid_map_name, value, self.cpp_class.pytypestruct))
else:
wrapper_type = '&'+self.cpp_class.pytypestruct
## Create the Python wrapper object
self.cpp_class.write_allocate_pystruct(wrapper.before_call, py_name, wrapper_type)
wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % py_name)
self.py_name = py_name
## Assign the C++ value to the Python wrapper
if self.transfer_ownership:
wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value))
else:
if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy):
## The PyObject gets a temporary pointer to the
## original value; the pointer is converted to a
## copy in case the callee retains a reference to
## the object after the call.
if self.direction == Parameter.DIRECTION_IN:
if not self.cpp_class.has_copy_constructor:
raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
self.cpp_class.write_create_instance(wrapper.before_call,
"%s->obj" % self.py_name,
'*'+self.value)
self.cpp_class.write_post_instance_creation_code(wrapper.before_call,
"%s->obj" % self.py_name,
'*'+self.value)
else:
## out/inout case:
## the callee receives a "temporary wrapper", which loses
## the ->obj pointer after the python call; this is so
## that the python code directly manipulates the object
## received as parameter, instead of a copy.
if self.type_traits.target_is_const:
unconst_value = "(%s*) (%s)" % (self.cpp_class.full_name, value)
else:
unconst_value = value
wrapper.before_call.write_code(
"%s->obj = %s;" % (self.py_name, unconst_value))
wrapper.build_params.add_parameter("O", [self.py_name])
wrapper.before_call.add_cleanup_code("Py_DECREF(%s);" % self.py_name)
if self.cpp_class.has_copy_constructor:
## if after the call we notice the callee kept a reference
## to the pyobject, we then swap pywrapper->obj for a copy
## of the original object. Else the ->obj pointer is
## simply erased (we never owned this object in the first
## place).
wrapper.after_call.write_code(
"if (Py_REFCNT(%s) == 1)\n"
" %s->obj = NULL;\n"
"else {\n" % (self.py_name, self.py_name))
wrapper.after_call.indent()
self.cpp_class.write_create_instance(wrapper.after_call,
"%s->obj" % self.py_name,
'*'+value)
self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
"%s->obj" % self.py_name,
'*'+value)
wrapper.after_call.unindent()
wrapper.after_call.write_code('}')
else:
## it's not safe for the python wrapper to keep a
## pointer to the object anymore; just set it to NULL.
wrapper.after_call.write_code("%s->obj = NULL;" % (self.py_name,))
else:
## The PyObject gets a new reference to the same obj
self.cpp_class.memory_policy.write_incref(wrapper.before_call, value)
if self.type_traits.target_is_const:
wrapper.before_call.write_code("%s->obj = (%s*) (%s);" %
(py_name, self.cpp_class.full_name, value))
else:
wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value))
## closes def write_create_new_wrapper():
if self.cpp_class.helper_class is None:
try:
if self.cpp_class.memory_policy is not None:
self.cpp_class.wrapper_registry.write_lookup_wrapper(
wrapper.before_call, self.cpp_class.pystruct, py_name, self.cpp_class.memory_policy.get_pointer_to_void_name(value))
else:
self.cpp_class.wrapper_registry.write_lookup_wrapper(
wrapper.before_call, self.cpp_class.pystruct, py_name, value)
except NotSupportedError:
write_create_new_wrapper()
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
"%s->obj" % py_name)
else:
wrapper.before_call.write_code("if (%s == NULL)\n{" % py_name)
wrapper.before_call.indent()
write_create_new_wrapper()
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
"%s->obj" % py_name)
wrapper.before_call.unindent()
wrapper.before_call.write_code('}')
wrapper.build_params.add_parameter("N", [py_name])
else:
wrapper.before_call.write_code("if (typeid(*(%s)).name() == typeid(%s).name())\n{"
% (value, self.cpp_class.helper_class.name))
wrapper.before_call.indent()
if self.type_traits.target_is_const:
wrapper.before_call.write_code(
"%s = (%s*) (((%s*) ((%s*) %s))->m_pyself);"
% (py_name, self.cpp_class.pystruct,
self.cpp_class.helper_class.name, self.cpp_class.full_name, value))
wrapper.before_call.write_code("%s->obj = (%s*) (%s);" %
(py_name, self.cpp_class.full_name, value))
else:
wrapper.before_call.write_code(
"%s = (%s*) (((%s*) %s)->m_pyself);"
% (py_name, self.cpp_class.pystruct,
self.cpp_class.helper_class.name, value))
wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value))
wrapper.before_call.write_code("Py_INCREF(%s);" % py_name)
wrapper.before_call.unindent()
wrapper.before_call.write_code("} else {")
wrapper.before_call.indent()
try:
if self.cpp_class.memory_policy is not None:
self.cpp_class.wrapper_registry.write_lookup_wrapper(
wrapper.before_call, self.cpp_class.pystruct, py_name, self.cpp_class.memory_policy.get_pointer_to_void_name(value))
else:
self.cpp_class.wrapper_registry.write_lookup_wrapper(
wrapper.before_call, self.cpp_class.pystruct, py_name, value)
except NotSupportedError:
write_create_new_wrapper()
self.cpp_class.wrapper_registry.write_register_new_wrapper(
wrapper.before_call, py_name, "%s->obj" % py_name)
else:
wrapper.before_call.write_code("if (%s == NULL)\n{" % py_name)
wrapper.before_call.indent()
write_create_new_wrapper()
self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
"%s->obj" % py_name)
wrapper.before_call.unindent()
wrapper.before_call.write_code('}') # closes if (%s == NULL)
wrapper.before_call.unindent()
wrapper.before_call.write_code("}") # closes if (typeid(*(%s)) == typeid(%s))\n{
wrapper.build_params.add_parameter("N", [py_name])
class CppClassPtrReturnValue(CppClassReturnValueBase):
"Class* return handler"
CTYPES = []
SUPPORTS_TRANSFORMATIONS = True
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
def __init__(self, ctype, caller_owns_return=None, custodian=None,
is_const=False, reference_existing_object=None,
return_internal_reference=None, caller_manages_return=True, free_after_copy=False):
"""
:param ctype: C type, normally 'MyClass*'
:param caller_owns_return: if true, ownership of the object pointer
is transferred to the caller
:param free_after_copy: if true, the python wrapper must call delete on
the returned pointer once it has taken a copy.
:param custodian: bind the life cycle of the python wrapper
for the return value object (ward) to that
of the object indicated by this parameter
(custodian). Possible values are:
- None: no object is custodian;
- 0: the instance of the method in which
the ReturnValue is being used will become the
custodian;
- integer > 0: parameter number, starting at 1
(i.e. not counting the self/this parameter),
whose object will be used as custodian.
:param reference_existing_object: if true, ownership of the
            pointed-to object remains with the caller, but we
do not make a copy. The callee gets a reference to
the existing object, but is not responsible for
freeing it. Note that using this memory management
style is dangerous, as it exposes the Python
programmer to the possibility of keeping a reference
to an object that may have been deallocated in the
mean time. Calling methods on such an object would
lead to a memory error.
:param return_internal_reference: like
reference_existing_object, but additionally adds
custodian/ward to bind the lifetime of the 'self' object
(instance the method is bound to) to the lifetime of the
return value.
.. note::
Only arguments which are instances of C++ classes
wrapped by PyBindGen can be used as custodians.
"""
if ctype == self.cpp_class.name:
ctype = self.cpp_class.full_name
super(CppClassPtrReturnValue, self).__init__(ctype, is_const=is_const)
if caller_owns_return is None:
# For "const Foo*", we assume caller_owns_return=False by default
if self.type_traits.target_is_const:
caller_owns_return = False
self.caller_owns_return = caller_owns_return
self.caller_manages_return = caller_manages_return
self.free_after_copy = free_after_copy
self.reference_existing_object = reference_existing_object
self.return_internal_reference = return_internal_reference
if self.return_internal_reference:
assert self.reference_existing_object is None
self.reference_existing_object = True
self.custodian = custodian
if self.caller_owns_return and self.free_after_copy:
raise TypeConfigurationError("only one of caller_owns_return or free_after_copy can be given")
if self.caller_owns_return is None\
and self.free_after_copy is None \
and self.reference_existing_object is None:
raise TypeConfigurationError("Either caller_owns_return or self.reference_existing_object must be given")
def get_c_error_return(self): # only used in reverse wrappers
"""See ReturnValue.get_c_error_return"""
return "return NULL;"
def convert_c_to_python(self, wrapper):
"""See ReturnValue.convert_c_to_python"""
## Value transformations
value = self.transformation.untransform(
self, wrapper.declarations, wrapper.after_call, self.value)
# if value is NULL, return None
wrapper.after_call.write_code("if (!(%s)) {\n"
" Py_INCREF(Py_None);\n"
" return Py_None;\n"
"}" % value)
## declare wrapper variable
py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
self.py_name = py_name
common_shared_object_return(value, py_name, self.cpp_class, wrapper.after_call,
self.type_traits, self.caller_owns_return,
self.reference_existing_object,
type_is_pointer=True,
caller_manages_return=self.caller_manages_return,
free_after_copy=self.free_after_copy)
# return the value
wrapper.build_params.add_parameter("N", [py_name], prepend=True)
if self.free_after_copy:
wrapper.after_call.add_cleanup_code("delete retval;")
wrapper.after_call.add_cleanup_code("// free_after_copy for %s* %ss" % (self.cpp_class.name,wrapper.function_name))
def convert_python_to_c(self, wrapper):
"""See ReturnValue.convert_python_to_c"""
name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name)
wrapper.parse_params.add_parameter(
'O!', ['&'+self.cpp_class.pytypestruct, '&'+name])
value = self.transformation.transform(
self, wrapper.declarations, wrapper.after_call, "%s->obj" % name)
## now the hairy part :)
if self.caller_owns_return:
if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy):
## the caller receives a copy, if possible
try:
if not self.cpp_class.has_copy_constructor:
raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
self.cpp_class.write_create_instance(wrapper.after_call,
"%s" % self.value,
'*'+value)
except CodeGenerationError:
copy_possible = False
else:
copy_possible = True
if copy_possible:
self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
"%s" % self.value,
'*'+value)
else:
# value = pyobj->obj; pyobj->obj = NULL;
wrapper.after_call.write_code(
"%s = %s;" % (self.value, value))
wrapper.after_call.write_code(
"%s = NULL;" % (value,))
else:
## the caller gets a new reference to the same obj
self.cpp_class.memory_policy.write_incref(wrapper.after_call, value)
if self.type_traits.target_is_const:
wrapper.after_call.write_code(
"%s = const_cast< %s* >(%s);" %
(self.value, self.cpp_class.full_name, value))
else:
wrapper.after_call.write_code(
"%s = %s;" % (self.value, value))
else:
## caller gets a shared pointer
## but this is dangerous, avoid at all cost!!!
wrapper.after_call.write_code(
"// dangerous!\n%s = %s;" % (self.value, value))
warnings.warn("Returning shared pointers is dangerous!"
" The C++ API should be redesigned "
"to avoid this situation.")
##
## Core of the custodians-and-wards implementation
##
def scan_custodians_and_wards(wrapper):
"""
Scans the return value and parameters for custodian/ward options,
converts them to add_custodian_and_ward API calls. Wrappers that
implement custodian_and_ward are: CppMethod, Function, and
CppConstructor.
"""
assert hasattr(wrapper, "add_custodian_and_ward")
for num, param in enumerate(wrapper.parameters):
custodian = getattr(param, 'custodian', None)
if custodian is not None:
wrapper.add_custodian_and_ward(custodian, num+1)
custodian = getattr(wrapper.return_value, 'custodian', None)
if custodian is not None:
wrapper.add_custodian_and_ward(custodian, -1)
if getattr(wrapper.return_value, "return_internal_reference", False):
wrapper.add_custodian_and_ward(-1, 0)
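# Added clarification (hedged): the custodian/ward numbering used here follows
# the convention documented on the parameter and return-value classes:
#   -1 -> the return value's wrapper, 0 -> the 'self' instance,
#   N > 0 -> the N-th parameter (not counting self).
# So return_internal_reference maps to add_custodian_and_ward(-1, 0): the
# returned wrapper keeps the 'self' object alive for as long as it exists.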
def _add_ward(code_block, custodian, ward):
wards = code_block.declare_variable(
'PyObject*', 'wards')
code_block.write_code(
"%(wards)s = PyObject_GetAttrString(%(custodian)s, (char *) \"__wards__\");"
% vars())
code_block.write_code(
"if (%(wards)s == NULL) {\n"
" PyErr_Clear();\n"
" %(wards)s = PyList_New(0);\n"
" PyObject_SetAttrString(%(custodian)s, (char *) \"__wards__\", %(wards)s);\n"
"}" % vars())
code_block.write_code(
"if (%(ward)s && !PySequence_Contains(%(wards)s, %(ward)s))\n"
" PyList_Append(%(wards)s, %(ward)s);" % dict(wards=wards, ward=ward))
code_block.add_cleanup_code("Py_DECREF(%s);" % wards)
def _get_custodian_or_ward(wrapper, num):
if num == -1:
assert wrapper.return_value.py_name is not None
return "((PyObject *) %s)" % wrapper.return_value.py_name
elif num == 0:
return "((PyObject *) self)"
else:
assert wrapper.parameters[num-1].py_name is not None
return "((PyObject *) %s)" % wrapper.parameters[num-1].py_name
def implement_parameter_custodians_precall(wrapper):
for custodian, ward, postcall in wrapper.custodians_and_wards:
if not postcall:
_add_ward(wrapper.before_call,
_get_custodian_or_ward(wrapper, custodian),
_get_custodian_or_ward(wrapper, ward))
def implement_parameter_custodians_postcall(wrapper):
for custodian, ward, postcall in wrapper.custodians_and_wards:
if postcall:
_add_ward(wrapper.after_call,
_get_custodian_or_ward(wrapper, custodian),
_get_custodian_or_ward(wrapper, ward))
|
gjcarneiro/pybindgen
|
pybindgen/cppclass.py
|
Python
|
lgpl-2.1
| 175,750
|
def extractBlissfulfairytaleCom(item):
'''
Parser for 'blissfulfairytale.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('reborn as the villain president\'s lover', 'reborn as the villain president\'s lover', 'translated'),
('i raised a sick and weak prince', 'i raised a sick and weak prince', 'translated'),
('pretty her [qt]', 'Pretty Her [QT]', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractBlissfulfairytaleCom.py
|
Python
|
bsd-3-clause
| 951
|
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import itertools
import os
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import external_process
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.common import utils as common_utils
VALID_STATES = ['MASTER', 'BACKUP']
VALID_AUTH_TYPES = ['AH', 'PASS']
HA_DEFAULT_PRIORITY = 50
PRIMARY_VIP_RANGE_SIZE = 24
# TODO(amuller): Use L3 agent constant when new constants module is introduced.
FIP_LL_SUBNET = '169.254.30.0/23'
KEEPALIVED_SERVICE_NAME = 'keepalived'
GARP_MASTER_REPEAT = 5
GARP_MASTER_REFRESH = 10
LOG = logging.getLogger(__name__)
def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE):
"""Get a free IP range, from parent_range, of the specified size.
:param parent_range: String representing an IP range. E.g: '169.254.0.0/16'
:param excluded_ranges: A list of strings to be excluded from parent_range
    :param size: The prefix length of the range to return
:return: A string representing an IP range
"""
free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges)
for cidr in free_cidrs.iter_cidrs():
if cidr.prefixlen <= size:
return '%s/%s' % (cidr.network, size)
raise ValueError(_('Network of size %(size)s, from IP range '
'%(parent_range)s excluding IP ranges '
'%(excluded_ranges)s was not found.') %
{'size': size,
'parent_range': parent_range,
'excluded_ranges': excluded_ranges})
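# Example (added, hedged sketch):
#   get_free_range('169.254.0.0/16', ['169.254.0.0/24'], size=24)
# skips the excluded /24 and returns '169.254.1.0/24', the first free block of
# the requested prefix length.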
class InvalidInstanceStateException(exceptions.NeutronException):
message = _('Invalid instance state: %(state)s, valid states are: '
'%(valid_states)s')
def __init__(self, **kwargs):
if 'valid_states' not in kwargs:
kwargs['valid_states'] = ', '.join(VALID_STATES)
super(InvalidInstanceStateException, self).__init__(**kwargs)
class InvalidAuthenticationTypeException(exceptions.NeutronException):
message = _('Invalid authentication type: %(auth_type)s, '
'valid types are: %(valid_auth_types)s')
def __init__(self, **kwargs):
if 'valid_auth_types' not in kwargs:
kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES)
super(InvalidAuthenticationTypeException, self).__init__(**kwargs)
class VIPDuplicateAddressException(exceptions.NeutronException):
message = _('Attempted to add duplicate VIP address, '
'existing vips are: %(existing_vips)s, '
'duplicate vip is: %(duplicate_vip)s')
def __init__(self, **kwargs):
kwargs['existing_vips'] = ', '.join(str(vip) for vip in
kwargs['existing_vips'])
super(VIPDuplicateAddressException, self).__init__(**kwargs)
class KeepalivedVipAddress(object):
"""A virtual address entry of a keepalived configuration."""
def __init__(self, ip_address, interface_name, scope=None):
self.ip_address = ip_address
self.interface_name = interface_name
self.scope = scope
def __eq__(self, other):
return (isinstance(other, KeepalivedVipAddress) and
self.ip_address == other.ip_address)
def __str__(self):
return '[%s, %s, %s]' % (self.ip_address,
self.interface_name,
self.scope)
def build_config(self):
result = '%s dev %s' % (self.ip_address, self.interface_name)
if self.scope:
result += ' scope %s' % self.scope
return result
class KeepalivedVirtualRoute(object):
"""A virtual route entry of a keepalived configuration."""
def __init__(self, destination, nexthop, interface_name=None,
scope=None):
self.destination = destination
self.nexthop = nexthop
self.interface_name = interface_name
self.scope = scope
def build_config(self):
output = self.destination
if self.nexthop:
output += ' via %s' % self.nexthop
if self.interface_name:
output += ' dev %s' % self.interface_name
if self.scope:
output += ' scope %s' % self.scope
return output
class KeepalivedInstanceRoutes(object):
def __init__(self):
self.gateway_routes = []
self.extra_routes = []
self.extra_subnets = []
def remove_routes_on_interface(self, interface_name):
self.gateway_routes = [gw_rt for gw_rt in self.gateway_routes
if gw_rt.interface_name != interface_name]
# NOTE(amuller): extra_routes are initialized from the router's
# 'routes' attribute. These routes do not have an interface
# parameter and so cannot be removed via an interface_name lookup.
self.extra_subnets = [route for route in self.extra_subnets if
route.interface_name != interface_name]
@property
def routes(self):
return self.gateway_routes + self.extra_routes + self.extra_subnets
def __len__(self):
return len(self.routes)
def build_config(self):
return itertools.chain([' virtual_routes {'],
(' %s' % route.build_config()
for route in self.routes),
[' }'])
class KeepalivedInstance(object):
"""Instance section of a keepalived configuration."""
def __init__(self, state, interface, vrouter_id, ha_cidrs,
priority=HA_DEFAULT_PRIORITY, advert_int=None,
mcast_src_ip=None, nopreempt=False,
garp_master_repeat=GARP_MASTER_REPEAT,
garp_master_refresh=GARP_MASTER_REFRESH):
self.name = 'VR_%s' % vrouter_id
if state not in VALID_STATES:
raise InvalidInstanceStateException(state=state)
self.state = state
self.interface = interface
self.vrouter_id = vrouter_id
self.priority = priority
self.nopreempt = nopreempt
self.advert_int = advert_int
self.mcast_src_ip = mcast_src_ip
self.garp_master_repeat = garp_master_repeat
self.garp_master_refresh = garp_master_refresh
self.track_interfaces = []
self.vips = []
self.virtual_routes = KeepalivedInstanceRoutes()
self.authentication = None
metadata_cidr = '169.254.169.254/32'
self.primary_vip_range = get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=[metadata_cidr, FIP_LL_SUBNET] + ha_cidrs,
size=PRIMARY_VIP_RANGE_SIZE)
def set_authentication(self, auth_type, password):
if auth_type not in VALID_AUTH_TYPES:
raise InvalidAuthenticationTypeException(auth_type=auth_type)
self.authentication = (auth_type, password)
def add_vip(self, ip_cidr, interface_name, scope):
vip = KeepalivedVipAddress(ip_cidr, interface_name, scope)
if vip in self.vips:
raise VIPDuplicateAddressException(existing_vips=self.vips,
duplicate_vip=vip)
self.vips.append(vip)
def remove_vips_vroutes_by_interface(self, interface_name):
self.vips = [vip for vip in self.vips
if vip.interface_name != interface_name]
self.virtual_routes.remove_routes_on_interface(interface_name)
def remove_vip_by_ip_address(self, ip_address):
self.vips = [vip for vip in self.vips
if vip.ip_address != ip_address]
def get_existing_vip_ip_addresses(self, interface_name):
return [vip.ip_address for vip in self.vips
if vip.interface_name == interface_name]
def _build_track_interface_config(self):
return itertools.chain(
[' track_interface {'],
(' %s' % i for i in self.track_interfaces),
[' }'])
def get_primary_vip(self):
"""Return an address in the primary_vip_range CIDR, with the router's
VRID in the host section.
        For example, if primary_vip_range is 169.254.0.0/24 and this router's
        VRID is 5, the result is 169.254.0.5/24. Using the VRID assures that
the primary VIP is consistent amongst HA router instances on different
nodes.
"""
ip = (netaddr.IPNetwork(self.primary_vip_range).network +
self.vrouter_id)
return str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE)))
def _build_vips_config(self):
# NOTE(amuller): The primary VIP must be consistent in order to avoid
# keepalived bugs. Changing the VIP in the 'virtual_ipaddress' and
# SIGHUP'ing keepalived can remove virtual routers, including the
# router's default gateway.
# We solve this by never changing the VIP in the virtual_ipaddress
# section, herein known as the primary VIP.
# The only interface known to exist for HA routers is the HA interface
# (self.interface). We generate an IP on that device and use it as the
# primary VIP. The other VIPs (Internal interfaces IPs, the external
# interface IP and floating IPs) are placed in the
# virtual_ipaddress_excluded section.
primary = KeepalivedVipAddress(self.get_primary_vip(), self.interface)
vips_result = [' virtual_ipaddress {',
' %s' % primary.build_config(),
' }']
if self.vips:
vips_result.extend(
itertools.chain([' virtual_ipaddress_excluded {'],
(' %s' % vip.build_config()
for vip in
sorted(self.vips,
key=lambda vip: vip.ip_address)),
[' }']))
return vips_result
def _build_virtual_routes_config(self):
return itertools.chain([' virtual_routes {'],
(' %s' % route.build_config()
for route in self.virtual_routes),
[' }'])
def build_config(self):
config = ['vrrp_instance %s {' % self.name,
' state %s' % self.state,
' interface %s' % self.interface,
' virtual_router_id %s' % self.vrouter_id,
' priority %s' % self.priority,
' garp_master_repeat %s' % self.garp_master_repeat,
' garp_master_refresh %s' % self.garp_master_refresh]
if self.nopreempt:
config.append(' nopreempt')
if self.advert_int:
config.append(' advert_int %s' % self.advert_int)
if self.authentication:
auth_type, password = self.authentication
authentication = [' authentication {',
' auth_type %s' % auth_type,
' auth_pass %s' % password,
' }']
config.extend(authentication)
if self.mcast_src_ip:
config.append(' mcast_src_ip %s' % self.mcast_src_ip)
if self.track_interfaces:
config.extend(self._build_track_interface_config())
config.extend(self._build_vips_config())
if len(self.virtual_routes):
config.extend(self.virtual_routes.build_config())
config.append('}')
return config
class KeepalivedConf(object):
"""A keepalived configuration."""
def __init__(self):
self.reset()
def reset(self):
self.instances = {}
def add_instance(self, instance):
self.instances[instance.vrouter_id] = instance
def get_instance(self, vrouter_id):
return self.instances.get(vrouter_id)
def build_config(self):
config = []
for instance in self.instances.values():
config.extend(instance.build_config())
return config
def get_config_str(self):
"""Generates and returns the keepalived configuration.
:return: Keepalived configuration string.
"""
return '\n'.join(self.build_config())
class KeepalivedManager(object):
"""Wrapper for keepalived.
    This wrapper makes it possible to write keepalived config files and to
    start/restart the keepalived process.
"""
def __init__(self, resource_id, config, process_monitor, conf_path='/tmp',
namespace=None):
self.resource_id = resource_id
self.config = config
self.namespace = namespace
self.process_monitor = process_monitor
self.conf_path = conf_path
def get_conf_dir(self):
confs_dir = os.path.abspath(os.path.normpath(self.conf_path))
conf_dir = os.path.join(confs_dir, self.resource_id)
return conf_dir
def get_full_config_file_path(self, filename, ensure_conf_dir=True):
conf_dir = self.get_conf_dir()
if ensure_conf_dir:
common_utils.ensure_dir(conf_dir)
return os.path.join(conf_dir, filename)
def _output_config_file(self):
config_str = self.config.get_config_str()
config_path = self.get_full_config_file_path('keepalived.conf')
utils.replace_file(config_path, config_str)
return config_path
def get_conf_on_disk(self):
config_path = self.get_full_config_file_path('keepalived.conf')
try:
with open(config_path) as conf:
return conf.read()
except (OSError, IOError) as e:
if e.errno != errno.ENOENT:
raise
def spawn(self):
config_path = self._output_config_file()
def callback(pid_file):
cmd = ['keepalived', '-P',
'-f', config_path,
'-p', pid_file,
'-r', '%s-vrrp' % pid_file]
return cmd
pm = self.get_process(callback=callback)
pm.enable(reload_cfg=True)
self.process_monitor.register(uuid=self.resource_id,
service_name=KEEPALIVED_SERVICE_NAME,
monitored_process=pm)
LOG.debug('Keepalived spawned with config %s', config_path)
def disable(self):
self.process_monitor.unregister(uuid=self.resource_id,
service_name=KEEPALIVED_SERVICE_NAME)
pm = self.get_process()
pm.disable(sig='15')
def get_process(self, callback=None):
return external_process.ProcessManager(
cfg.CONF,
self.resource_id,
self.namespace,
pids_path=self.conf_path,
default_cmd_callback=callback)
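# --- Editor's illustrative sketch (not part of the upstream module) ---
# A minimal, hedged example of composing a keepalived configuration with the
# classes above. The interface names, VRID and HA CIDR are hypothetical values
# chosen only for illustration; running it also assumes the neutron/oslo
# imports at the top of this module are importable.
if __name__ == '__main__':
    conf = KeepalivedConf()
    instance = KeepalivedInstance('MASTER', 'ha-veth0', vrouter_id=1,
                                  ha_cidrs=['169.254.192.0/18'])
    instance.add_vip('10.0.0.10/24', 'qg-hypothetical', scope=None)
    conf.add_instance(instance)
    print(conf.get_config_str())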
|
yanheven/neutron
|
neutron/agent/linux/keepalived.py
|
Python
|
apache-2.0
| 15,621
|
"""Groups API Tests for Version 1.0.
This is a testing template for the generated GroupsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.groups import GroupsAPI
from py3canvas.apis.groups import Group
from py3canvas.apis.groups import Groupmembership
class TestGroupsAPI(unittest.TestCase):
"""Tests for the GroupsAPI."""
def setUp(self):
self.client = GroupsAPI(secrets.instance_address, secrets.access_token)
def test_list_your_groups(self):
"""Integration test for the GroupsAPI.list_your_groups method."""
r = self.client.list_your_groups(context_type=None, include=None)
def test_list_groups_available_in_context_accounts(self):
"""Integration test for the GroupsAPI.list_groups_available_in_context_accounts method."""
account_id = None # Change me!!
r = self.client.list_groups_available_in_context_accounts(
account_id, include=None, only_own_groups=None
)
def test_list_groups_available_in_context_courses(self):
"""Integration test for the GroupsAPI.list_groups_available_in_context_courses method."""
course_id = None # Change me!!
r = self.client.list_groups_available_in_context_courses(
course_id, include=None, only_own_groups=None
)
def test_get_single_group(self):
"""Integration test for the GroupsAPI.get_single_group method."""
group_id = None # Change me!!
r = self.client.get_single_group(group_id, include=None)
def test_create_group_groups(self):
"""Integration test for the GroupsAPI.create_group_groups method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_create_group_group_categories(self):
"""Integration test for the GroupsAPI.create_group_group_categories method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_edit_group(self):
"""Integration test for the GroupsAPI.edit_group method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_delete_group(self):
"""Integration test for the GroupsAPI.delete_group method."""
group_id = None # Change me!!
r = self.client.delete_group(group_id)
def test_invite_others_to_group(self):
"""Integration test for the GroupsAPI.invite_others_to_group method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_list_group_s_users(self):
"""Integration test for the GroupsAPI.list_group_s_users method."""
group_id = None # Change me!!
r = self.client.list_group_s_users(
group_id, exclude_inactive=None, include=None, search_term=None
)
def test_upload_file(self):
"""Integration test for the GroupsAPI.upload_file method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_preview_processed_html(self):
"""Integration test for the GroupsAPI.preview_processed_html method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_group_activity_stream(self):
"""Integration test for the GroupsAPI.group_activity_stream method."""
group_id = None # Change me!!
r = self.client.group_activity_stream(group_id)
def test_group_activity_stream_summary(self):
"""Integration test for the GroupsAPI.group_activity_stream_summary method."""
group_id = None # Change me!!
r = self.client.group_activity_stream_summary(group_id)
def test_permissions(self):
"""Integration test for the GroupsAPI.permissions method."""
group_id = None # Change me!!
r = self.client.permissions(group_id, permissions=None)
def test_list_group_memberships(self):
"""Integration test for the GroupsAPI.list_group_memberships method."""
group_id = None # Change me!!
r = self.client.list_group_memberships(group_id, filter_states=None)
def test_get_single_group_membership_memberships(self):
"""Integration test for the GroupsAPI.get_single_group_membership_memberships method."""
group_id = None # Change me!!
membership_id = None # Change me!!
r = self.client.get_single_group_membership_memberships(group_id, membership_id)
def test_get_single_group_membership_users(self):
"""Integration test for the GroupsAPI.get_single_group_membership_users method."""
group_id = None # Change me!!
user_id = None # Change me!!
r = self.client.get_single_group_membership_users(group_id, user_id)
def test_create_membership(self):
"""Integration test for the GroupsAPI.create_membership method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_update_membership_memberships(self):
"""Integration test for the GroupsAPI.update_membership_memberships method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_update_membership_users(self):
"""Integration test for the GroupsAPI.update_membership_users method."""
# This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
pass
def test_leave_group_memberships(self):
"""Integration test for the GroupsAPI.leave_group_memberships method."""
group_id = None # Change me!!
membership_id = None # Change me!!
r = self.client.leave_group_memberships(group_id, membership_id)
def test_leave_group_users(self):
"""Integration test for the GroupsAPI.leave_group_users method."""
group_id = None # Change me!!
user_id = None # Change me!!
r = self.client.leave_group_users(group_id, user_id)
|
tylerclair/py3canvas
|
py3canvas/tests/groups.py
|
Python
|
mit
| 6,436
|
#!/usr/bin/env python2.7
'''
AFL crash analyzer, crash triage for the American Fuzzy Lop fuzzer
Copyright (C) 2015 floyd
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created on Apr 13, 2015
@author: floyd, http://floyd.ch, @floyd_ch
'''
from modules.CrashAnalysisConfig import CrashAnalysisConfig
from modules.FileDuplicateFinder import FileDuplicateFinder
from modules.SignalFinder import SignalFinder
from modules.OutputFinder import OutputFinder
from modules.InputMinimizer import InputMinimizer
from modules.FeelingLuckyExploiter import FeelingLuckyExploiter
from modules.ExploitableGdbPlugin import ExploitableGdbPlugin
from utilities.Logger import Logger
import os
import glob
def analyze_output_and_exploitability(config, signal_finder, uninteresting_signals, message_prefix=""):
for signal, signal_folder in signal_finder.get_folder_paths_for_signals_if_exist(uninteresting_signals):
skip = False
for cat in ExploitableGdbPlugin.get_classifications():
if os.path.exists(os.path.join(signal_folder, cat)):
Logger.warning("Seems like there are already exploitability analysis results, skipping. If you want to rerun: rm -r %s" % os.path.join(signal_folder, cat))
skip = True
if not skip:
Logger.info(message_prefix, "Discover stdout, stderr, gdb and ASAN output (signal %s)" % signal)
wildcard_for_run_output_files = os.path.join(signal_folder, "*" + config.run_extension)
if glob.glob(wildcard_for_run_output_files):
Logger.warning("Seems like there are already results from running the binaries, skipping. If you want to rerun: rm", wildcard_for_run_output_files)
else:
of = OutputFinder(config, signal_folder)
of.do_sane_output_runs()
Logger.info(message_prefix, "Analyzing exploitability (signal %s)" % signal)
egp = ExploitableGdbPlugin(config, signal_folder)
egp.divide_by_exploitability()
def main():
#Read the README before you start.
Logger.info("Setting up configuration")
gdb_script_64bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $rip, $rip+16:\n"
disassemble $rip, $rip+16
"""
gdb_script_32bit = r"""printf "[+] Disabling verbose and complaints\n"
set verbose off
set complaints 0
printf "[+] Backtrace:\n"
bt
printf "[+] info reg:\n"
info reg
printf "[+] exploitable:\n"
exploitable
printf "[+] disassemble $eip, $eip+16:\n"
disassemble $eip, $eip+16
"""
where_this_python_script_lives = os.path.dirname(os.path.realpath(__file__))
gdb_command = "gdb"
#gdb_command_osx = "/opt/local/bin/gdb-apple"
config_gm = CrashAnalysisConfig(where_this_python_script_lives,
target_binary_instrumented=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-afl/utilities/gm",
args_before="identify",
args_after="",
target_binary_plain=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-plain/utilities/gm",
target_binary_asan=where_this_python_script_lives+"/test-cases/gm/graphicsmagick-asan/utilities/gm",
env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"},
crash_dir=where_this_python_script_lives+"/test-cases/gm/crashes",
gdb_script=gdb_script_32bit,
gdb_binary=gdb_command
)
config_ffmpeg = CrashAnalysisConfig(where_this_python_script_lives,
target_binary_instrumented=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-afl/ffmpeg",
args_before="-i",
args_after="-loglevel quiet",
target_binary_plain=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-plain/ffmpeg",
# target_binary_asan=where_this_python_script_lives+"/test-cases/ffmpeg/ffmpeg-asan/ffmpeg",
env={"ASAN_SYMBOLIZER_PATH": "/usr/bin/llvm-symbolizer-3.4", "ASAN_OPTIONS": "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"},
crash_dir=where_this_python_script_lives+"/test-cases/ffmpeg/crashes",
gdb_script=gdb_script_32bit,
gdb_binary=gdb_command
)
chosen_config = config_ffmpeg
chosen_config.sanity_check()
#TODO: For some reason the ASAN environment variables are not correctly set when given above... so let's just set it in parent process already:
os.environ['ASAN_SYMBOLIZER_PATH'] = "/usr/bin/llvm-symbolizer-3.4"
os.environ['ASAN_OPTIONS'] = "symbolize=1:redzone=512:quarantine_size=512Mb:exitcode=1:abort_on_error=1"
#
Logger.info("Input crashes directory operations")
#
Logger.info("Removing README.txt files")
fdf = FileDuplicateFinder(chosen_config, chosen_config.original_crashes_directory)
fdf.remove_readmes()
Logger.info("Removing duplicates from original crashes folder (same file size + MD5)")
fdf.delete_duplicates_recursively()
Logger.info("Renaming files from original crashes folder so that the filename is a unique identifier. This allows us to copy all crash files into one directory (eg. for tmin output) if necessary, without name collisions")
fdf.rename_same_name_files()
#Logger.info("Renaming files to numeric values, as some programs prefer no special chars in filenames")
#fdf.rename_all_files()
#
Logger.info("Finding interesting signals (all crashes)")
#
sf_all_crashes = SignalFinder(chosen_config)
if os.path.exists(chosen_config.default_signal_directory):
Logger.warning("Seems like all crashes were already categorized by signal, skipping. If you want to rerun: rm -r", chosen_config.default_signal_directory)
else:
Logger.debug("Dividing files to output folder according to their signal")
sf_all_crashes.divide_by_signal()
    #Interesting signals: negative on OSX, 129 and above for Linux
#Uninteresting signals: We usually don't care about signals 0, 1, 2, etc. up to 128
uninteresting_signals = range(0,129)
analyze_output_and_exploitability(chosen_config, sf_all_crashes, uninteresting_signals, message_prefix="Interesting signals /")
Logger.info("Interesting signals / Minimizing input (afl-tmin)")
if os.path.exists(chosen_config.default_minimized_crashes_directory):
Logger.warning("Seems like crashes were already minimized, skipping. If you want to rerun: rm -r", chosen_config.default_minimized_crashes_directory)
else:
for signal, signal_folder in sf_all_crashes.get_folder_paths_for_signals_if_exist(uninteresting_signals):
Logger.debug("Minimizing inputs resulting in signal %i" % signal)
im = InputMinimizer(chosen_config, signal_folder)
im.minimize_testcases()
Logger.info("Interesting signals / Minimized inputs / Deduplication")
fdf_minimized = FileDuplicateFinder(chosen_config, chosen_config.default_minimized_crashes_directory)
fdf_minimized.delete_duplicates_recursively()
#
Logger.info("Interesting signals / Minimized inputs / Finding interesting signals")
#
sf_minimized_crashes = SignalFinder(chosen_config, chosen_config.default_minimized_crashes_directory, os.path.join(chosen_config.output_dir, "minimized-per-signal"))
if os.path.exists(sf_minimized_crashes.output_dir):
Logger.warning("Seems like minimized crashes were already categorized by signal, skipping. If you want to rerun: rm -r", sf_minimized_crashes.output_dir)
else:
os.mkdir(sf_minimized_crashes.output_dir)
Logger.info("Dividing files to output folder according to their signal")
sf_minimized_crashes.divide_by_signal(0)
analyze_output_and_exploitability(chosen_config, sf_minimized_crashes, uninteresting_signals, message_prefix="Interesting signals / Minimized inputs /")
# # If you are in the mood to waste a little CPU time, run this
# Logger.info("Found interesting_signals (interesting interesting_signals) / Minimized inputs (interested interesting_signals) / Feeling lucky auto exploitation")
# #
# fle = FeelingLuckyExploiter(chosen_config, sf_minimized_crashes.output_dir)
# #os.mkdir(fle.output_dir)
# fle.run_forest_run()
#TODO: develop
#- exploitable script, something along: less `grep -l 'Exploitability Classification: EXPLOITABLE' output/per-signal/*/*gdb*`
cleanup(chosen_config)
def cleanup(config):
for path, _, files in os.walk(config.tmp_dir):
for filename in files:
os.remove(os.path.join(path, filename))
if __name__ == "__main__":
main()
|
chubbymaggie/afl-crash-analyzer
|
AflCrashAnalyzer.py
|
Python
|
gpl-3.0
| 9,878
|
# -*- coding: utf-8 -*-
import re
from random import randrange
from urllib import quote, unquote
from time import time
from module.common.json_layer import json_loads
from module.plugins.Hoster import Hoster
from module.utils import parseFileSize
class RealdebridCom(Hoster):
__name__ = "RealdebridCom"
__type__ = "hoster"
__version__ = "0.53"
__pattern__ = r'https?://(?:[^/]*\.)?real-debrid\..*'
__description__ = """Real-Debrid.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Devirex Hazzard", "naibaf_11@yahoo.de")]
def getFilename(self, url):
try:
name = unquote(url.rsplit("/", 1)[1])
except IndexError:
name = "Unknown_Filename..."
if not name or name.endswith(".."): #: incomplete filename, append random stuff
name += "%s.tmp" % randrange(100, 999)
return name
def setup(self):
self.chunkLimit = 3
self.resumeDownload = True
def process(self, pyfile):
if re.match(self.__pattern__, pyfile.url):
new_url = pyfile.url
elif not self.account:
self.logError(_("Please enter your %s account or deactivate this plugin") % "Real-debrid")
self.fail(_("No Real-debrid account provided"))
else:
self.logDebug("Old URL: %s" % pyfile.url)
password = self.getPassword().splitlines()
if not password:
password = ""
else:
password = password[0]
url = "https://real-debrid.com/ajax/unrestrict.php?lang=en&link=%s&password=%s&time=%s" % (
quote(pyfile.url, ""), password, int(time() * 1000))
page = self.load(url)
data = json_loads(page)
self.logDebug("Returned Data: %s" % data)
if data['error'] != 0:
if data['message'] == "Your file is unavailable on the hoster.":
self.offline()
else:
self.logWarning(data['message'])
self.tempOffline()
else:
if pyfile.name is not None and pyfile.name.endswith('.tmp') and data['file_name']:
pyfile.name = data['file_name']
pyfile.size = parseFileSize(data['file_size'])
new_url = data['generated_links'][0][-1]
if self.getConfig("https"):
new_url = new_url.replace("http://", "https://")
else:
new_url = new_url.replace("https://", "http://")
if new_url != pyfile.url:
self.logDebug("New URL: %s" % new_url)
if pyfile.name.startswith("http") or pyfile.name.startswith("Unknown") or pyfile.name.endswith('..'):
                #only use when name wasn't already set
pyfile.name = self.getFilename(new_url)
self.download(new_url, disposition=True)
check = self.checkDownload(
{"error": "<title>An error occured while processing your request</title>"})
if check == "error":
            #usually this download can safely be retried
            self.retry(wait_time=60, reason=_("An error occurred while generating link"))
|
sebdelsol/pyload
|
module/plugins/hoster/RealdebridCom.py
|
Python
|
gpl-3.0
| 3,217
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-22 12:05
from __future__ import unicode_literals
from django.db import migrations
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('churchmanager', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='church',
name='wide_crop',
field=image_cropping.fields.ImageRatioField('photo', '768x240', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='wide crop'),
),
]
|
bm424/churchmanager
|
migrations/0002_church_wide_crop.py
|
Python
|
mit
| 641
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019-2020 Philipp Wolfer
# Copyright (C) 2020 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import namedtuple
import re
class VersionError(Exception):
pass
class Version(namedtuple('VersionBase', 'major minor patch identifier revision')):
_version_re = re.compile(r"(\d+)(?:[._](\d+)(?:[._](\d+)[._]?(?:(dev|a|alpha|b|beta|rc|final)[._]?(\d+))?)?)?$")
_identifiers = {
'dev': 0,
'alpha': 1,
'a': 1,
'beta': 2,
'b': 2,
'rc': 3,
'final': 4
}
def __new__(cls, major, minor=0, patch=0, identifier='final', revision=0):
if identifier not in cls.valid_identifiers():
raise VersionError("Should be either 'final', 'dev', 'alpha', 'beta' or 'rc'")
identifier = {'a': 'alpha', 'b': 'beta'}.get(identifier, identifier)
try:
major = int(major)
minor = int(minor)
patch = int(patch)
revision = int(revision)
except (TypeError, ValueError):
raise VersionError("major, minor, patch and revision must be integer values")
return super(Version, cls).__new__(cls, major, minor, patch, identifier, revision)
@classmethod
def from_string(cls, version_str):
match = cls._version_re.search(version_str)
if match:
(major, minor, patch, identifier, revision) = match.groups()
major = int(major)
if minor is None:
return Version(major)
minor = int(minor)
if patch is None:
return Version(major, minor)
patch = int(patch)
if identifier is None:
return Version(major, minor, patch)
revision = int(revision)
return Version(major, minor, patch, identifier, revision)
raise VersionError("String '%s' does not match regex '%s'" % (version_str,
cls._version_re.pattern))
@classmethod
def valid_identifiers(cls):
return set(cls._identifiers.keys())
def to_string(self, short=False):
if short and self.identifier in ('alpha', 'beta'):
version = self._replace(identifier=self.identifier[0])
else:
version = self
if short and version.identifier == 'final':
if version.patch == 0:
version_str = '%d.%d' % version[:2]
else:
version_str = '%d.%d.%d' % version[:3]
elif short and version.identifier in ('a', 'b', 'rc'):
version_str = '%d.%d.%d%s%d' % version
else:
version_str = '%d.%d.%d.%s%d' % version
return version_str
@property
def sortkey(self):
return self[:3] + (self._identifiers.get(self.identifier, 0), self.revision)
def __str__(self):
return self.to_string()
def __lt__(self, other):
if not isinstance(other, Version):
other = Version(*other)
return self.sortkey < other.sortkey
def __le__(self, other):
if not isinstance(other, Version):
other = Version(*other)
return self.sortkey <= other.sortkey
def __gt__(self, other):
if not isinstance(other, Version):
other = Version(*other)
return self.sortkey > other.sortkey
def __ge__(self, other):
if not isinstance(other, Version):
other = Version(*other)
return self.sortkey >= other.sortkey
def __eq__(self, other):
if not isinstance(other, Version):
other = Version(*other)
return self.sortkey == other.sortkey
def __ne__(self, other):
if not isinstance(other, Version):
other = Version(*other)
return self.sortkey != other.sortkey
def __hash__(self):
return super().__hash__()
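# --- Editor's illustrative sketch (not part of the upstream module) ---
# A small, hedged demonstration of parsing and comparing versions with the
# class above; the version strings are arbitrary examples.
if __name__ == '__main__':
    beta = Version.from_string('2.6.0.beta2')
    final = Version(2, 6)
    assert beta < final                    # 'beta' sorts before 'final'
    print(beta.to_string(short=True))      # -> 2.6.0b2
    print(final.to_string(short=True))     # -> 2.6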
|
musicbrainz/picard
|
picard/version.py
|
Python
|
gpl-2.0
| 4,658
|
#!/usr/bin/env python
# coding=utf-8
"""
Diophantine equation
Problem 66
Consider quadratic Diophantine equations of the form:
x^2 – Dy^2 = 1
For example, when D=13, the minimal solution in x is 649^2 – 13×180^2 = 1.
It can be assumed that there are no solutions in positive integers when D is
square.
By finding minimal solutions in x for D = {2, 3, 5, 6, 7}, we obtain the
following:
3^2 – 2×2^2 = 1
2^2 – 3×1^2 = 1
9^2 – 5×4^2 = 1
5^2 – 6×2^2 = 1
8^2 – 7×3^2 = 1
Hence, by considering minimal solutions in x for D ≤ 7, the largest x is
obtained when D=5.
Find the value of D ≤ 1000 in minimal solutions of x for which the largest
value of x is obtained.
"""
from __future__ import print_function
from pe046_goldbachs_other_conjecture import is_square_number
from pe064_odd_period_square_roots import sqrt_cf
from pe065_convergents_of_e import converget_cf
def continued_fraction(p, q):
    """Continued fraction expansion of p/q.
    :param p: numerator
    :param q: denominator
    :return: list of continued fraction terms
"""
vals = []
while q > 0:
r, p = divmod(p, q)
vals.append(r)
p, q = q, p
return vals
def pell(D):
for x, y in converget_cf(sqrt_cf(D)):
if x * x - D * y * y == 1:
return x, D
if __name__ == '__main__':
# 661
print(max(pell(D) for D in range(2, 1001) if not is_square_number(D)))
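# --- Editor's note: illustrative checks, kept as comments so the script's
# behaviour is unchanged; they assume the helpers imported above behave as in
# the earlier problems ---
#   649**2 - 13 * 180**2 == 1                     (the D=13 example above)
#   continued_fraction(649, 180) == [3, 1, 1, 1, 1, 6, 1, 1, 2]
#   pell(13) is therefore expected to return (649, 13)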
|
openqt/algorithms
|
projecteuler/ac/old/pe066_diophantine_equation.py
|
Python
|
gpl-3.0
| 1,405
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unified data model in workflow, based on Deposition model."""
import os
from invenio.base.globals import cfg
from invenio_deposit.models import (
Deposition,
Agent,
DepositionDraft,
SubmissionInformationPackage,
DepositionStorage,
DepositionFile,
FilenameAlreadyExists,
)
from invenio_deposit.storage import Storage
def create_payload(obj, eng):
"""Create a proper data model inside obj.data."""
p = Payload.create(workflow_object=obj, type=eng.name)
p.save()
class PayloadStorage(Storage):
"""Payload storage backend.
Saves files to a folder (<WORKFLOWS_STORAGEDIR>/<payload_id>/).
"""
def __init__(self, payload_id):
"""Initialize storage."""
self.fs_path = os.path.join(
cfg['WORKFLOWS_STORAGEDIR'],
str(payload_id)
)
class Payload(Deposition):
"""Wrap a BibWorkflowObject."""
def __init__(self, workflow_object, type=None, user_id=None):
self.files = []
self.drafts = {}
self.type = self.get_type(type)
self.title = ''
self.sips = []
super(Payload, self).__init__(workflow_object, type, user_id)
@classmethod
def get_type(self, type_or_id):
"""Get type."""
from invenio_workflows.registry import workflows
return workflows.get(type_or_id)
@classmethod
def create(cls, user=None, type=None, workflow_object=None):
"""
Create a new deposition object.
To persist the deposition, you must call save() on the created object.
If no type is defined, the default deposition type will be assigned.
@param user: The owner of the deposition
@param type: Deposition type identifier.
"""
if user is not None:
user = user.get_id()
if workflow_object:
sip = SubmissionInformationPackage(metadata=workflow_object.data)
workflow_object.data = {
"sips": [sip.__getstate__()],
"files": [],
"title": "",
"drafts": {},
"type": type,
}
workflow_object.set_data(workflow_object.data)
workflow_object.save()
# Note: it is correct to pass 'type' and not 't' below to constructor.
obj = cls(workflow_object=workflow_object, type=type, user_id=user)
return obj
def __setstate__(self, state):
"""Deserialize deposition from state stored in BibWorkflowObject."""
self.type = self.get_type(state['type']) # FIXME only difference
self.title = state['title']
self.files = [
DepositionFile.factory(
f_state,
uuid=f_state['id'],
backend=DepositionStorage(self.id),
)
for f_state in state['files']
]
self.drafts = dict(
[(d_id, DepositionDraft.factory(d_state, d_id,
deposition_ref=self))
for d_id, d_state in state['drafts'].items()]
)
self.sips = [
SubmissionInformationPackage.factory(s_state, uuid=s_state['id'])
for s_state in state.get('sips', [])
]
def prepare_sip(self, from_request_context=False):
sip = self.get_latest_sip()
if sip is None:
sip = self.create_sip()
if 'files' in sip.metadata:
sip.metadata['fft'] = sip.metadata['files']
del sip.metadata['files']
sip.agents = [Agent(role='creator',
from_request_context=from_request_context)]
self.update()
|
Dziolas/inspire-next
|
inspire/modules/workflows/models.py
|
Python
|
gpl-2.0
| 4,446
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.manager import Session
from flexget.plugins.filter.series import SeriesTask, Series, get_latest_release, get_latest_season_pack_release
from flexget.plugins.filter.series import get_latest_episode_release
plugin_name = 'next_series_seasons'
log = logging.getLogger(plugin_name)
MAX_SEASON_DIFF_WITHOUT_BEGIN = 15
MAX_SEASON_DIFF_WITH_BEGIN = 30
class NextSeriesSeasons(object):
"""
Emit next season number from all series configured in this task.
Supports only 'ep' mode series.
"""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'from_start': {'type': 'boolean', 'default': False},
'backfill': {'type': 'boolean', 'default': False},
'threshold': {'type': 'integer', 'minimum': 0}
},
'additionalProperties': False
}
]
}
def __init__(self):
self.rerun_entries = []
def season_identifiers(self, season):
return ['S%02d' % season]
def search_entry(self, series, season, task, rerun=True):
# Extract the alternate names for the series
alts = [alt.alt_name for alt in series.alternate_names]
# Also consider series name without parenthetical (year, country) an alternate name
paren_match = re.match(r'(.+?)( \(.+\))?$', series.name)
if paren_match.group(2):
alts.append(paren_match.group(1))
search_strings = ['%s %s' % (series.name, id) for id in self.season_identifiers(season)]
series_id = 'S%02d' % season
for alt in alts:
search_strings.extend(['%s %s' % (alt, id) for id in self.season_identifiers(season)])
entry = Entry(title=search_strings[0], url='',
search_strings=search_strings,
series_name=series.name,
series_alternate_names=alts, # Not sure if this field is useful down the road.
series_season=season,
season_pack_lookup=True,
series_id=series_id,
series_id_type=series.identified_by)
if rerun:
entry.on_complete(self.on_search_complete, task=task, identified_by=series.identified_by)
return entry
def on_task_input(self, task, config):
if not config:
return
if isinstance(config, bool):
config = {}
if task.is_rerun:
# Just return calculated next eps on reruns
entries = self.rerun_entries
self.rerun_entries = []
return entries
else:
self.rerun_entries = []
threshold = config.get('threshold')
entries = []
impossible = {}
with Session() as session:
for seriestask in session.query(SeriesTask).filter(SeriesTask.name == task.name).all():
series = seriestask.series
log.trace('evaluating %s', series.name)
if not series:
# TODO: How can this happen?
log.debug('Found SeriesTask item without series specified. Cleaning up.')
session.delete(seriestask)
continue
if series.identified_by not in ['ep']:
log.trace('unsupported identified_by scheme')
reason = series.identified_by or 'auto'
impossible.setdefault(reason, []).append(series.name)
continue
low_season = 0
# Don't look for seasons older than begin ep
if series.begin and series.begin.season and series.begin.season > 1:
low_season = max(series.begin.season - 1, 0)
new_season = None
check_downloaded = not config.get('backfill')
latest_season = get_latest_release(series, downloaded=check_downloaded)
if latest_season:
if latest_season.season <= low_season:
latest_season = new_season = low_season + 1
elif latest_season.season in series.completed_seasons:
latest_season = new_season = latest_season.season + 1
else:
latest_season = latest_season.season
else:
latest_season = low_season + 1
if (latest_season - low_season > MAX_SEASON_DIFF_WITHOUT_BEGIN and not series.begin) or (
series.begin and latest_season - series.begin.season > MAX_SEASON_DIFF_WITH_BEGIN):
if series.begin:
log.error('Series `%s` has a begin episode set (`%s`), but the season currently being processed'
' (%s) is %s seasons later than it. To prevent emitting incorrect seasons, this '
'series will not emit unless the begin episode is adjusted to a season that is less '
'than %s seasons from season %s.', series.name, series.begin.identifier,
latest_season, (latest_season - series.begin.season), MAX_SEASON_DIFF_WITH_BEGIN,
latest_season)
else:
log.error('Series `%s` does not have a begin episode set and continuing this task would result '
'in more than %s seasons being emitted. To prevent emitting incorrect seasons, this '
'series will not emit unless the begin episode is set in your series config or by '
'using the CLI subcommand `series begin "%s" <SxxExx>`.', series.name,
MAX_SEASON_DIFF_WITHOUT_BEGIN, series.name)
continue
for season in range(latest_season, low_season, -1):
if season in series.completed_seasons:
log.debug('season %s is marked as completed, skipping', season)
continue
if threshold is not None and series.episodes_for_season(season) > threshold:
                        log.debug('season %s has met threshold of %s, skipping', season, threshold)
continue
log.trace('Evaluating season %s for series `%s`', season, series.name)
latest = get_latest_release(series, season=season, downloaded=check_downloaded)
if series.begin and season == series.begin.season and (not latest or latest < series.begin):
# In case series.begin season is already completed, look in next available season
lookup_season = series.begin.season
while lookup_season in series.completed_seasons:
lookup_season += 1
entries.append(self.search_entry(series, lookup_season, task))
elif latest:
entries.append(self.search_entry(series, latest.season, task))
# First iteration of a new season with no show begin and show has downloads
elif new_season and season == new_season:
entries.append(self.search_entry(series, season, task))
else:
if config.get('from_start') or config.get('backfill'):
entries.append(self.search_entry(series, season, task))
else:
log.verbose('Series `%s` has no history. Set the begin option in your config, '
'or use the CLI subcommand `series begin "%s" <SxxExx>` '
'to set the first episode to emit', series.name, series.name)
break
# Skip older seasons if we are not in backfill mode
if not config.get('backfill'):
log.debug('backfill is not enabled; skipping older seasons')
break
for reason, series in impossible.items():
log.verbose('Series `%s` with identified_by value `%s` are not supported. ',
', '.join(sorted(series)), reason)
return entries
def on_search_complete(self, entry, task=None, identified_by=None, **kwargs):
"""Decides whether we should look for next season based on whether we found/accepted any seasons."""
with Session() as session:
series = session.query(Series).filter(Series.name == entry['series_name']).first()
latest = get_latest_season_pack_release(series)
latest_ep = get_latest_episode_release(series, season=entry['series_season'])
if entry.accepted:
if not latest and latest_ep:
log.debug('season lookup produced an episode result; assuming no season match, no need to rerun')
return
else:
log.debug('%s %s was accepted, rerunning to look for next season.', entry['series_name'],
entry['series_id'])
if not any(e.get('series_season') == latest.season + 1 for e in self.rerun_entries):
self.rerun_entries.append(self.search_entry(series, latest.season + 1, task))
# Increase rerun limit by one if we have matches, this way
# we keep searching as long as matches are found!
# TODO: this should ideally be in discover so it would be more generic
task.max_reruns += 1
task.rerun(plugin=plugin_name, reason='Look for next season')
elif latest and not latest.completed:
# There are known releases of this season, but none were accepted
return
@event('plugin.register')
def register_plugin():
plugin.register(NextSeriesSeasons, plugin_name, api_ver=2)
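# --- Editor's note: illustrative task configuration, not part of the plugin ---
# Per the schema above, the plugin accepts either a bare boolean or an options
# mapping. A hedged YAML sketch (values are examples only):
#
#   next_series_seasons:
#     from_start: no
#     backfill: no
#     threshold: 10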
|
LynxyssCZ/Flexget
|
flexget/plugins/input/next_series_seasons.py
|
Python
|
mit
| 10,522
|
import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
def _contains_nan(x):
"""Returns whether the input array has NaN values.
Args:
x (numpy.ndarray or cupy.ndarray): Array to be checked.
Returns:
bool: True if the input has NaN values.
"""
if x.dtype.kind in ('f', 'c'):
with cuda.get_device_from_array(x):
return cuda.get_array_module(x).isnan(x).any()
else:
return False
def copyto(dst, src):
"""Copies the elements of an ndarray to those of another one.
This function can copy the CPU/GPU arrays to the destination arrays on
another device.
Args:
dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
Destination array.
src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
Source array.
"""
if isinstance(dst, numpy.ndarray):
numpy.copyto(dst, numpy.asarray(cuda.to_cpu(src)))
elif isinstance(dst, intel64.mdarray):
intel64.ideep.basic_copyto(dst, cuda.to_cpu(src))
elif isinstance(dst, cuda.ndarray):
if isinstance(src, chainer.get_cpu_array_types()):
src = numpy.asarray(src)
if dst.flags.c_contiguous or dst.flags.f_contiguous:
dst.set(src)
else:
cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))
elif isinstance(src, cuda.ndarray):
cuda.cupy.copyto(dst, src)
else:
raise TypeError('cannot copy from non-array object of type {}'
.format(type(src)))
else:
raise TypeError('cannot copy to non-array object of type {}'.format(
type(dst)))
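# --- Editor's illustrative sketch (not part of the upstream module) ---
# A minimal, hedged example of copyto() between two NumPy arrays; this only
# exercises the pure-CPU branch above and assumes no GPU/iDeep backend is
# involved.
if __name__ == '__main__':
    src = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
    dst = numpy.empty_like(src)
    copyto(dst, src)
    assert (dst == src).all()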
|
rezoo/chainer
|
chainer/backend.py
|
Python
|
mit
| 1,748
|
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
from . import controller_account
from . import controller_catalog
from . import controller_sale_order
from . import controller_technical
|
grap/odoo-eshop
|
odoo_eshop/eshop_app/controllers/__init__.py
|
Python
|
agpl-3.0
| 187
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsInterpolatedLineSymbolLayer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Vincent Cloares'
__date__ = '2021-04'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.testing import unittest
from qgis.PyQt.QtCore import (QDir,
QPointF)
from qgis.PyQt.QtGui import (QImage,
QPainter,
QColor,
QPolygonF)
from qgis.core import (QgsRenderChecker,
QgsInterpolatedLineSymbolLayer,
QgsInterpolatedLineWidth,
QgsInterpolatedLineColor,
QgsColorRampShader,
QgsStyle,
QgsMapSettings,
QgsLineSymbol,
QgsGeometry,
QgsFeature,
QgsRenderContext,
QgsSymbolLayer,
QgsProperty)
class TestQgsLineSymbolLayers(unittest.TestCase):
def setUp(self):
self.report = "<h1>Python QgsInterpolatedLineSymbolLayer Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'symbollayer_' + name + ".png"
image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("symbol_interpolatedline")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 0)
self.report += checker.report()
        print(self.report)
return result
def renderImage(self, interpolated_width, interpolated_color, image_name):
layer = QgsInterpolatedLineSymbolLayer()
layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLineStartWidthValue, QgsProperty.fromExpression('5'))
layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLineEndWidthValue, QgsProperty.fromExpression('1'))
layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLineStartColorValue, QgsProperty.fromExpression('2'))
layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLineEndColorValue, QgsProperty.fromExpression('6'))
layer.setInterpolatedWidth(interpolated_width)
layer.setInterpolatedColor(interpolated_color)
symbol = QgsLineSymbol()
symbol.changeSymbolLayer(0, layer)
image = QImage(200, 200, QImage.Format_RGB32)
painter = QPainter()
ms = QgsMapSettings()
geom = QgsGeometry.fromWkt('LineString (0 0, 10 0, 10 10, 0 10, 0 5)')
f = QgsFeature()
f.setGeometry(geom)
extent = geom.constGet().boundingBox()
# buffer extent by 10%
extent = extent.buffered((extent.height() + extent.width()) / 20.0)
ms.setExtent(extent)
ms.setOutputSize(image.size())
context = QgsRenderContext.fromMapSettings(ms)
context.setPainter(painter)
context.setScaleFactor(96 / 25.4) # 96 DPI
painter.begin(image)
image.fill(QColor(255, 255, 255))
symbol.startRender(context)
symbol.renderFeature(f, context)
symbol.stopRender(context)
painter.end()
self.assertTrue(self.imageCheck(image_name, image_name, image))
    def testFixedColorFixedWidth(self):
        """ test rendering an interpolated line with fixed width and fixed color"""
interpolated_width = QgsInterpolatedLineWidth()
interpolated_color = QgsInterpolatedLineColor()
interpolated_width.setIsVariableWidth(False)
interpolated_width.setFixedStrokeWidth(5)
interpolated_color.setColor(QColor(255, 0, 0))
interpolated_color.setColoringMethod(QgsInterpolatedLineColor.SingleColor)
self.renderImage(interpolated_width, interpolated_color, 'interpolatedlinesymbollayer_1')
    def testRenderNoFeature(self):
        """ test that rendering an interpolated line outside of a map render works"""
interpolated_width = QgsInterpolatedLineWidth()
interpolated_color = QgsInterpolatedLineColor()
interpolated_width.setIsVariableWidth(False)
interpolated_width.setFixedStrokeWidth(5)
interpolated_color.setColor(QColor(255, 0, 0))
interpolated_color.setColoringMethod(QgsInterpolatedLineColor.SingleColor)
layer = QgsInterpolatedLineSymbolLayer()
layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLineStartWidthValue, QgsProperty.fromExpression('5'))
layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLineEndWidthValue, QgsProperty.fromExpression('1'))
layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLineStartColorValue, QgsProperty.fromExpression('2'))
layer.setDataDefinedProperty(QgsSymbolLayer.PropertyLineEndColorValue, QgsProperty.fromExpression('6'))
layer.setInterpolatedWidth(interpolated_width)
layer.setInterpolatedColor(interpolated_color)
symbol = QgsLineSymbol()
symbol.changeSymbolLayer(0, layer)
image = QImage(200, 200, QImage.Format_RGB32)
image.fill(QColor(255, 255, 255))
painter = QPainter(image)
context = QgsRenderContext.fromQPainter(painter)
symbol.startRender(context)
symbol.renderPolyline(QPolygonF([QPointF(30, 50), QPointF(100, 70), QPointF(150, 30)]), None, context)
symbol.stopRender(context)
painter.end()
self.assertTrue(self.imageCheck('interpolatedlinesymbollayer_no_feature', 'interpolatedlinesymbollayer_no_feature', image))
    def testVaryingColorFixedWidth(self):
        """ test rendering an interpolated line with fixed width and varying color"""
interpolated_width = QgsInterpolatedLineWidth()
interpolated_color = QgsInterpolatedLineColor()
interpolated_width.setIsVariableWidth(False)
interpolated_width.setFixedStrokeWidth(5)
color_ramp = QgsColorRampShader(0, 7, QgsStyle.defaultStyle().colorRamp('Viridis'),
QgsColorRampShader.Interpolated)
color_ramp.classifyColorRamp(10)
interpolated_color.setColor(color_ramp)
interpolated_color.setColoringMethod(QgsInterpolatedLineColor.ColorRamp)
self.renderImage(interpolated_width, interpolated_color, 'interpolatedlinesymbollayer_2')
    def testFixedColorVaryingWidth(self):
        """ test rendering an interpolated line with varying width and fixed color"""
interpolated_width = QgsInterpolatedLineWidth()
interpolated_color = QgsInterpolatedLineColor()
interpolated_width.setIsVariableWidth(True)
interpolated_width.setMinimumValue(1)
interpolated_width.setMaximumValue(8)
interpolated_width.setMinimumWidth(1)
interpolated_width.setMaximumWidth(10)
interpolated_color.setColor(QColor(0, 255, 0))
interpolated_color.setColoringMethod(QgsInterpolatedLineColor.SingleColor)
self.renderImage(interpolated_width, interpolated_color, 'interpolatedlinesymbollayer_3')
    def testVaryingColorVaryingWidth(self):
        """ test rendering an interpolated line with varying width and varying color"""
interpolated_width = QgsInterpolatedLineWidth()
interpolated_color = QgsInterpolatedLineColor()
interpolated_width.setIsVariableWidth(True)
interpolated_width.setMinimumValue(1)
interpolated_width.setMaximumValue(8)
interpolated_width.setMinimumWidth(1)
interpolated_width.setMaximumWidth(10)
color_ramp = QgsColorRampShader(0, 7, QgsStyle.defaultStyle().colorRamp('Viridis'),
QgsColorRampShader.Interpolated)
color_ramp.classifyColorRamp(10)
interpolated_color.setColor(color_ramp)
interpolated_color.setColoringMethod(QgsInterpolatedLineColor.ColorRamp)
self.renderImage(interpolated_width, interpolated_color, 'interpolatedlinesymbollayer_4')
    def testVaryingColorVaryingWidthDiscrete(self):
        """ test rendering an interpolated line with varying width and varying color with a discrete color ramp """
interpolated_width = QgsInterpolatedLineWidth()
interpolated_color = QgsInterpolatedLineColor()
interpolated_width.setIsVariableWidth(True)
interpolated_width.setMinimumValue(1)
interpolated_width.setMaximumValue(8)
interpolated_width.setMinimumWidth(1)
interpolated_width.setMaximumWidth(10)
color_ramp = QgsColorRampShader(2, 7, QgsStyle.defaultStyle().colorRamp('RdGy'),
QgsColorRampShader.Discrete)
color_ramp.classifyColorRamp(5)
interpolated_color.setColor(color_ramp)
interpolated_color.setColoringMethod(QgsInterpolatedLineColor.ColorRamp)
self.renderImage(interpolated_width, interpolated_color, 'interpolatedlinesymbollayer_5')
    def testVaryingColorVaryingWidthExact(self):
        """ test rendering an interpolated line with varying width and varying color with an exact color ramp """
interpolated_width = QgsInterpolatedLineWidth()
interpolated_color = QgsInterpolatedLineColor()
interpolated_width.setIsVariableWidth(True)
interpolated_width.setMinimumValue(1)
interpolated_width.setMaximumValue(8)
interpolated_width.setMinimumWidth(1)
interpolated_width.setMaximumWidth(10)
color_ramp = QgsColorRampShader(0, 10, QgsStyle.defaultStyle().colorRamp('Viridis'),
QgsColorRampShader.Exact)
color_ramp.classifyColorRamp(10)
interpolated_color.setColor(color_ramp)
interpolated_color.setColoringMethod(QgsInterpolatedLineColor.ColorRamp)
self.renderImage(interpolated_width, interpolated_color, 'interpolatedlinesymbollayer_6')
if __name__ == '__main__':
unittest.main()
|
tomtor/QGIS
|
tests/src/python/test_qgsinterpolatedlinesymbollayers.py
|
Python
|
gpl-2.0
| 10,544
|
class ProviderMachine(object):
"""
Representative of a machine resource created by a
:class:`MachineProvider`. The object is typically annotated by the
machine provider, such that the provider can perform subsequent
actions upon it, using the additional metadata for identification,
without leaking these details to consumers of the
:class:`MachineProvider` api.
"""
def __init__(self, instance_id, dns_name=None, private_dns_name=None,
state="unknown"):
self.instance_id = instance_id
# ideally this would be ip_address, but txaws doesn't expose it.
self.dns_name = dns_name
self.private_dns_name = private_dns_name
self.state = state
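# --- Editor's illustrative sketch (not part of the upstream module) ---
# ProviderMachine is a plain value object; a hedged example of how a provider
# implementation might construct one (the identifiers below are hypothetical):
if __name__ == '__main__':
    machine = ProviderMachine(
        "i-0123456789abcdef0",
        dns_name="ec2-198-51-100-1.example.com",
        private_dns_name="ip-10-0-0-1.internal",
        state="running")
    print(machine.instance_id)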
|
mcclurmc/juju
|
juju/machine/__init__.py
|
Python
|
agpl-3.0
| 732
|
from django.test import TestCase
from store.forms import ReviewForm
from store.models import Review
from .factories import *
class ReviewFormTest(TestCase):
def test_form_validation_for_blank_items(self):
p1 = ProductFactory.create()
form = ReviewForm(
data={'name':'', 'text': '', 'product':p1.id})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'],["Please fill in the review"])
self.assertEqual(form.errors['rating'],["Please leave a rating"])
def test_form_validation_for_invalid_review(self):
p1 = ProductFactory.create()
form = ReviewForm(
data={'name':'', 'text': '', 'rating': 0, 'product':p1.id})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'],["Please fill in the review"])
self.assertEqual(form.errors['rating'],["Please leave a valid rating"])
def test_form_validation_for_required_name_field(self):
p1 = ProductFactory.create()
form = ReviewForm(
data={'name':'', 'text': 'Hello', 'rating': 2, 'product':p1.id})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['name'],['Please fill in your name'])
def test_form_save_handles_saving_product_reviews(self):
prod = ProductFactory.create()
form = ReviewForm(
data={'name':'Kevin', 'text': 'Review', 'rating': 3, 'product':prod.id})
new_review = form.save()
self.assertEqual(new_review, Review.objects.first())
self.assertEqual(new_review.name, 'Kevin')
self.assertEqual(new_review.product, prod)
|
kevgathuku/compshop
|
store/tests/test_forms.py
|
Python
|
bsd-3-clause
| 1,654
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import re
import shlex
import logging as log
from targetd.utils import invoke
class Export(object):
SECURE = 0x00000001
RW = 0x00000002
RO = 0x00000004
SYNC = 0x00000008
ASYNC = 0x00000010
NO_WDELAY = 0x00000020
NOHIDE = 0x00000040
CROSSMNT = 0x00000080
NO_SUBTREE_CHECK = 0x00000100
INSECURE_LOCKS = 0x00000200
ROOT_SQUASH = 0x00000400
NO_ROOT_SQUASH = 0x00000800
ALL_SQUASH = 0x00001000
WDELAY = 0x00002000
HIDE = 0x00004000
INSECURE = 0x00008000
NO_ALL_SQUASH = 0x00010000
_conflicting = (((RW | RO), "Both RO & RW set"),
((INSECURE | SECURE), "Both INSECURE & SECURE set"),
((SYNC | ASYNC), "Both SYNC & ASYNC set"),
((HIDE | NOHIDE), "Both HIDE & NOHIDE set"),
((WDELAY | NO_WDELAY), "Both WDELAY & NO_WDELAY set"),
((ROOT_SQUASH | NO_ROOT_SQUASH),
"Only one option of ROOT_SQUASH, NO_ROOT_SQUASH, "
"can be specified")
)
bool_option = {
"secure": SECURE,
"rw": RW,
"ro": RO,
"sync": SYNC,
"async": ASYNC,
"no_wdelay": NO_WDELAY,
"nohide": NOHIDE,
"crossmnt": CROSSMNT,
"no_subtree_check": NO_SUBTREE_CHECK,
"insecure_locks": INSECURE_LOCKS,
"root_squash": ROOT_SQUASH,
"all_squash": ALL_SQUASH,
"wdelay": WDELAY,
"hide": HIDE,
"insecure": INSECURE,
"no_root_squash": NO_ROOT_SQUASH,
"no_all_squash": NO_ALL_SQUASH
}
key_pair = dict(
mountpoint=str,
mp=str,
fsid=None,
refer=str,
replicas=str,
anonuid=int,
anongid=int,
sec=str)
export_regex = r"([\/a-zA-Z0-9\.\-_]+)[\s]+(.+)\((.+)\)"
octal_nums_regex = r"""\\([0-7][0-7][0-7])"""
@staticmethod
def _validate_options(options):
for e in Export._conflicting:
if (options & (e[0])) == e[0]:
raise ValueError(e[1])
return options
@staticmethod
def _validate_key_pairs(kp):
if kp:
if isinstance(kp, dict):
for k, v in kp.items():
if k not in Export.key_pair:
raise ValueError('option %s not valid' % k)
return kp
else:
                raise ValueError('key_value_options must be None or a dict')
else:
return {}
def __init__(self, host, path, bit_wise_options=0, key_value_options=None):
if host == '<world>':
self.host = '*'
else:
self.host = host
self.path = path
self.options = Export._validate_options(bit_wise_options)
self.key_value_options = Export._validate_key_pairs(key_value_options)
@staticmethod
def _parse_opt(options_string):
bits = 0
pairs = {}
if len(options_string):
options = options_string.split(',')
for o in options:
if '=' in o:
# We have a key=value
key, value = o.split('=')
pairs[key] = value
else:
bits |= Export.bool_option[o]
return bits, pairs
@staticmethod
def _override(combined, test, opt_a, opt_b):
if test & opt_a:
combined &= ~opt_b
if test & opt_b:
combined &= ~opt_a
return combined
@staticmethod
def parse_opt(global_options, specific_options=None):
gbit, gpairs = Export._parse_opt(global_options)
if specific_options is None:
return gbit, gpairs
sbit, spairs = Export._parse_opt(specific_options)
Export._validate_options(gbit)
Export._validate_options(sbit)
# Remove global options which are overridden by specific
culled = gbit | sbit
culled = Export._override(culled, sbit, Export.RO, Export.RW)
culled = Export._override(culled, sbit, Export.INSECURE, Export.SECURE)
culled = Export._override(culled, sbit, Export.SYNC, Export.ASYNC)
culled = Export._override(culled, sbit, Export.HIDE, Export.NOHIDE)
culled = Export._override(culled, sbit, Export.WDELAY, Export.NO_WDELAY)
culled = Export._override(culled, sbit,
Export.ROOT_SQUASH,
Export.NO_ROOT_SQUASH)
gpairs.update(spairs)
return culled, gpairs
@staticmethod
def parse_export(tokens):
rc = []
try:
global_options = ''
options = ''
if len(tokens) >= 1:
path = tokens[0]
if len(tokens) > 1:
for t in tokens[1:]:
# Handle global options
if t[0] == '-' and not global_options:
global_options = t[1:]
continue
# Check for a host or a host with an options group
                        if '(' in t and ')' in t:
if t[0] != '(':
host, options = t[:-1].split('(')
else:
host = '*'
options = t[1:-1]
else:
host = t
rc.append(
Export(host, path,
*Export.parse_opt(global_options, options)))
else:
rc.append(Export('*', path))
except Exception as e:
log.error("parse_export: %s" % str(e))
return None
return rc
@staticmethod
def parse_exports_file(f):
rc = []
with open(f, "r") as e_f:
for line in e_f:
exp = Export.parse_export(
shlex.split(Export._chr_encode(line), '#'))
if exp:
rc.extend(exp)
return rc
@staticmethod
def parse_exportfs_output(export_text):
rc = []
pattern = re.compile(Export.export_regex)
for m in re.finditer(pattern, export_text):
rc.append(
Export(m.group(2), m.group(1), *Export.parse_opt(m.group(3))))
return rc
def options_list(self):
rc = []
for k, v in self.bool_option.items():
if self.options & v:
rc.append(k)
for k, v in self.key_value_options.items():
rc.append('%s=%s' % (k, v))
return rc
def options_string(self):
return ','.join(self.options_list())
@staticmethod
def _double_quote_space(s):
if ' ' in s:
return '"%s"' % s
return s
def __repr__(self):
return "%s %s(%s)" % (Export._double_quote_space(self.path).ljust(50),
self.host, self.options_string())
def export_file_format(self):
return "%s %s(%s)\n" % (Export._double_quote_space(self.path),
self.host, self.options_string())
@staticmethod
def _chr_encode(s):
# Replace octal values, the export path can contain \nnn in the
# export name.
p = re.compile(Export.octal_nums_regex)
for m in re.finditer(p, s):
s = s.replace('\\' + m.group(1), chr(int(m.group(1), 8)))
return s
def __eq__(self, other):
return self.path == other.path and self.host == other.host
class Nfs(object):
"""
Python module for configuring NFS exports
"""
CMD = 'exportfs'
EXPORT_FILE = 'targetd.exports'
EXPORT_FS_CONFIG_DIR = os.getenv("TARGETD_NFS_EXPORT_DIR", '/etc/exports.d')
MAIN_EXPORT_FILE = os.getenv("TARGETD_NFS_EXPORT", '/etc/exports')
@staticmethod
def security_options():
return "sys", "krb5", "krb5i", "krb5p"
@staticmethod
def _save_exports():
# Remove existing export
config_file = os.path.join(Nfs.EXPORT_FS_CONFIG_DIR, Nfs.EXPORT_FILE)
try:
os.remove(config_file)
except OSError:
pass
# Get exports in /etc/exports
user_exports = Export.parse_exports_file(Nfs.MAIN_EXPORT_FILE)
# Recreate all existing exports
with open(config_file, 'w') as ef:
for e in Nfs.exports():
if e not in user_exports:
ef.write(e.export_file_format())
@staticmethod
def exports():
"""
Return list of exports
"""
ec, out, error = invoke([Nfs.CMD, '-v'])
rc = Export.parse_exportfs_output(out)
return rc
@staticmethod
def export_add(host, path, bit_wise_options, key_value_options):
"""
Adds a path as an NFS export
"""
export = Export(host, path, bit_wise_options, key_value_options)
options = export.options_string()
cmd = [Nfs.CMD]
if len(options):
cmd.extend(['-o', options])
cmd.extend(['%s:%s' % (host, path)])
ec, out, err = invoke(cmd, False)
if ec == 0:
Nfs._save_exports()
return None
elif ec == 22:
raise ValueError("Invalid option: %s" % err)
else:
raise RuntimeError('Unexpected exit code "%s" %s, out= %s' %
(str(cmd), str(ec), str(out + ":" + err)))
@staticmethod
def export_remove(export):
ec, out, err = invoke(
[Nfs.CMD, '-u',
'%s:%s' % (export.host, export.path)])
if ec == 0:
Nfs._save_exports()
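# Illustrative usage (not part of the original module), showing how the Export
# option helpers compose; the host and path values below are made up.
#
# bits, pairs = Export.parse_opt('ro,sync', 'rw,anonuid=1000')
# # The specific options override the global ones, so RW is kept and RO dropped.
# e = Export('192.168.1.0/24', '/srv/exports/data', bits, pairs)
# e.options_string()   # e.g. 'rw,sync,anonuid=1000' (option order may vary)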
|
tasleson/targetd
|
targetd/nfs.py
|
Python
|
gpl-3.0
| 10,459
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Syed Faisal Akber
#
|
VA3SFA/rpi_hw_demo
|
hc-sr04/distance.py
|
Python
|
gpl-2.0
| 82
|
# -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'champion_relationships'
SPIDER_MODULES = ['champion_relationships.spiders']
NEWSPIDER_MODULE = 'champion_relationships.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tutorial.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tutorial.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'tutorial.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
sserrot/champion_relationships
|
champion_relationships/settings.py
|
Python
|
mit
| 3,187
|
from __future__ import print_function
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.web import Newsfeed, plaintext, URL
from pattern.db import date
# This example reads a given RSS or Atom newsfeed channel.
# Some example feeds to try out:
NATURE = "http://feeds.nature.com/nature/rss/current"
SCIENCE = "http://www.sciencemag.org/rss/podcast.xml"
NYT = "http://rss.nytimes.com/services/xml/rss/nyt/GlobalHome.xml"
TIME = "http://feeds.feedburner.com/time/topstories"
CNN = "http://rss.cnn.com/rss/edition.rss"
engine = Newsfeed()
for result in engine.search(CNN, cached=True):
print(result.title.upper())
print(plaintext(result.text)) # Remove HTML formatting.
print(result.url)
print(result.date)
print()
# News item URL's lead to the page with the full article.
# This page can have any kind of formatting.
# There is no default way to read it.
# But we could just download the source HTML and convert it to plain text:
#html = URL(result.url).download()
# print plaintext(html)
# The resulting text may contain a lot of garbage.
# A better way is to use a DOM parser to select the HTML elements we want.
# This is demonstrated in one of the next examples.
|
shubhangiKishore/pattern
|
examples/01-web/06-feed.py
|
Python
|
bsd-3-clause
| 1,248
|
## datastructure.py
## Author: Yangfeng Ji
## Date: 08-29-2013
## Time-stamp: <yangfeng 02/14/2015 00:28:50>
class SpanNode(object):
""" RST tree node
"""
def __init__(self, prop):
""" Initialization of SpanNode
        :type prop: string
        :param prop: property of this span
"""
# Text of this span / Discourse relation
self.text, self.relation = None, None
# EDU span / Nucleus span (begin, end) index
self.eduspan, self.nucspan = None, None
# Nucleus single EDU
self.nucedu = None
# Property
self.prop = prop
# Children node
# Each of them is a node instance
# N-S form (for binary RST tree only)
self.lnode, self.rnode = None, None
# Parent node
self.pnode = None
# Node list (for general RST tree only)
self.nodelist = []
# Relation form: NN, NS, SN
self.form = None
class ParseError(Exception):
""" Exception for parsing
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ActionError(Exception):
""" Exception for illegal parsing action
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Token(object):
""" Token class
"""
def __init__(self):
# Sentence index, token index (within sent)
self.sidx, self.tidx = None, None
# Word, Lemma
self.word, self.lemma = None, None
# POS tag
self.pos = None
# Dependency label, head index
self.deplabel, self.hidx = None, None
# NER, Partial parse tree
self.ner, self.partialparse = None, None
# EDU index
self.eduidx = None
class Doc(object):
""" Document
"""
def __init__(self):
# Token dict
self.tokendict = None
# EDU dict
self.edudict = None
# Relation pair
self.relapairs = None
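# Illustrative usage (not part of the original module): wiring two EDU-level
# nodes under a binary parent; the spans and relation label are made up.
#
# left, right, parent = SpanNode('Nucleus'), SpanNode('Satellite'), SpanNode('Root')
# left.eduspan, right.eduspan = (1, 1), (2, 2)
# parent.lnode, parent.rnode = left, right
# left.pnode = right.pnode = parent
# parent.eduspan, parent.nucspan, parent.form = (1, 2), (1, 1), 'NS'
# parent.relation = 'Elaboration'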
|
jiyfeng/DPLP
|
code/datastructure.py
|
Python
|
mit
| 2,050
|
import myhdl
from myhdl import (Signal, ResetSignal, intbv, always_seq, always,
always_comb)
@myhdl.block
def blinky(led, clock, reset=None):
assert len(led) >= 2
nled = len(led)
maxcnt = int(clock.frequency)
cnt = Signal(intbv(0,min=0,max=maxcnt))
toggle = Signal(bool(0))
@always_seq(clock.posedge, reset=reset)
def rtl():
if cnt == maxcnt-1:
cnt.next = 0
toggle.next = not toggle
else:
cnt.next = cnt + 1
@always_comb
def rtl_assign():
led.next[0] = toggle
led.next[1] = not toggle
for ii in range(2, nled):
led.next[ii] = 0
if reset is None:
reset = ResetSignal(0, active=0, isasync=False)
@always(clock.posedge)
def rtl_reset():
reset.next = not reset.active
g = (rtl, rtl_assign, rtl_reset,)
else:
g = (rtl, rtl_assign,)
return g
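# Illustrative elaboration sketch (not part of the original example). It assumes
# rhea's Clock signal type, which carries the `frequency` attribute used above;
# the LED width is arbitrary.
#
# from rhea import Clock
# clock = Clock(0, frequency=50e6)
# led = Signal(intbv(0)[4:])
# inst = blinky(led, clock)
# inst.convert(hdl='Verilog')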
|
cfelton/rhea
|
examples/build/blink.py
|
Python
|
mit
| 961
|
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Traces an executable and its child processes and extract the files accessed
by them.
The implementation uses OS-specific API. The native Kernel logger and the ETL
interface is used on Windows. Dtrace is used on OSX. Strace is used otherwise.
The OS-specific implementation is hidden in an 'API' interface.
The results are embedded in a Results instance. The tracing is done in two
phases, the first is to do the actual trace and generate an
implementation-specific log file. Then the log file is parsed to extract the
information, including the individual child processes and the files accessed
from the log.
"""
import codecs
import csv
import errno
import getpass
import glob
import json
import logging
import optparse
import os
import re
import stat
import subprocess
import sys
import tempfile
import threading
import time
import unicodedata
import weakref
## OS-specific imports
if sys.platform == 'win32':
from ctypes.wintypes import byref, create_unicode_buffer, c_int, c_wchar_p
from ctypes.wintypes import windll, FormatError # pylint: disable=E0611
from ctypes.wintypes import GetLastError # pylint: disable=E0611
elif sys.platform == 'darwin':
import Carbon.File # pylint: disable=F0401
import MacOS # pylint: disable=F0401
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
class TracingFailure(Exception):
"""An exception occured during tracing."""
def __init__(self, description, pid, line_number, line, *args):
super(TracingFailure, self).__init__(
description, pid, line_number, line, *args)
self.description = description
self.pid = pid
self.line_number = line_number
self.line = line
self.extra = args
def __str__(self):
out = self.description
if self.pid:
out += '\npid: %d' % self.pid
if self.line_number:
out += '\nline: %d' % self.line_number
if self.line:
out += '\n%s' % self.line
if self.extra:
out += '\n' + ', '.join(map(str, filter(None, self.extra)))
return out
## OS-specific functions
if sys.platform == 'win32':
def QueryDosDevice(drive_letter):
"""Returns the Windows 'native' path for a DOS drive letter."""
assert re.match(r'^[a-zA-Z]:$', drive_letter), drive_letter
assert isinstance(drive_letter, unicode)
# Guesswork. QueryDosDeviceW never returns the required number of bytes.
chars = 1024
drive_letter = drive_letter
p = create_unicode_buffer(chars)
if 0 == windll.kernel32.QueryDosDeviceW(drive_letter, p, chars):
err = GetLastError()
if err:
# pylint: disable=E0602
msg = u'QueryDosDevice(%s): %s (%d)' % (
drive_letter, FormatError(err), err)
raise WindowsError(err, msg.encode('utf-8'))
return p.value
def GetShortPathName(long_path):
"""Returns the Windows short path equivalent for a 'long' path."""
assert isinstance(long_path, unicode), repr(long_path)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(long_path) and not long_path.startswith('\\\\?\\'):
long_path = '\\\\?\\' + long_path
chars = windll.kernel32.GetShortPathNameW(long_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetShortPathNameW(long_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
msg = u'GetShortPathName(%s): %s (%d)' % (
long_path, FormatError(err), err)
raise WindowsError(err, msg.encode('utf-8'))
def GetLongPathName(short_path):
"""Returns the Windows long path equivalent for a 'short' path."""
assert isinstance(short_path, unicode)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(short_path) and not short_path.startswith('\\\\?\\'):
short_path = '\\\\?\\' + short_path
chars = windll.kernel32.GetLongPathNameW(short_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetLongPathNameW(short_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
msg = u'GetLongPathName(%s): %s (%d)' % (
short_path, FormatError(err), err)
raise WindowsError(err, msg.encode('utf-8'))
def get_current_encoding():
"""Returns the 'ANSI' code page associated to the process."""
return 'cp%d' % int(windll.kernel32.GetACP())
class DosDriveMap(object):
"""Maps \Device\HarddiskVolumeN to N: on Windows."""
# Keep one global cache.
_MAPPING = {}
def __init__(self):
"""Lazy loads the cache."""
if not self._MAPPING:
# This is related to UNC resolver on windows. Ignore that.
self._MAPPING[u'\\Device\\Mup'] = None
self._MAPPING[u'\\SystemRoot'] = os.environ[u'SystemRoot']
for letter in (chr(l) for l in xrange(ord('C'), ord('Z')+1)):
try:
letter = u'%s:' % letter
mapped = QueryDosDevice(letter)
if mapped in self._MAPPING:
logging.warn(
('Two drives: \'%s\' and \'%s\', are mapped to the same disk'
'. Drive letters are a user-mode concept and the kernel '
'traces only have NT path, so all accesses will be '
'associated with the first drive letter, independent of the '
'actual letter used by the code') % (
self._MAPPING[mapped], letter))
else:
self._MAPPING[mapped] = letter
except WindowsError: # pylint: disable=E0602
pass
def to_win32(self, path):
"""Converts a native NT path to Win32/DOS compatible path."""
match = re.match(r'(^\\Device\\[a-zA-Z0-9]+)(\\.*)?$', path)
if not match:
raise ValueError(
'Can\'t convert %s into a Win32 compatible path' % path,
path)
if not match.group(1) in self._MAPPING:
# Unmapped partitions may be accessed by windows for the
# fun of it while the test is running. Discard these.
return None
drive = self._MAPPING[match.group(1)]
if not drive or not match.group(2):
return drive
return drive + match.group(2)
def isabs(path):
"""Accepts X: as an absolute path, unlike python's os.path.isabs()."""
return os.path.isabs(path) or len(path) == 2 and path[1] == ':'
def find_item_native_case(root, item):
"""Gets the native path case of a single item based at root_path."""
if item == '..':
return item
root = get_native_path_case(root)
return os.path.basename(get_native_path_case(os.path.join(root, item)))
def get_native_path_case(p):
"""Returns the native path case for an existing file.
On Windows, removes any leading '\\?\'.
"""
assert isinstance(p, unicode), repr(p)
if not isabs(p):
raise ValueError(
'get_native_path_case(%r): Require an absolute path' % p, p)
# Make sure it is normalized to os.path.sep. Do not do it here to keep the
# function fast
assert '/' not in p, p
suffix = ''
count = p.count(':')
if count > 1:
# This means it has an alternate-data stream. There could be 3 ':', since
# it could be the $DATA datastream of an ADS. Split the whole ADS suffix
# off and add it back afterward. There is no way to know the native path
# case of an alternate data stream.
items = p.split(':')
p = ':'.join(items[0:2])
suffix = ''.join(':' + i for i in items[2:])
# TODO(maruel): Use os.path.normpath?
if p.endswith('.\\'):
p = p[:-2]
# Windows used to have an option to turn on case sensitivity on non Win32
# subsystem but that's out of scope here and isn't supported anymore.
# Go figure why GetShortPathName() is needed.
try:
out = GetLongPathName(GetShortPathName(p))
except OSError, e:
if e.args[0] in (2, 3, 5):
# The path does not exist. Try to recurse and reconstruct the path.
base = os.path.dirname(p)
rest = os.path.basename(p)
return os.path.join(get_native_path_case(base), rest)
raise
if out.startswith('\\\\?\\'):
out = out[4:]
# Always upper case the first letter since GetLongPathName() will return the
# drive letter in the case it was given.
return out[0].upper() + out[1:] + suffix
def CommandLineToArgvW(command_line):
"""Splits a commandline into argv using CommandLineToArgvW()."""
# http://msdn.microsoft.com/library/windows/desktop/bb776391.aspx
size = c_int()
assert isinstance(command_line, unicode)
ptr = windll.shell32.CommandLineToArgvW(command_line, byref(size))
try:
return [arg for arg in (c_wchar_p * size.value).from_address(ptr)]
finally:
windll.kernel32.LocalFree(ptr)
elif sys.platform == 'darwin':
# On non-windows, keep the stdlib behavior.
isabs = os.path.isabs
def _native_case(p):
"""Gets the native path case. Warning: this function resolves symlinks."""
try:
rel_ref, _ = Carbon.File.FSPathMakeRef(p.encode('utf-8'))
# The OSX underlying code uses NFD but python strings are in NFC. This
# will cause issues with os.listdir() for example. Since the dtrace log
# *is* in NFC, normalize it here.
out = unicodedata.normalize(
'NFC', rel_ref.FSRefMakePath().decode('utf-8'))
if p.endswith(os.path.sep) and not out.endswith(os.path.sep):
return out + os.path.sep
return out
except MacOS.Error, e:
if e.args[0] in (-43, -120):
# The path does not exist. Try to recurse and reconstruct the path.
# -43 means file not found.
# -120 means directory not found.
base = os.path.dirname(p)
rest = os.path.basename(p)
return os.path.join(_native_case(base), rest)
raise OSError(
e.args[0], 'Failed to get native path for %s' % p, p, e.args[1])
def _split_at_symlink_native(base_path, rest):
"""Returns the native path for a symlink."""
base, symlink, rest = split_at_symlink(base_path, rest)
if symlink:
if not base_path:
base_path = base
else:
base_path = safe_join(base_path, base)
symlink = find_item_native_case(base_path, symlink)
return base, symlink, rest
def find_item_native_case(root_path, item):
"""Gets the native path case of a single item based at root_path.
There is no API to get the native path case of symlinks on OSX. So it
needs to be done the slow way.
"""
if item == '..':
return item
item = item.lower()
for element in os.listdir(root_path):
if element.lower() == item:
return element
def get_native_path_case(path):
"""Returns the native path case for an existing file.
Technically, it's only HFS+ on OSX that is case preserving and
insensitive. It's the default setting on HFS+ but can be changed.
"""
assert isinstance(path, unicode), repr(path)
if not isabs(path):
raise ValueError(
'get_native_path_case(%r): Require an absolute path' % path, path)
if path.startswith('/dev'):
# /dev is not visible from Carbon, causing an exception.
return path
# Starts assuming there is no symlink along the path.
resolved = _native_case(path)
if path.lower() in (resolved.lower(), resolved.lower() + './'):
# This code path is incredibly faster.
logging.debug('get_native_path_case(%s) = %s' % (path, resolved))
return resolved
# There was a symlink, process it.
base, symlink, rest = _split_at_symlink_native(None, path)
assert symlink, (path, base, symlink, rest, resolved)
prev = base
base = safe_join(_native_case(base), symlink)
assert len(base) > len(prev)
while rest:
prev = base
relbase, symlink, rest = _split_at_symlink_native(base, rest)
base = safe_join(base, relbase)
assert len(base) > len(prev), (prev, base, symlink)
if symlink:
base = safe_join(base, symlink)
assert len(base) > len(prev), (prev, base, symlink)
# Make sure no symlink was resolved.
assert base.lower() == path.lower(), (base, path)
logging.debug('get_native_path_case(%s) = %s' % (path, base))
return base
else: # OSes other than Windows and OSX.
# On non-windows, keep the stdlib behavior.
isabs = os.path.isabs
def find_item_native_case(root, item):
"""Gets the native path case of a single item based at root_path."""
if item == '..':
return item
root = get_native_path_case(root)
return os.path.basename(get_native_path_case(os.path.join(root, item)))
def get_native_path_case(path):
"""Returns the native path case for an existing file.
On OSes other than OSX and Windows, assume the file system is
case-sensitive.
TODO(maruel): This is not strictly true. Implement if necessary.
"""
assert isinstance(path, unicode), repr(path)
if not isabs(path):
raise ValueError(
'get_native_path_case(%r): Require an absolute path' % path, path)
# Give up on cygwin, as GetLongPathName() can't be called.
# Linux traces tends to not be normalized so use this occasion to normalize
# it. This function implementation already normalizes the path on the other
# OS so this needs to be done here to be coherent between OSes.
out = os.path.normpath(path)
if path.endswith(os.path.sep) and not out.endswith(os.path.sep):
return out + os.path.sep
return out
if sys.platform != 'win32': # All non-Windows OSes.
def safe_join(*args):
"""Joins path elements like os.path.join() but doesn't abort on absolute
path.
os.path.join('foo', '/bar') == '/bar'
but safe_join('foo', '/bar') == 'foo/bar'.
"""
out = ''
for element in args:
if element.startswith(os.path.sep):
if out.endswith(os.path.sep):
out += element[1:]
else:
out += element
else:
if out.endswith(os.path.sep):
out += element
else:
out += os.path.sep + element
return out
def split_at_symlink(base_dir, relfile):
"""Scans each component of relfile and cut the string at the symlink if
there is any.
Returns a tuple (base_path, symlink, rest), with symlink == rest == None if
  no symlink was found.
"""
if base_dir:
assert relfile
assert os.path.isabs(base_dir)
index = 0
else:
assert os.path.isabs(relfile)
index = 1
def at_root(rest):
if base_dir:
return safe_join(base_dir, rest)
return rest
while True:
try:
index = relfile.index(os.path.sep, index)
except ValueError:
index = len(relfile)
full = at_root(relfile[:index])
if os.path.islink(full):
# A symlink!
base = os.path.dirname(relfile[:index])
symlink = os.path.basename(relfile[:index])
rest = relfile[index:]
logging.debug(
'split_at_symlink(%s, %s) -> (%s, %s, %s)' %
(base_dir, relfile, base, symlink, rest))
return base, symlink, rest
if index == len(relfile):
break
index += 1
return relfile, None, None
class Unbuffered(object):
"""Disable buffering on a file object."""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
if '\n' in data:
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def disable_buffering():
"""Makes this process and child processes stdout unbuffered."""
if not os.environ.get('PYTHONUNBUFFERED'):
# Since sys.stdout is a C++ object, it's impossible to do
# sys.stdout.write = lambda...
sys.stdout = Unbuffered(sys.stdout)
os.environ['PYTHONUNBUFFERED'] = 'x'
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
def create_subprocess_thunk():
"""Creates a small temporary script to start the child process.
  This thunk doesn't block; its unique name is used to identify it as the
parent.
"""
handle, name = tempfile.mkstemp(prefix='trace_inputs_thunk', suffix='.py')
try:
os.write(
handle,
(
'import subprocess, sys\n'
'sys.exit(subprocess.call(sys.argv[2:]))\n'
))
finally:
os.close(handle)
return name
def create_exec_thunk():
"""Creates a small temporary script to start the child executable.
  Reads from the file handle provided as the first argument to block, then
execv() the command to be traced.
"""
handle, name = tempfile.mkstemp(prefix='trace_inputs_thunk', suffix='.py')
try:
os.write(
handle,
(
'import os, sys\n'
'fd = int(sys.argv[1])\n'
# This will block until the controlling process writes a byte on the
# pipe. It will do so once the tracing tool, e.g. strace, is ready to
# trace.
'os.read(fd, 1)\n'
'os.close(fd)\n'
'os.execv(sys.argv[2], sys.argv[2:])\n'
))
finally:
os.close(handle)
return name
def strace_process_quoted_arguments(text):
"""Extracts quoted arguments on a string and return the arguments as a list.
Implemented as an automaton. Supports incomplete strings in the form
'"foo"...'.
Example:
With text = '"foo", "bar"', the function will return ['foo', 'bar']
TODO(maruel): Implement escaping.
"""
# All the possible states of the DFA.
  ( NEED_QUOTE, # Beginning of a new argument.
INSIDE_STRING, # Inside an argument.
ESCAPED, # Found a '\' inside a quote. Treat the next char as-is.
NEED_COMMA_OR_DOT, # Right after the closing quote of an argument. Could be
                       # a series of 3 dots or a comma.
NEED_SPACE, # Right after a comma
NEED_DOT_2, # Found a dot, need a second one.
NEED_DOT_3, # Found second dot, need a third one.
NEED_COMMA, # Found third dot, need a comma.
) = range(8)
state = NEED_QUOTE
out = []
for index, char in enumerate(text):
if char == '"':
if state == NEED_QUOTE:
state = INSIDE_STRING
# A new argument was found.
out.append('')
elif state == INSIDE_STRING:
# The argument is now closed.
state = NEED_COMMA_OR_DOT
elif state == ESCAPED:
out[-1] += char
state = INSIDE_STRING
else:
raise ValueError(
'Can\'t process char \'%s\' at column %d for: %r' % (
char, index, text),
index,
text)
elif char == ',':
if state in (NEED_COMMA_OR_DOT, NEED_COMMA):
state = NEED_SPACE
elif state == INSIDE_STRING:
out[-1] += char
elif state == ESCAPED:
out[-1] += char
state = INSIDE_STRING
else:
raise ValueError(
'Can\'t process char \'%s\' at column %d for: %r' % (
char, index, text),
index,
text)
elif char == ' ':
if state == NEED_SPACE:
state = NEED_QUOTE
elif state == INSIDE_STRING:
out[-1] += char
elif state == ESCAPED:
out[-1] += char
state = INSIDE_STRING
else:
raise ValueError(
'Can\'t process char \'%s\' at column %d for: %r' % (
char, index, text),
index,
text)
elif char == '.':
if state in (NEED_QUOTE, NEED_COMMA_OR_DOT):
# The string is incomplete, this mean the strace -s flag should be
# increased.
# For NEED_QUOTE, the input string would look like '"foo", ...'.
# For NEED_COMMA_OR_DOT, the input string would look like '"foo"...'
state = NEED_DOT_2
elif state == NEED_DOT_2:
state = NEED_DOT_3
elif state == NEED_DOT_3:
state = NEED_COMMA
elif state == INSIDE_STRING:
out[-1] += char
elif state == ESCAPED:
out[-1] += char
state = INSIDE_STRING
else:
raise ValueError(
'Can\'t process char \'%s\' at column %d for: %r' % (
char, index, text),
index,
text)
elif char == '\\':
if state == ESCAPED:
out[-1] += char
state = INSIDE_STRING
elif state == INSIDE_STRING:
state = ESCAPED
else:
raise ValueError(
'Can\'t process char \'%s\' at column %d for: %r' % (
char, index, text),
index,
text)
else:
if state == INSIDE_STRING:
out[-1] += char
else:
raise ValueError(
'Can\'t process char \'%s\' at column %d for: %r' % (
char, index, text),
index,
text)
if state not in (NEED_COMMA, NEED_COMMA_OR_DOT):
raise ValueError(
'String is incorrectly terminated: %r' % text,
text)
return out
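# A few additional illustrative inputs (not part of the original file), matching
# the automaton above:
#
#   strace_process_quoted_arguments('"foo", "bar"')  # -> ['foo', 'bar']
#   strace_process_quoted_arguments('"foo"...')      # -> ['foo'] (string truncated by strace -s)
#   strace_process_quoted_arguments('"foo", ...')    # -> ['foo'] (trailing arguments elided)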
def read_json(filepath):
with open(filepath, 'r') as f:
return json.load(f)
def write_json(filepath_or_handle, data, dense):
"""Writes data into filepath or file handle encoded as json.
If dense is True, the json is packed. Otherwise, it is human readable.
"""
if hasattr(filepath_or_handle, 'write'):
if dense:
filepath_or_handle.write(
json.dumps(data, sort_keys=True, separators=(',',':')))
else:
filepath_or_handle.write(json.dumps(data, sort_keys=True, indent=2))
else:
with open(filepath_or_handle, 'wb') as f:
if dense:
json.dump(data, f, sort_keys=True, separators=(',',':'))
else:
json.dump(data, f, sort_keys=True, indent=2)
def assert_is_renderable(pseudo_string):
"""Asserts the input is a valid object to be processed by render()."""
assert (
pseudo_string is None or
isinstance(pseudo_string, unicode) or
hasattr(pseudo_string, 'render')), repr(pseudo_string)
def render(pseudo_string):
"""Converts the pseudo-string to an unicode string."""
if pseudo_string is None or isinstance(pseudo_string, unicode):
return pseudo_string
return pseudo_string.render()
class Results(object):
"""Results of a trace session."""
class _TouchedObject(object):
"""Something, a file or a directory, that was accessed."""
def __init__(self, root, path, tainted, size, nb_files):
logging.debug(
'%s(%s, %s, %s, %s, %s)' %
(self.__class__.__name__, root, path, tainted, size, nb_files))
assert_is_renderable(root)
assert_is_renderable(path)
self.root = root
self.path = path
self.tainted = tainted
self.nb_files = nb_files
# Can be used as a cache or a default value, depending on context. In
# particular, once self.tainted is True, because the path was replaced
# with a variable, it is not possible to look up the file size.
self._size = size
# These are cache only.
self._real_path = None
# Check internal consistency.
assert path, path
assert tainted or bool(root) != bool(isabs(path)), (root, path)
assert tainted or (
not os.path.exists(self.full_path) or
(self.full_path == get_native_path_case(self.full_path))), (
tainted, self.full_path, get_native_path_case(self.full_path))
@property
def existent(self):
return self.size != -1
@property
def full_path(self):
if self.root:
return os.path.join(self.root, self.path)
return self.path
@property
def real_path(self):
"""Returns the path with symlinks resolved."""
if not self._real_path:
self._real_path = os.path.realpath(self.full_path)
return self._real_path
@property
def size(self):
"""File's size. -1 is not existent.
Once tainted, it is not possible the retrieve the file size anymore since
the path is composed of variables.
"""
if self._size is None and not self.tainted:
try:
self._size = os.stat(self.full_path).st_size
except OSError:
self._size = -1
return self._size
def flatten(self):
"""Returns a dict representing this object.
A 'size' of 0 means the file was only touched and not read.
"""
return {
'path': self.path,
'size': self.size,
}
def replace_variables(self, variables):
"""Replaces the root of this File with one of the variables if it matches.
If a variable replacement occurs, the cloned object becomes tainted.
"""
for variable, root_path in variables.iteritems():
if self.path.startswith(root_path):
return self._clone(
self.root, variable + self.path[len(root_path):], True)
# No need to clone, returns ourself.
return self
def strip_root(self, root):
"""Returns a clone of itself with 'root' stripped off.
Note that the file is kept if it is either accessible from a symlinked
path that was used to access the file or through the real path.
"""
# Check internal consistency.
assert self.tainted or (isabs(root) and root.endswith(os.path.sep)), root
if not self.full_path.startswith(root):
# Now try to resolve the symlinks to see if it can be reached this way.
# Only try *after* trying without resolving symlink.
if not self.real_path.startswith(root):
return None
path = self.real_path
else:
path = self.full_path
return self._clone(root, path[len(root):], self.tainted)
def _clone(self, new_root, new_path, tainted):
raise NotImplementedError(self.__class__.__name__)
class File(_TouchedObject):
"""A file that was accessed. May not be present anymore.
If tainted is true, it means it is not a real path anymore as a variable
    replacement occurred.
|mode| can be one of None, TOUCHED, READ or WRITE.
"""
# Was probed for existence, and it is existent, but was never _opened_.
TOUCHED = 't'
# Opened for read only and guaranteed to not have been written to.
READ = 'r'
# Opened for write.
WRITE = 'w'
# They are listed in order of priority. E.g. if a file is traced as TOUCHED
# then as WRITE, only keep WRITE. None means no idea, which is a problem on
# Windows.
ACCEPTABLE_MODES = (None, TOUCHED, READ, WRITE)
def __init__(self, root, path, tainted, size, mode):
assert mode in self.ACCEPTABLE_MODES
super(Results.File, self).__init__(root, path, tainted, size, 1)
self.mode = mode
def _clone(self, new_root, new_path, tainted):
"""Clones itself keeping meta-data."""
# Keep the self.size and self._real_path caches for performance reason. It
# is also important when the file becomes tainted (with a variable instead
# of the real path) since self.path is not an on-disk path anymore so
# out._size cannot be updated.
out = self.__class__(new_root, new_path, tainted, self.size, self.mode)
out._real_path = self._real_path
return out
def flatten(self):
out = super(Results.File, self).flatten()
out['mode'] = self.mode
return out
class Directory(_TouchedObject):
"""A directory of files. Must exist.
For a Directory instance, self.size is not a cache, it's an actual value
that is never modified and represents the total size of the files contained
in this directory. It is possible that the directory is empty so that
size==0; this happens if there's only an invalid symlink in it.
"""
def __init__(self, root, path, tainted, size, nb_files):
"""path='.' is a valid value and must be handled appropriately."""
assert not path.endswith(os.path.sep), path
super(Results.Directory, self).__init__(
root, path + os.path.sep, tainted, size, nb_files)
def flatten(self):
out = super(Results.Directory, self).flatten()
out['nb_files'] = self.nb_files
return out
def _clone(self, new_root, new_path, tainted):
"""Clones itself keeping meta-data."""
out = self.__class__(
new_root,
new_path.rstrip(os.path.sep),
tainted,
self.size,
self.nb_files)
out._real_path = self._real_path
return out
class Process(object):
"""A process that was traced.
Contains references to the files accessed by this process and its children.
"""
def __init__(self, pid, files, executable, command, initial_cwd, children):
logging.debug('Process(%s, %d, ...)' % (pid, len(files)))
self.pid = pid
self.files = sorted(files, key=lambda x: x.path)
self.children = children
self.executable = executable
self.command = command
self.initial_cwd = initial_cwd
# Check internal consistency.
assert len(set(f.path for f in self.files)) == len(self.files), sorted(
f.path for f in self.files)
assert isinstance(self.children, list)
assert isinstance(self.files, list)
@property
def all(self):
for child in self.children:
for i in child.all:
yield i
yield self
def flatten(self):
return {
'children': [c.flatten() for c in self.children],
'command': self.command,
'executable': self.executable,
'files': [f.flatten() for f in self.files],
'initial_cwd': self.initial_cwd,
'pid': self.pid,
}
def strip_root(self, root):
assert isabs(root) and root.endswith(os.path.sep), root
# Loads the files after since they are constructed as objects.
out = self.__class__(
self.pid,
filter(None, (f.strip_root(root) for f in self.files)),
self.executable,
self.command,
self.initial_cwd,
[c.strip_root(root) for c in self.children])
logging.debug(
'strip_root(%s) %d -> %d' % (root, len(self.files), len(out.files)))
return out
def __init__(self, process):
self.process = process
# Cache.
self._files = None
def flatten(self):
return {
'root': self.process.flatten(),
}
@property
def files(self):
if self._files is None:
self._files = sorted(
sum((p.files for p in self.process.all), []),
key=lambda x: x.path)
return self._files
@property
def existent(self):
return [f for f in self.files if f.existent]
@property
def non_existent(self):
return [f for f in self.files if not f.existent]
def strip_root(self, root):
"""Returns a clone with all the files outside the directory |root| removed
and converts all the path to be relative paths.
It keeps files accessible through the |root| directory or that have been
accessed through any symlink which points to the same directory.
"""
# Resolve any symlink
root = os.path.realpath(root)
root = get_native_path_case(root).rstrip(os.path.sep) + os.path.sep
logging.debug('strip_root(%s)' % root)
return Results(self.process.strip_root(root))
class ApiBase(object):
"""OS-agnostic API to trace a process and its children."""
class Context(object):
"""Processes one log line at a time and keeps the list of traced processes.
The parsing is complicated by the fact that logs are traced out of order for
strace but in-order for dtrace and logman. In addition, on Windows it is
    very frequent that process ids are reused so a flat list cannot be used. But
at the same time, it is impossible to faithfully construct a graph when the
logs are processed out of order. So both a tree and a flat mapping are used,
the tree is the real process tree, while the flat mapping stores the last
    valid process for the corresponding process id. For the strace case, the
tree's head is guessed at the last moment.
"""
class Process(object):
"""Keeps context for one traced child process.
Logs all the files this process touched. Ignores directories.
"""
def __init__(self, blacklist, pid, initial_cwd):
# Check internal consistency.
assert isinstance(pid, int), repr(pid)
assert_is_renderable(initial_cwd)
self.pid = pid
# children are Process instances.
self.children = []
self.initial_cwd = initial_cwd
self.cwd = None
self.files = {}
self.executable = None
self.command = None
self._blacklist = blacklist
def to_results_process(self):
"""Resolves file case sensitivity and or late-bound strings."""
        # When resolving files, it's normal to get dupes because a file could be
# opened multiple times with different case. Resolve the deduplication
# here.
def fix_path(x):
"""Returns the native file path case.
Converts late-bound strings.
"""
if not x:
# Do not convert None instance to 'None'.
return x
x = render(x)
if os.path.isabs(x):
# If the path is not absolute, which tends to happen occasionally on
# Windows, it is not possible to get the native path case so ignore
# that trace. It mostly happens for 'executable' value.
x = get_native_path_case(x)
return x
def fix_and_blacklist_path(x, m):
"""Receives a tuple (filepath, mode) and processes filepath."""
x = fix_path(x)
if not x:
return
# The blacklist needs to be reapplied, since path casing could
# influence blacklisting.
if self._blacklist(x):
return
# Filters out directories. Some may have passed through.
if os.path.isdir(x):
return
return x, m
# Renders all the files as strings, as some could be RelativePath
# instances. It is important to do it first since there could still be
# multiple entries with the same path but different modes.
rendered = (
fix_and_blacklist_path(f, m) for f, m in self.files.iteritems())
files = sorted(
(f for f in rendered if f),
key=lambda x: (x[0], Results.File.ACCEPTABLE_MODES.index(x[1])))
# Then converting into a dict will automatically clean up lesser
# important values.
files = [
Results.File(None, f, False, None, m)
for f, m in dict(files).iteritems()
]
return Results.Process(
self.pid,
files,
fix_path(self.executable),
self.command,
fix_path(self.initial_cwd),
[c.to_results_process() for c in self.children])
def add_file(self, filepath, mode):
"""Adds a file if it passes the blacklist."""
if self._blacklist(render(filepath)):
return
logging.debug('add_file(%d, %s, %s)', self.pid, filepath, mode)
# Note that filepath and not render(filepath) is added. It is because
# filepath could be something else than a string, like a RelativePath
# instance for dtrace logs.
modes = Results.File.ACCEPTABLE_MODES
old_mode = self.files.setdefault(filepath, mode)
if old_mode != mode and modes.index(old_mode) < modes.index(mode):
# Take the highest value.
self.files[filepath] = mode
def __init__(self, blacklist):
self.blacklist = blacklist
# Initial process.
self.root_process = None
# dict to accelerate process lookup, to not have to lookup the whole graph
# each time.
self._process_lookup = {}
class Tracer(object):
"""During it's lifetime, the tracing subsystem is enabled."""
def __init__(self, logname):
self._logname = logname
self._lock = threading.RLock()
self._traces = []
self._initialized = True
self._scripts_to_cleanup = []
def trace(self, cmd, cwd, tracename, output):
"""Runs the OS-specific trace program on an executable.
Arguments:
- cmd: The command (a list) to run.
- cwd: Current directory to start the child process in.
- tracename: Name of the trace in the logname file.
- output: If False, redirects output to PIPEs.
Returns a tuple (resultcode, output) and updates the internal trace
entries.
"""
# The implementation adds an item to self._traces.
raise NotImplementedError(self.__class__.__name__)
def close(self, _timeout=None):
"""Saves the meta-data in the logname file.
For kernel-based tracing, stops the tracing subsystem.
Must not be used manually when using 'with' construct.
"""
with self._lock:
if not self._initialized:
raise TracingFailure(
            'Called %s.close() on an uninitialized object' %
self.__class__.__name__,
None, None, None)
try:
while self._scripts_to_cleanup:
try:
os.remove(self._scripts_to_cleanup.pop())
except OSError as e:
logging.error('Failed to delete a temporary script: %s', e)
write_json(self._logname, self._gen_logdata(), False)
finally:
self._initialized = False
def post_process_log(self):
"""Post-processes the log so it becomes faster to load afterward.
Must not be used manually when using 'with' construct.
"""
assert not self._initialized, 'Must stop tracing first.'
def _gen_logdata(self):
"""Returns the data to be saved in the trace file."""
return {
'traces': self._traces,
}
def __enter__(self):
"""Enables 'with' statement."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Enables 'with' statement."""
self.close()
# If an exception was thrown, do not process logs.
if not exc_type:
self.post_process_log()
def get_tracer(self, logname):
"""Returns an ApiBase.Tracer instance.
Initializes the tracing subsystem, which is a requirement for kernel-based
tracers. Only one tracer instance should be live at a time!
logname is the filepath to the json file that will contain the meta-data
about the logs.
"""
return self.Tracer(logname)
@staticmethod
def clean_trace(logname):
"""Deletes an old log."""
raise NotImplementedError()
@classmethod
def parse_log(cls, logname, blacklist, trace_name):
"""Processes trace logs and returns the files opened and the files that do
not exist.
It does not track directories.
Arguments:
- logname: must be an absolute path.
- blacklist: must be a lambda.
- trace_name: optional trace to read, defaults to reading all traces.
Most of the time, files that do not exist are temporary test files that
should be put in /tmp instead. See http://crbug.com/116251.
Returns a list of dict with keys:
- results: A Results instance.
- trace: The corresponding tracename parameter provided to
get_tracer().trace().
- output: Output gathered during execution, if get_tracer().trace(...,
output=False) was used.
"""
raise NotImplementedError(cls.__class__.__name__)
class Strace(ApiBase):
"""strace implies linux."""
@staticmethod
def load_filename(filename):
"""Parses a filename in a log."""
# TODO(maruel): Be compatible with strace -x.
assert isinstance(filename, str)
out = ''
i = 0
while i < len(filename):
c = filename[i]
if c == '\\':
out += chr(int(filename[i+1:i+4], 8))
i += 4
else:
out += c
i += 1
# TODO(maruel): That's not necessarily true that the current code page is
# utf-8.
return out.decode('utf-8')
class Context(ApiBase.Context):
"""Processes a strace log line and keeps the list of existent and non
existent files accessed.
Ignores directories.
Uses late-binding to processes the cwd of each process. The problem is that
strace generates one log file per process it traced but doesn't give any
information about which process was started when and by who. So process the
logs out of order and use late binding with RelativePath to be able to
deduce the initial directory of each process once all the logs are parsed.
TODO(maruel): Use the script even in the non-sudo case, so log parsing can
    be done in two phases: first find the root process, then process the child
processes in order. With that, it should be possible to not use RelativePath
anymore. This would significantly simplify the code!
"""
class Process(ApiBase.Context.Process):
"""Represents the state of a process.
Contains all the information retrieved from the pid-specific log.
"""
# Function names are using ([a-z_0-9]+)
# This is the most common format. function(args) = result
RE_HEADER = re.compile(r'^([a-z_0-9]+)\((.*?)\)\s+= (.+)$')
# An interrupted function call, only grab the minimal header.
RE_UNFINISHED = re.compile(r'^([^\(]+)(.*) \<unfinished \.\.\.\>$')
# A resumed function call.
RE_RESUMED = re.compile(r'^<\.\.\. ([^ ]+) resumed> (.+)$')
# A process received a signal.
RE_SIGNAL = re.compile(r'^--- SIG[A-Z]+ .+ ---')
# A process didn't handle a signal. Ignore any junk appearing before,
# because the process was forcibly killed so it won't open any new file.
RE_KILLED = re.compile(
r'^.*\+\+\+ killed by ([A-Z]+)( \(core dumped\))? \+\+\+$')
# The process has exited.
RE_PROCESS_EXITED = re.compile(r'^\+\+\+ exited with (\d+) \+\+\+')
# A call was canceled. Ignore any prefix.
RE_UNAVAILABLE = re.compile(r'^.*\)\s*= \? <unavailable>$')
# Happens when strace fails to even get the function name.
UNNAMED_FUNCTION = '????'
# Corner-case in python, a class member function decorator must not be
# @staticmethod.
def parse_args(regexp, expect_zero): # pylint: disable=E0213
"""Automatically convert the str 'args' into a list of processed
arguments.
Arguments:
- regexp is used to parse args.
- expect_zero: one of True, False or None.
- True: will check for result.startswith('0') first and will ignore
the trace line completely otherwise. This is important because for
many functions, the regexp will not process if the call failed.
- False: will check for not result.startswith(('?', '-1')) for the
          same reason as with True.
- None: ignore result.
"""
def meta_hook(function):
assert function.__name__.startswith('handle_')
def hook(self, args, result):
if expect_zero is True and not result.startswith('0'):
return
if expect_zero is False and result.startswith(('?', '-1')):
return
match = re.match(regexp, args)
if not match:
raise TracingFailure(
'Failed to parse %s(%s) = %s' %
(function.__name__[len('handle_'):], args, result),
None, None, None)
return function(self, match.groups(), result)
return hook
return meta_hook
class RelativePath(object):
"""A late-bound relative path."""
def __init__(self, parent, value):
assert_is_renderable(parent)
self.parent = parent
assert (
value is None or
(isinstance(value, unicode) and not os.path.isabs(value)))
self.value = value
if self.value:
# TODO(maruel): On POSIX, '\\' is a valid character so remove this
# assert.
assert '\\' not in self.value, value
assert '\\' not in self.value, (repr(value), repr(self.value))
def render(self):
"""Returns the current directory this instance is representing.
This function is used to return the late-bound value.
"""
assert self.parent is not None
parent = render(self.parent)
if self.value:
return os.path.normpath(os.path.join(parent, self.value))
return parent
def __init__(self, root, pid):
"""Keeps enough information to be able to guess the original process
root.
strace doesn't store which process was the initial process. So more
information needs to be kept so the graph can be reconstructed from the
flat map.
"""
logging.info('%s(%d)' % (self.__class__.__name__, pid))
super(Strace.Context.Process, self).__init__(root.blacklist, pid, None)
assert isinstance(root, ApiBase.Context)
self._root = weakref.ref(root)
# The dict key is the function name of the pending call, like 'open'
# or 'execve'.
self._pending_calls = {}
self._line_number = 0
# Current directory when the process started.
if isinstance(self._root(), unicode):
self.initial_cwd = self._root()
else:
self.initial_cwd = self.RelativePath(self._root(), None)
self.parentid = None
self._done = False
def get_cwd(self):
"""Returns the best known value of cwd."""
return self.cwd or self.initial_cwd
def render(self):
"""Returns the string value of the RelativePath() object.
Used by RelativePath. Returns the initial directory and not the
current one since the current directory 'cwd' validity is time-limited.
The validity is only guaranteed once all the logs are processed.
"""
return self.initial_cwd.render()
def on_line(self, line):
assert isinstance(line, str)
self._line_number += 1
try:
if self._done:
raise TracingFailure(
'Found a trace for a terminated process or corrupted log',
None, None, None)
if self.RE_SIGNAL.match(line):
# Ignore signals.
return
match = self.RE_KILLED.match(line)
if match:
# Converts a '+++ killed by Foo +++' trace into an exit_group().
self.handle_exit_group(match.group(1), None)
return
match = self.RE_PROCESS_EXITED.match(line)
if match:
# Converts a '+++ exited with 1 +++' trace into an exit_group()
self.handle_exit_group(match.group(1), None)
return
match = self.RE_UNFINISHED.match(line)
if match:
if match.group(1) in self._pending_calls:
raise TracingFailure(
'Found two unfinished calls for the same function',
None, None, None,
self._pending_calls)
self._pending_calls[match.group(1)] = (
match.group(1) + match.group(2))
return
match = self.RE_UNAVAILABLE.match(line)
if match:
# This usually means a process was killed and a pending call was
# canceled.
# TODO(maruel): Look up the last exit_group() trace just above and
# make sure any self._pending_calls[anything] is properly flushed.
return
match = self.RE_RESUMED.match(line)
if match:
if match.group(1) not in self._pending_calls:
raise TracingFailure(
'Found a resumed call that was not logged as unfinished',
None, None, None,
self._pending_calls)
pending = self._pending_calls.pop(match.group(1))
# Reconstruct the line.
line = pending + match.group(2)
match = self.RE_HEADER.match(line)
if not match:
# The line is corrupted. It happens occasionally when a process is
# killed forcibly with activity going on. Assume the process died.
# No other line can be processed afterward.
logging.debug('%d is done: %s', self.pid, line)
self._done = True
return
if match.group(1) == self.UNNAMED_FUNCTION:
return
# It's a valid line, handle it.
handler = getattr(self, 'handle_%s' % match.group(1), None)
if not handler:
self._handle_unknown(match.group(1), match.group(2), match.group(3))
return handler(match.group(2), match.group(3))
except TracingFailure, e:
# Hack in the values since the handler could be a static function.
e.pid = self.pid
e.line = line
e.line_number = self._line_number
# Re-raise the modified exception.
raise
except (KeyError, NotImplementedError, ValueError), e:
raise TracingFailure(
'Trace generated a %s exception: %s' % (
e.__class__.__name__, str(e)),
self.pid,
self._line_number,
line,
e)
@parse_args(r'^\"(.+?)\", [FKORWX_|]+$', True)
def handle_access(self, args, _result):
self._handle_file(args[0], Results.File.TOUCHED)
@parse_args(r'^\"(.+?)\"$', True)
def handle_chdir(self, args, _result):
"""Updates cwd."""
self.cwd = self._mangle(args[0])
logging.debug('handle_chdir(%d, %s)' % (self.pid, self.cwd))
@parse_args(r'^\"(.+?)\", (\d+), (\d+)$', False)
def handle_chown(self, args, _result):
# TODO(maruel): Look at result?
self._handle_file(args[0], Results.File.WRITE)
def handle_clone(self, _args, result):
self._handling_forking('clone', result)
def handle_close(self, _args, _result):
pass
@parse_args(r'^\"(.+?)\", (\d+)$', False)
def handle_chmod(self, args, _result):
self._handle_file(args[0], Results.File.WRITE)
@parse_args(r'^\"(.+?)\", (\d+)$', False)
def handle_creat(self, args, _result):
self._handle_file(args[0], Results.File.WRITE)
@parse_args(r'^\"(.+?)\", \[(.+)\], \[\/\* \d+ vars? \*\/\]$', True)
def handle_execve(self, args, _result):
# Even if in practice execve() doesn't return when it succeeds, strace
# still prints '0' as the result.
filepath = args[0]
self._handle_file(filepath, Results.File.READ)
self.executable = self._mangle(filepath)
try:
self.command = strace_process_quoted_arguments(args[1])
except ValueError as e:
raise TracingFailure(
'Failed to process command line argument:\n%s' % e.args[0],
None, None, None)
def handle_exit_group(self, _args, _result):
"""Removes cwd."""
self.cwd = None
@parse_args(r'^(\d+|AT_FDCWD), \"(.*?)\", ([A-Z\_\|]+)(|, \d+)$', True)
def handle_faccessat(self, args, _results):
if args[0] == 'AT_FDCWD':
self._handle_file(args[1], Results.File.TOUCHED)
else:
raise Exception('Relative faccess not implemented.')
def handle_fallocate(self, _args, result):
pass
def handle_fork(self, args, result):
self._handle_unknown('fork', args, result)
def handle_futex(self, _args, _result):
pass
@parse_args(r'^\"(.+?)\", (\d+)$', False)
def handle_getcwd(self, args, _result):
if os.path.isabs(args[0]):
logging.debug('handle_getcwd(%d, %s)' % (self.pid, self.cwd))
if not isinstance(self.cwd, unicode):
# Take the occasion to reset the path.
self.cwd = self._mangle(args[0])
else:
# It should always match.
assert self.cwd == Strace.load_filename(args[0]), (
self.cwd, args[0])
@parse_args(r'^\"(.+?)\", \"(.+?)\"$', True)
def handle_link(self, args, _result):
self._handle_file(args[0], Results.File.READ)
self._handle_file(args[1], Results.File.WRITE)
@parse_args(r'\"(.+?)\", \{.+?, \.\.\.\}', True)
def handle_lstat(self, args, _result):
self._handle_file(args[0], Results.File.TOUCHED)
def handle_mkdir(self, _args, _result):
# We track content, not directories.
pass
@parse_args(r'^\"(.*?)\", ([A-Z\_\|]+)(|, \d+)$', False)
def handle_open(self, args, _result):
if 'O_DIRECTORY' in args[1]:
return
self._handle_file(
args[0],
Results.File.READ if 'O_RDONLY' in args[1] else Results.File.WRITE)
@parse_args(r'^(\d+|AT_FDCWD), \"(.*?)\", ([A-Z\_\|]+)(|, \d+)$', False)
def handle_openat(self, args, _result):
if 'O_DIRECTORY' in args[2]:
return
if args[0] == 'AT_FDCWD':
self._handle_file(
args[1],
Results.File.READ if 'O_RDONLY' in args[2]
else Results.File.WRITE)
else:
# TODO(maruel): Implement relative open if necessary instead of the
# AT_FDCWD flag; let's hope not, since this means tracking all active
# directory handles.
raise NotImplementedError('Relative open via openat not implemented.')
@parse_args(r'^\"(.+?)\", \".+?\"(\.\.\.)?, \d+$', False)
def handle_readlink(self, args, _result):
self._handle_file(args[0], Results.File.READ)
@parse_args(r'^\"(.+?)\", \"(.+?)\"$', True)
def handle_rename(self, args, _result):
self._handle_file(args[0], Results.File.READ)
self._handle_file(args[1], Results.File.WRITE)
def handle_rmdir(self, _args, _result):
# We track content, not directories.
pass
def handle_setxattr(self, _args, _result):
# TODO(maruel): self._handle_file(args[0], Results.File.WRITE)
pass
@parse_args(r'\"(.+?)\", \{.+?, \.\.\.\}', True)
def handle_stat(self, args, _result):
self._handle_file(args[0], Results.File.TOUCHED)
@parse_args(r'^\"(.+?)\", \"(.+?)\"$', True)
def handle_symlink(self, args, _result):
self._handle_file(args[0], Results.File.TOUCHED)
self._handle_file(args[1], Results.File.WRITE)
@parse_args(r'^\"(.+?)\", \d+', True)
def handle_truncate(self, args, _result):
self._handle_file(args[0], Results.File.WRITE)
def handle_unlink(self, _args, _result):
# In theory, the file had to be created anyway.
pass
def handle_unlinkat(self, _args, _result):
# In theory, the file had to be created anyway.
pass
def handle_statfs(self, _args, _result):
pass
def handle_utimensat(self, _args, _result):
pass
def handle_vfork(self, _args, result):
self._handling_forking('vfork', result)
@staticmethod
def _handle_unknown(function, args, result):
raise TracingFailure(
'Unexpected/unimplemented trace %s(%s)= %s' %
(function, args, result),
None, None, None)
def _handling_forking(self, name, result):
"""Transfers cwd."""
if result.startswith(('?', '-1')):
# The call failed.
return
# Update the other process right away.
childpid = int(result)
child = self._root().get_or_set_proc(childpid)
if child.parentid is not None or childpid in self.children:
raise TracingFailure(
'Found internal inconsistency in process lifetime detection '
'during a %s() call' % name,
None, None, None)
# Copy the cwd object.
child.initial_cwd = self.get_cwd()
child.parentid = self.pid
# It is necessary because the logs are processed out of order.
self.children.append(child)
def _handle_file(self, filepath, mode):
filepath = self._mangle(filepath)
self.add_file(filepath, mode)
def _mangle(self, filepath):
"""Decodes a filepath found in the log and convert it to a late-bound
path if necessary.
|filepath| is an strace 'encoded' string and the returned value is
either an unicode string if the path was absolute or a late bound path
otherwise.
"""
filepath = Strace.load_filename(filepath)
if os.path.isabs(filepath):
return filepath
else:
if isinstance(self.get_cwd(), unicode):
return os.path.normpath(os.path.join(self.get_cwd(), filepath))
return self.RelativePath(self.get_cwd(), filepath)
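# Illustration with hypothetical values: an absolute path such as
# u'/usr/bin/python' is returned unchanged, while a relative 'out/data.txt'
# is either joined to a known unicode cwd or wrapped in a RelativePath
# late-bound object, to be resolved once all the logs have been read.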
def __init__(self, blacklist, root_pid, initial_cwd):
"""|root_pid| may be None when the root process is not known.
In that case, a search is done after reading all the logs to figure out
the root process.
"""
super(Strace.Context, self).__init__(blacklist)
assert_is_renderable(initial_cwd)
self.root_pid = root_pid
self.initial_cwd = initial_cwd
def render(self):
"""Returns the string value of the initial cwd of the root process.
Used by RelativePath.
"""
return self.initial_cwd
def on_line(self, pid, line):
"""Transfers control into the Process.on_line() function."""
self.get_or_set_proc(pid).on_line(line.strip())
def to_results(self):
"""If necessary, finds back the root process and verify consistency."""
if not self.root_pid:
# The non-sudo case. The traced process was started by strace itself,
# so the pid of the traced process is not known.
root = [p for p in self._process_lookup.itervalues() if not p.parentid]
if len(root) == 1:
self.root_process = root[0]
# Save it for later.
self.root_pid = self.root_process.pid
else:
# The sudo case. The traced process was started manually so its pid is
# known.
self.root_process = self._process_lookup.get(self.root_pid)
if not self.root_process:
raise TracingFailure(
'Found internal inconsistency in process lifetime detection '
'while finding the root process',
None,
None,
None,
self.root_pid,
sorted(self._process_lookup))
process = self.root_process.to_results_process()
if sorted(self._process_lookup) != sorted(p.pid for p in process.all):
raise TracingFailure(
'Found internal inconsistency in process lifetime detection '
'while looking for len(tree) == len(list)',
None,
None,
None,
sorted(self._process_lookup),
sorted(p.pid for p in process.all))
return Results(process)
def get_or_set_proc(self, pid):
"""Returns the Context.Process instance for this pid or creates a new one.
"""
if not pid or not isinstance(pid, int):
raise TracingFailure(
'Unexpected value for pid: %r' % pid,
pid,
None,
None,
pid)
if pid not in self._process_lookup:
self._process_lookup[pid] = self.Process(self, pid)
return self._process_lookup[pid]
@classmethod
def traces(cls):
"""Returns the list of all handled traces to pass this as an argument to
strace.
"""
prefix = 'handle_'
return [i[len(prefix):] for i in dir(cls.Process) if i.startswith(prefix)]
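# For reference, traces() returns names such as ['access', 'chdir',
# 'execve', 'open', 'openat', ...], i.e. every syscall that has a
# handle_*() method on Context.Process; the Tracer below joins them into
# the '-e trace=...' argument passed to strace.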
class Tracer(ApiBase.Tracer):
MAX_LEN = 256
def __init__(self, logname, use_sudo):
super(Strace.Tracer, self).__init__(logname)
self.use_sudo = use_sudo
if use_sudo:
# TODO(maruel): Use the jump script systematically to make it easy to
# figure out the root process, so RelativePath is not necessary anymore.
self._child_script = create_exec_thunk()
self._scripts_to_cleanup.append(self._child_script)
def trace(self, cmd, cwd, tracename, output):
"""Runs strace on an executable.
When use_sudo=True, it is a 3-phase process: start the thunk, start
sudo strace with the pid of the thunk and then have the thunk os.execve()
the process to trace.
"""
logging.info('trace(%s, %s, %s, %s)' % (cmd, cwd, tracename, output))
assert os.path.isabs(cmd[0]), cmd[0]
assert os.path.isabs(cwd), cwd
assert os.path.normpath(cwd) == cwd, cwd
with self._lock:
if not self._initialized:
raise TracingFailure(
'Called Tracer.trace() on an uninitialized object',
None, None, None, tracename)
assert tracename not in (i['trace'] for i in self._traces)
stdout = stderr = None
if output:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
# Ensure all file related APIs are hooked.
traces = ','.join(Strace.Context.traces() + ['file'])
flags = [
# Each child process has its own trace file. It is necessary because
# strace may generate a corrupted log file if multiple processes are
# heavily doing syscalls simultaneously.
'-ff',
# Reduce whitespace usage.
'-a1',
# hex encode non-ascii strings.
# TODO(maruel): '-x',
# TODO(maruel): '-ttt',
# Signals are unnecessary noise here. Note the parser can cope with them,
# but filtering them out reduces the unnecessary output.
'-esignal=none',
# Print as much data as wanted.
'-s', '%d' % self.MAX_LEN,
'-e', 'trace=%s' % traces,
'-o', self._logname + '.' + tracename,
]
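# Putting it together, the non-sudo invocation looks roughly like this
# (illustrative, paths shortened):
#   strace -ff -a1 -esignal=none -s 256 -e trace=open,execve,...,file \
#     -o <logname>.<tracename> <cmd>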
if self.use_sudo:
pipe_r, pipe_w = os.pipe()
# Start the child process paused.
target_cmd = [sys.executable, self._child_script, str(pipe_r)] + cmd
logging.debug(' '.join(target_cmd))
child_proc = subprocess.Popen(
target_cmd,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
cwd=cwd)
# TODO(maruel): both processes must use the same UID for it to work
# without sudo. Look into why -p is failing at the moment without sudo.
trace_cmd = [
'sudo',
'strace',
'-p', str(child_proc.pid),
] + flags
logging.debug(' '.join(trace_cmd))
strace_proc = subprocess.Popen(
trace_cmd,
cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = strace_proc.stderr.readline()
if not re.match(r'^Process \d+ attached \- interrupt to quit$', line):
# TODO(maruel): Raise an exception.
assert False, line
# Now fire the child process.
os.write(pipe_w, 'x')
out = child_proc.communicate()[0]
strace_out = strace_proc.communicate()[0]
# TODO(maruel): if strace_proc.returncode: Add an exception.
saved_out = strace_out if strace_proc.returncode else out
root_pid = child_proc.pid
else:
# Non-sudo case.
trace_cmd = [
'strace',
] + flags + cmd
logging.debug(' '.join(trace_cmd))
child_proc = subprocess.Popen(
trace_cmd,
cwd=cwd,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr)
out = child_proc.communicate()[0]
# TODO(maruel): Walking the logs to figure out the root process would
# simplify parsing the logs a *lot*.
saved_out = out
# The trace reader will have to figure it out.
root_pid = None
with self._lock:
assert tracename not in (i['trace'] for i in self._traces)
self._traces.append(
{
'cmd': cmd,
'cwd': cwd,
'output': saved_out,
'pid': root_pid,
'trace': tracename,
})
return child_proc.returncode, out
def __init__(self, use_sudo=None):
super(Strace, self).__init__()
self.use_sudo = use_sudo
def get_tracer(self, logname):
return self.Tracer(logname, self.use_sudo)
@staticmethod
def clean_trace(logname):
if os.path.isfile(logname):
os.remove(logname)
# Also delete any pid specific file from previous traces.
for i in glob.iglob(logname + '.*'):
if i.rsplit('.', 1)[1].isdigit():
os.remove(i)
@classmethod
def parse_log(cls, logname, blacklist, trace_name):
logging.info('parse_log(%s, ..., %s)', logname, trace_name)
assert os.path.isabs(logname)
data = read_json(logname)
out = []
for item in data['traces']:
if trace_name and item['trace'] != trace_name:
continue
result = {
'output': item['output'],
'trace': item['trace'],
}
try:
context = cls.Context(blacklist, item['pid'], item['cwd'])
for pidfile in glob.iglob('%s.%s.*' % (logname, item['trace'])):
logging.debug('Reading %s', pidfile)
pid = pidfile.rsplit('.', 1)[1]
if pid.isdigit():
pid = int(pid)
found_line = False
for line in open(pidfile, 'rb'):
context.on_line(pid, line)
found_line = True
if not found_line:
# Ensures that a completely empty trace still creates the
# corresponding Process instance by logging a dummy line.
context.on_line(pid, '')
else:
logging.warning('Found unexpected file %s', pidfile)
result['results'] = context.to_results()
except TracingFailure:
result['exception'] = sys.exc_info()
out.append(result)
return out
class Dtrace(ApiBase):
"""Uses DTrace framework through dtrace. Requires root access.
Implies Mac OSX.
dtruss can't be used because it has compatibility issues with python.
Also, the pid->cwd handling needs to be done manually since OSX has no way to
get the absolute path of the 'cwd' dtrace variable from the probe.
Also, OSX doesn't populate curpsinfo->pr_psargs properly, see
https://discussions.apple.com/thread/1980539. So resort to handling execve()
manually.
errno is not printed in the log since this implementation currently only cares
about files that were successfully opened.
"""
class Context(ApiBase.Context):
# Format: index pid function(args)
RE_HEADER = re.compile(r'^\d+ (\d+) ([a-zA-Z_\-]+)\((.*?)\)$')
# Arguments parsing.
RE_DTRACE_BEGIN = re.compile(r'^\"(.+?)\"$')
RE_CHDIR = re.compile(r'^\"(.+?)\"$')
RE_EXECVE = re.compile(r'^\"(.+?)\", \[(\d+), (.+)\]$')
RE_OPEN = re.compile(r'^\"(.+?)\", (0x[0-9a-z]+), (0x[0-9a-z]+)$')
RE_PROC_START = re.compile(r'^(\d+), \"(.+?)\", (\d+)$')
RE_RENAME = re.compile(r'^\"(.+?)\", \"(.+?)\"$')
# O_DIRECTORY is not defined on Windows and dtrace doesn't exist on Windows.
O_DIRECTORY = os.O_DIRECTORY if hasattr(os, 'O_DIRECTORY') else None
O_RDWR = os.O_RDWR
O_WRONLY = os.O_WRONLY
class Process(ApiBase.Context.Process):
def __init__(self, *args):
super(Dtrace.Context.Process, self).__init__(*args)
self.cwd = self.initial_cwd
def __init__(self, blacklist, thunk_pid, initial_cwd):
logging.info(
'%s(%d, %s)' % (self.__class__.__name__, thunk_pid, initial_cwd))
super(Dtrace.Context, self).__init__(blacklist)
assert isinstance(initial_cwd, unicode), initial_cwd
# Process ID of the temporary script created by create_subprocess_thunk().
self._thunk_pid = thunk_pid
self._initial_cwd = initial_cwd
self._line_number = 0
def on_line(self, line):
assert isinstance(line, unicode), line
self._line_number += 1
match = self.RE_HEADER.match(line)
if not match:
raise TracingFailure(
'Found malformed line: %s' % line,
None,
self._line_number,
line)
fn = getattr(
self,
'handle_%s' % match.group(2).replace('-', '_'),
self._handle_ignored)
# It is guaranteed to succeed because of the regexp. Or at least I thought
# it would.
pid = int(match.group(1))
try:
return fn(pid, match.group(3))
except TracingFailure, e:
# Hack in the values since the handler could be a static function.
e.pid = pid
e.line = line
e.line_number = self._line_number
# Re-raise the modified exception.
raise
except (KeyError, NotImplementedError, ValueError), e:
raise TracingFailure(
'Trace generated a %s exception: %s' % (
e.__class__.__name__, str(e)),
pid,
self._line_number,
line,
e)
def to_results(self):
process = self.root_process.to_results_process()
# Internal consistency check.
if sorted(self._process_lookup) != sorted(p.pid for p in process.all):
raise TracingFailure(
'Found internal inconsistency in process lifetime detection '
'while looking for len(tree) == len(list)',
None,
None,
None,
sorted(self._process_lookup),
sorted(p.pid for p in process.all))
return Results(process)
def handle_dtrace_BEGIN(self, _pid, args):
if not self.RE_DTRACE_BEGIN.match(args):
raise TracingFailure(
'Found internal inconsistency in dtrace_BEGIN log line',
None, None, None)
def handle_proc_start(self, pid, args):
"""Transfers cwd.
The dtrace script already takes care of only tracing the processes that
are child of the traced processes so there is no need to verify the
process hierarchy.
"""
if pid in self._process_lookup:
raise TracingFailure(
'Found internal inconsistency in proc_start: %d started two times' %
pid,
None, None, None)
match = self.RE_PROC_START.match(args)
if not match:
raise TracingFailure(
'Failed to parse arguments: %s' % args,
None, None, None)
ppid = int(match.group(1))
if ppid == self._thunk_pid and not self.root_process:
proc = self.root_process = self.Process(
self.blacklist, pid, self._initial_cwd)
elif ppid in self._process_lookup:
proc = self.Process(self.blacklist, pid, self._process_lookup[ppid].cwd)
self._process_lookup[ppid].children.append(proc)
else:
# Another process tree, ignore.
return
self._process_lookup[pid] = proc
logging.debug(
'New child: %s -> %d cwd:%s' %
(ppid, pid, render(proc.initial_cwd)))
def handle_proc_exit(self, pid, _args):
"""Removes cwd."""
if pid in self._process_lookup:
# self._thunk_pid is not traced itself and other traces run neither.
self._process_lookup[pid].cwd = None
def handle_execve(self, pid, args):
"""Sets the process' executable.
TODO(maruel): Read command line arguments. See
https://discussions.apple.com/thread/1980539 for an example.
https://gist.github.com/1242279
Will have to put the answer at http://stackoverflow.com/questions/7556249.
:)
"""
if not pid in self._process_lookup:
# Another process tree, ignore.
return
match = self.RE_EXECVE.match(args)
if not match:
raise TracingFailure(
'Failed to parse arguments: %r' % args,
None, None, None)
proc = self._process_lookup[pid]
proc.executable = match.group(1)
self._handle_file(pid, proc.executable, Results.File.READ)
proc.command = self.process_escaped_arguments(match.group(3))
if int(match.group(2)) != len(proc.command):
raise TracingFailure(
'Failed to parse execve() arguments: %s' % args,
None, None, None)
def handle_chdir(self, pid, args):
"""Updates cwd."""
if pid not in self._process_lookup:
# Another process tree, ignore.
return
cwd = self.RE_CHDIR.match(args).group(1)
if not cwd.startswith('/'):
cwd2 = os.path.join(self._process_lookup[pid].cwd, cwd)
logging.debug('handle_chdir(%d, %s) -> %s' % (pid, cwd, cwd2))
else:
logging.debug('handle_chdir(%d, %s)' % (pid, cwd))
cwd2 = cwd
self._process_lookup[pid].cwd = cwd2
def handle_open_nocancel(self, pid, args):
"""Redirects to handle_open()."""
return self.handle_open(pid, args)
def handle_open(self, pid, args):
if pid not in self._process_lookup:
# Another process tree, ignore.
return
match = self.RE_OPEN.match(args)
if not match:
raise TracingFailure(
'Failed to parse arguments: %s' % args,
None, None, None)
flag = int(match.group(2), 16)
if self.O_DIRECTORY & flag == self.O_DIRECTORY:
# Ignore directories.
return
self._handle_file(
pid,
match.group(1),
Results.File.READ if not ((self.O_RDWR | self.O_WRONLY) & flag)
else Results.File.WRITE)
def handle_rename(self, pid, args):
if pid not in self._process_lookup:
# Another process tree, ignore.
return
match = self.RE_RENAME.match(args)
if not match:
raise TracingFailure(
'Failed to parse arguments: %s' % args,
None, None, None)
self._handle_file(pid, match.group(1), Results.File.READ)
self._handle_file(pid, match.group(2), Results.File.WRITE)
def _handle_file(self, pid, filepath, mode):
if not filepath.startswith('/'):
filepath = os.path.join(self._process_lookup[pid].cwd, filepath)
# We can get '..' in the path.
filepath = os.path.normpath(filepath)
# Sadly, still need to filter out directories here;
# saw open_nocancel(".", 0, 0) = 0 lines.
if os.path.isdir(filepath):
return
self._process_lookup[pid].add_file(filepath, mode)
def handle_ftruncate(self, pid, args):
"""Just used as a signal to kill dtrace, ignoring."""
pass
@staticmethod
def _handle_ignored(pid, args):
"""Is called for all the event traces that are not handled."""
raise NotImplementedError('Please implement me')
@staticmethod
def process_escaped_arguments(text):
"""Extracts escaped arguments on a string and return the arguments as a
list.
Implemented as an automaton.
Example:
With text = '\\001python2.7\\001-c\\001print(\\"hi\\")\\0', the
function will return ['python2.7', '-c', 'print("hi")']
"""
if not text.endswith('\\0'):
raise ValueError('String is not null terminated: %r' % text, text)
text = text[:-2]
def unescape(x):
"""Replaces '\\' with '\' and '\?' (where ? is anything) with ?."""
out = []
escaped = False
for i in x:
if i == '\\' and not escaped:
escaped = True
continue
escaped = False
out.append(i)
return ''.join(out)
return [unescape(i) for i in text.split('\\001')]
class Tracer(ApiBase.Tracer):
# pylint: disable=C0301
#
# To understand the following code, you'll want to take a look at:
# http://developers.sun.com/solaris/articles/dtrace_quickref/dtrace_quickref.html
# https://wikis.oracle.com/display/DTrace/Variables
# http://docs.oracle.com/cd/E19205-01/820-4221/
#
# 0. Dump all the valid probes into a text file. It is important, you
# want to redirect into a file and you don't want to constantly 'sudo'.
# $ sudo dtrace -l > probes.txt
#
# 1. Count the number of probes:
# $ wc -l probes.txt
# 81823 # On OSX 10.7, including 1 header line.
#
# 2. List providers, intentionally skipping all the 'syspolicy10925' and the
# likes and skipping the header with NR>1:
# $ awk 'NR>1 { print $2 }' probes.txt | sort | uniq | grep -v '[[:digit:]]'
# dtrace
# fbt
# io
# ip
# lockstat
# mach_trap
# proc
# profile
# sched
# syscall
# tcp
# vminfo
#
# 3. List of valid probes:
# $ grep syscall probes.txt | less
# or use dtrace directly:
# $ sudo dtrace -l -P syscall | less
#
# trackedpid is an associative array where its value can be 0, 1 or 2.
# 0 is for untracked processes and is the default value for items not
# in the associative array.
# 1 is for tracked processes.
# 2 is for the script created by create_subprocess_thunk() only. It is not
# tracked itself but all its descendants are.
#
# The script will kill itself only once waiting_to_die == 1 and
# current_processes == 0, so that both getlogin() was called and that
# all traced processes exited.
#
# TODO(maruel): Use cacheable predicates. See
# https://wikis.oracle.com/display/DTrace/Performance+Considerations
D_CODE = """
dtrace:::BEGIN {
waiting_to_die = 0;
current_processes = 0;
logindex = 0;
printf("%d %d %s_%s(\\"%s\\")\\n",
logindex, PID, probeprov, probename, SCRIPT);
logindex++;
}
proc:::start /trackedpid[ppid]/ {
trackedpid[pid] = 1;
current_processes += 1;
printf("%d %d %s_%s(%d, \\"%s\\", %d)\\n",
logindex, pid, probeprov, probename,
ppid,
execname,
current_processes);
logindex++;
}
/* Should use SCRIPT but there is no access to this variable at that
* point. */
proc:::start /ppid == PID && execname == "Python"/ {
trackedpid[pid] = 2;
current_processes += 1;
printf("%d %d %s_%s(%d, \\"%s\\", %d)\\n",
logindex, pid, probeprov, probename,
ppid,
execname,
current_processes);
logindex++;
}
proc:::exit /trackedpid[pid] &&
current_processes == 1 &&
waiting_to_die == 1/ {
trackedpid[pid] = 0;
current_processes -= 1;
printf("%d %d %s_%s(%d)\\n",
logindex, pid, probeprov, probename,
current_processes);
logindex++;
exit(0);
}
proc:::exit /trackedpid[pid]/ {
trackedpid[pid] = 0;
current_processes -= 1;
printf("%d %d %s_%s(%d)\\n",
logindex, pid, probeprov, probename,
current_processes);
logindex++;
}
/* Use an arcane function to detect when we need to die */
syscall::ftruncate:entry /pid == PID && arg0 == FILE_ID/ {
waiting_to_die = 1;
printf("%d %d %s()\\n", logindex, pid, probefunc);
logindex++;
}
syscall::ftruncate:entry /
pid == PID && arg0 == FILE_ID && current_processes == 0/ {
exit(0);
}
syscall::open*:entry /trackedpid[pid] == 1/ {
self->open_arg0 = arg0;
self->open_arg1 = arg1;
self->open_arg2 = arg2;
}
syscall::open*:return /trackedpid[pid] == 1 && errno == 0/ {
this->open_arg0 = copyinstr(self->open_arg0);
printf("%d %d %s(\\"%s\\", 0x%x, 0x%x)\\n",
logindex, pid, probefunc,
this->open_arg0,
self->open_arg1,
self->open_arg2);
logindex++;
this->open_arg0 = 0;
}
syscall::open*:return /trackedpid[pid] == 1/ {
self->open_arg0 = 0;
self->open_arg1 = 0;
self->open_arg2 = 0;
}
syscall::rename:entry /trackedpid[pid] == 1/ {
self->rename_arg0 = arg0;
self->rename_arg1 = arg1;
}
syscall::rename:return /trackedpid[pid] == 1 && errno == 0/ {
this->rename_arg0 = copyinstr(self->rename_arg0);
this->rename_arg1 = copyinstr(self->rename_arg1);
printf("%d %d %s(\\"%s\\", \\"%s\\")\\n",
logindex, pid, probefunc,
this->rename_arg0,
this->rename_arg1);
logindex++;
this->rename_arg0 = 0;
this->rename_arg1 = 0;
}
syscall::rename:return /trackedpid[pid] == 1/ {
self->rename_arg0 = 0;
self->rename_arg1 = 0;
}
/* Track chdir; it's painful because it only receives relative paths.
*/
syscall::chdir:entry /trackedpid[pid] == 1/ {
self->chdir_arg0 = arg0;
}
syscall::chdir:return /trackedpid[pid] == 1 && errno == 0/ {
this->chdir_arg0 = copyinstr(self->chdir_arg0);
printf("%d %d %s(\\"%s\\")\\n",
logindex, pid, probefunc,
this->chdir_arg0);
logindex++;
this->chdir_arg0 = 0;
}
syscall::chdir:return /trackedpid[pid] == 1/ {
self->chdir_arg0 = 0;
}
"""
# execve-specific code, tends to throw a lot of exceptions.
D_CODE_EXECVE = """
/* Finally what we care about! */
syscall::exec*:entry /trackedpid[pid]/ {
self->exec_arg0 = copyinstr(arg0);
/* Incrementally probe for a NULL in the argv parameter of execve() to
* figure out argc. */
/* TODO(maruel): Skip the remaining copyin() when a NULL pointer was
* found. */
self->exec_argc = 0;
/* Probe for argc==1 */
this->exec_argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->exec_argc + 1));
self->exec_argc = this->exec_argv[self->exec_argc] ?
(self->exec_argc + 1) : self->exec_argc;
/* Probe for argc==2 */
this->exec_argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->exec_argc + 1));
self->exec_argc = this->exec_argv[self->exec_argc] ?
(self->exec_argc + 1) : self->exec_argc;
/* Probe for argc==3 */
this->exec_argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->exec_argc + 1));
self->exec_argc = this->exec_argv[self->exec_argc] ?
(self->exec_argc + 1) : self->exec_argc;
/* Probe for argc==4 */
this->exec_argv = (user_addr_t*)copyin(
arg1, sizeof(user_addr_t) * (self->exec_argc + 1));
self->exec_argc = this->exec_argv[self->exec_argc] ?
(self->exec_argc + 1) : self->exec_argc;
/* Copy the inputs strings since there is no guarantee they'll be
* present after the call completed. */
self->exec_argv0 = (self->exec_argc > 0) ?
copyinstr(this->exec_argv[0]) : "";
self->exec_argv1 = (self->exec_argc > 1) ?
copyinstr(this->exec_argv[1]) : "";
self->exec_argv2 = (self->exec_argc > 2) ?
copyinstr(this->exec_argv[2]) : "";
self->exec_argv3 = (self->exec_argc > 3) ?
copyinstr(this->exec_argv[3]) : "";
this->exec_argv = 0;
}
syscall::exec*:return /trackedpid[pid] && errno == 0/ {
/* We need to join strings here, as using multiple printf() would
* cause tearing when multiple threads/processes are traced.
* Since it is impossible to escape a string and join it to another one,
* like sprintf("%s%S", previous, more), use hackery.
* Each of the elements are split with a \\1. \\0 cannot be used because
* it is simply ignored. This will conflict with any program putting a
* \\1 in their execve() string but this should be "rare enough" */
this->args = "";
/* Process exec_argv[0] */
this->args = strjoin(
this->args, (self->exec_argc > 0) ? self->exec_argv0 : "");
/* Process exec_argv[1] */
this->args = strjoin(
this->args, (self->exec_argc > 1) ? "\\1" : "");
this->args = strjoin(
this->args, (self->exec_argc > 1) ? self->exec_argv1 : "");
/* Process exec_argv[2] */
this->args = strjoin(
this->args, (self->exec_argc > 2) ? "\\1" : "");
this->args = strjoin(
this->args, (self->exec_argc > 2) ? self->exec_argv2 : "");
/* Process exec_argv[3] */
this->args = strjoin(
this->args, (self->exec_argc > 3) ? "\\1" : "");
this->args = strjoin(
this->args, (self->exec_argc > 3) ? self->exec_argv3 : "");
/* Prints self->exec_argc to permit verifying the internal
* consistency since this code is quite fishy. */
printf("%d %d %s(\\"%s\\", [%d, %S])\\n",
logindex, pid, probefunc,
self->exec_arg0,
self->exec_argc,
this->args);
logindex++;
this->args = 0;
}
syscall::exec*:return /trackedpid[pid]/ {
self->exec_arg0 = 0;
self->exec_argc = 0;
self->exec_argv0 = 0;
self->exec_argv1 = 0;
self->exec_argv2 = 0;
self->exec_argv3 = 0;
}
"""
# Code currently not used.
D_EXTRANEOUS = """
/* This is a good learning experience, since it traces a lot of things
* related to the process and child processes.
* Warning: it generates a gigantic log. For example, tracing
* "data/trace_inputs/child1.py --child" generates a 2mb log and takes
* several minutes to execute.
*/
/*
mach_trap::: /trackedpid[pid] == 1 || trackedpid[ppid]/ {
printf("%d %d %s_%s() = %d\\n",
logindex, pid, probeprov, probefunc, errno);
logindex++;
}
proc::: /trackedpid[pid] == 1 || trackedpid[ppid]/ {
printf("%d %d %s_%s() = %d\\n",
logindex, pid, probeprov, probefunc, errno);
logindex++;
}
sched::: /trackedpid[pid] == 1 || trackedpid[ppid]/ {
printf("%d %d %s_%s() = %d\\n",
logindex, pid, probeprov, probefunc, errno);
logindex++;
}
syscall::: /trackedpid[pid] == 1 || trackedpid[ppid]/ {
printf("%d %d %s_%s() = %d\\n",
logindex, pid, probeprov, probefunc, errno);
logindex++;
}
vminfo::: /trackedpid[pid] == 1 || trackedpid[ppid]/ {
printf("%d %d %s_%s() = %d\\n",
logindex, pid, probeprov, probefunc, errno);
logindex++;
}
*/
/* TODO(maruel): *stat* functions and friends
syscall::access:return,
syscall::chdir:return,
syscall::chflags:return,
syscall::chown:return,
syscall::chroot:return,
syscall::getattrlist:return,
syscall::getxattr:return,
syscall::lchown:return,
syscall::lstat64:return,
syscall::lstat:return,
syscall::mkdir:return,
syscall::pathconf:return,
syscall::readlink:return,
syscall::removexattr:return,
syscall::setxattr:return,
syscall::stat64:return,
syscall::stat:return,
syscall::truncate:return,
syscall::unlink:return,
syscall::utimes:return,
*/
"""
def __init__(self, logname, use_sudo):
"""Starts the log collection with dtrace.
Requires root access or chmod 4555 on dtrace. dtrace is asynchronous so
this needs to wait for dtrace to be "warmed up".
"""
super(Dtrace.Tracer, self).__init__(logname)
# This script is used as a signal to figure out the root process.
self._signal_script = create_subprocess_thunk()
self._scripts_to_cleanup.append(self._signal_script)
# This unique dummy temp file is used to signal the dtrace script that it
# should stop as soon as all the child processes are done. A bit hackish
# but works fine enough.
self._dummy_file_id, self._dummy_file_name = tempfile.mkstemp(
prefix='trace_signal_file')
dtrace_path = '/usr/sbin/dtrace'
if not os.path.isfile(dtrace_path):
dtrace_path = 'dtrace'
elif use_sudo is None and (os.stat(dtrace_path).st_mode & stat.S_ISUID):
# No need to sudo. For those following at home, don't do that.
use_sudo = False
# Note: do not use the -p flag. It's useless if the initial process quits
# too fast, resulting in missing traces from the grand-children. The D
# code manages the dtrace lifetime itself.
trace_cmd = [
dtrace_path,
# Use a larger buffer if getting 'out of scratch space' errors.
# Ref: https://wikis.oracle.com/display/DTrace/Options+and+Tunables
'-b', '10m',
'-x', 'dynvarsize=10m',
#'-x', 'dtrace_global_maxsize=1m',
'-x', 'evaltime=exec',
'-o', '/dev/stderr',
'-q',
'-n', self._get_dtrace_code(),
]
if use_sudo is not False:
trace_cmd.insert(0, 'sudo')
with open(self._logname + '.log', 'wb') as logfile:
self._dtrace = subprocess.Popen(
trace_cmd, stdout=logfile, stderr=subprocess.STDOUT)
logging.debug('Started dtrace pid: %d' % self._dtrace.pid)
# Reads until one line is printed, which signifies dtrace is up and ready.
with open(self._logname + '.log', 'rb') as logfile:
while 'dtrace_BEGIN' not in logfile.readline():
if self._dtrace.poll() is not None:
# Do a busy wait. :/
break
logging.debug('dtrace started')
def _get_dtrace_code(self):
"""Setups the D code to implement child process tracking.
Injects the cookie in the script so it knows when to stop.
The script will detect any instance of the script created with
create_subprocess_thunk() and will start tracing it.
"""
out = (
'inline int PID = %d;\n'
'inline string SCRIPT = "%s";\n'
'inline int FILE_ID = %d;\n'
'\n'
'%s') % (
os.getpid(),
self._signal_script,
self._dummy_file_id,
self.D_CODE)
if os.environ.get('TRACE_INPUTS_DTRACE_ENABLE_EXECVE') == '1':
# Do not enable by default since it tends to spew dtrace: error lines
# because the execve() parameters are not in valid memory at the time of
# logging.
# TODO(maruel): Find a way to make this reliable since it's useful but
# only works in limited/trivial use cases for now.
out += self.D_CODE_EXECVE
return out
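# The injected prologue looks like the following (values are illustrative):
#   inline int PID = 12345;
#   inline string SCRIPT = "/tmp/tmpXXXXXX";
#   inline int FILE_ID = 4;
# followed by D_CODE and, when TRACE_INPUTS_DTRACE_ENABLE_EXECVE=1, by
# D_CODE_EXECVE.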
def trace(self, cmd, cwd, tracename, output):
"""Runs dtrace on an executable.
Since dtruss is broken when it starts the process itself or when tracing
child processes, this code starts a wrapper process
generated with create_subprocess_thunk() which starts the executable to
trace.
"""
logging.info('trace(%s, %s, %s, %s)' % (cmd, cwd, tracename, output))
assert os.path.isabs(cmd[0]), cmd[0]
assert os.path.isabs(cwd), cwd
assert os.path.normpath(cwd) == cwd, cwd
with self._lock:
if not self._initialized:
raise TracingFailure(
'Called Tracer.trace() on an uninitialized object',
None, None, None, tracename)
assert tracename not in (i['trace'] for i in self._traces)
# Starts the script wrapper to start the child process. This signals the
# dtrace script that this process is to be traced.
stdout = stderr = None
if output:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
child_cmd = [
sys.executable,
self._signal_script,
tracename,
]
# Call a dummy function so that dtrace knows I'm about to launch a process
# that needs to be traced.
# Yummy.
child = subprocess.Popen(
child_cmd + fix_python_path(cmd),
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
cwd=cwd)
logging.debug('Started child pid: %d' % child.pid)
out = child.communicate()[0]
# This doesn't mean tracing is done; one of the grand-child processes may
# still be alive. It will be tracked with the dtrace script.
with self._lock:
assert tracename not in (i['trace'] for i in self._traces)
self._traces.append(
{
'cmd': cmd,
'cwd': cwd,
'pid': child.pid,
'output': out,
'trace': tracename,
})
return child.returncode, out
def close(self, timeout=None):
"""Terminates dtrace."""
logging.debug('close(%s)' % timeout)
try:
try:
super(Dtrace.Tracer, self).close(timeout)
# Signal dtrace that it should stop now.
# ftruncate doesn't exist on Windows.
os.ftruncate(self._dummy_file_id, 0) # pylint: disable=E1101
if timeout:
start = time.time()
# Use polling. :/
while (self._dtrace.poll() is None and
(time.time() - start) < timeout):
time.sleep(0.1)
self._dtrace.kill()
self._dtrace.wait()
finally:
# Make sure to kill it in any case.
if self._dtrace.poll() is None:
try:
self._dtrace.kill()
self._dtrace.wait()
except OSError:
pass
if self._dtrace.returncode != 0:
# Warn about any dtrace failure but basically ignore it.
print 'dtrace failure: %s' % self._dtrace.returncode
finally:
os.close(self._dummy_file_id)
os.remove(self._dummy_file_name)
def post_process_log(self):
"""Sorts the log back in order when each call occured.
dtrace doesn't save the buffer in strict order since it keeps one buffer
per CPU.
"""
super(Dtrace.Tracer, self).post_process_log()
logname = self._logname + '.log'
with open(logname, 'rb') as logfile:
lines = [l for l in logfile if l.strip()]
errors = [l for l in lines if l.startswith('dtrace:')]
if errors:
raise TracingFailure(
'Found errors in the trace: %s' % '\n'.join(errors),
None, None, None, logname)
try:
lines = sorted(lines, key=lambda l: int(l.split(' ', 1)[0]))
except ValueError:
raise TracingFailure(
'Found errors in the trace: %s' % '\n'.join(
l for l in lines if l.split(' ', 1)[0].isdigit()),
None, None, None, logname)
with open(logname, 'wb') as logfile:
logfile.write(''.join(lines))
def __init__(self, use_sudo=None):
super(Dtrace, self).__init__()
self.use_sudo = use_sudo
def get_tracer(self, logname):
return self.Tracer(logname, self.use_sudo)
@staticmethod
def clean_trace(logname):
for ext in ('', '.log'):
if os.path.isfile(logname + ext):
os.remove(logname + ext)
@classmethod
def parse_log(cls, logname, blacklist, trace_name):
logging.info('parse_log(%s, ..., %s)', logname, trace_name)
assert os.path.isabs(logname)
def blacklist_more(filepath):
# All the HFS metadata is in the form /.vol/...
return blacklist(filepath) or re.match(r'^\/\.vol\/.+$', filepath)
data = read_json(logname)
out = []
for item in data['traces']:
if trace_name and item['trace'] != trace_name:
continue
result = {
'output': item['output'],
'trace': item['trace'],
}
try:
context = cls.Context(blacklist_more, item['pid'], item['cwd'])
# It's fine to assume the file is UTF-8: OSX enforces the file names to
# be valid UTF-8 and we control the log output.
for line in codecs.open(logname + '.log', 'rb', encoding='utf-8'):
context.on_line(line)
result['results'] = context.to_results()
except TracingFailure:
result['exception'] = sys.exc_info()
out.append(result)
return out
class LogmanTrace(ApiBase):
"""Uses the native Windows ETW based tracing functionality to trace a child
process.
Caveat: this implementation doesn't track cwd or initial_cwd. This is because
the Windows Kernel doesn't have a concept of 'current working directory' at
all. A Win32 process has a map of current directories, one per drive letter
and it is managed by the user mode kernel32.dll. In kernel, a file is always
opened relative to another file_object or as an absolute path. All the current
working directory logic is done in user mode.
"""
class Context(ApiBase.Context):
"""Processes a ETW log line and keeps the list of existent and non
existent files accessed.
Ignores directories.
"""
# These indexes are for the stripped version in json.
EVENT_NAME = 0
TYPE = 1
PID = 2
TID = 3
PROCESSOR_ID = 4
TIMESTAMP = 5
USER_DATA = 6
class Process(ApiBase.Context.Process):
def __init__(self, *args):
super(LogmanTrace.Context.Process, self).__init__(*args)
# Handle file objects that succeeded.
self.file_objects = {}
def __init__(self, blacklist, thunk_pid, trace_name, thunk_cmd):
logging.info(
'%s(%d, %s, %s)', self.__class__.__name__, thunk_pid, trace_name,
thunk_cmd)
super(LogmanTrace.Context, self).__init__(blacklist)
self._drive_map = DosDriveMap()
# Threads mapping to the corresponding process id.
self._threads_active = {}
# Process ID of the tracer, e.g. the temporary script created by
# create_subprocess_thunk(). This is tricky because the process id may
# have been reused.
self._thunk_pid = thunk_pid
self._thunk_cmd = thunk_cmd
self._trace_name = trace_name
self._line_number = 0
self._thunk_process = None
def on_line(self, line):
"""Processes a json Event line."""
self._line_number += 1
try:
# By Opcode
handler = getattr(
self,
'handle_%s_%s' % (line[self.EVENT_NAME], line[self.TYPE]),
None)
if not handler:
raise TracingFailure(
'Unexpected event %s_%s' % (
line[self.EVENT_NAME], line[self.TYPE]),
None, None, None)
handler(line)
except TracingFailure, e:
# Hack in the values since the handler could be a static function.
e.pid = line[self.PID]
e.line = line
e.line_number = self._line_number
# Re-raise the modified exception.
raise
except (KeyError, NotImplementedError, ValueError), e:
raise TracingFailure(
'Trace generated a %s exception: %s' % (
e.__class__.__name__, str(e)),
line[self.PID],
self._line_number,
line,
e)
def to_results(self):
if not self.root_process:
raise TracingFailure(
'Failed to detect the initial process %d' % self._thunk_pid,
None, None, None)
process = self.root_process.to_results_process()
return Results(process)
def _thread_to_process(self, tid):
"""Finds the process from the thread id."""
tid = int(tid, 16)
pid = self._threads_active.get(tid)
if not pid or not self._process_lookup.get(pid):
return
return self._process_lookup[pid]
@classmethod
def handle_EventTrace_Header(cls, line):
"""Verifies no event was dropped, e.g. no buffer overrun occured."""
BUFFER_SIZE = cls.USER_DATA
#VERSION = cls.USER_DATA + 1
#PROVIDER_VERSION = cls.USER_DATA + 2
#NUMBER_OF_PROCESSORS = cls.USER_DATA + 3
#END_TIME = cls.USER_DATA + 4
#TIMER_RESOLUTION = cls.USER_DATA + 5
#MAX_FILE_SIZE = cls.USER_DATA + 6
#LOG_FILE_MODE = cls.USER_DATA + 7
#BUFFERS_WRITTEN = cls.USER_DATA + 8
#START_BUFFERS = cls.USER_DATA + 9
#POINTER_SIZE = cls.USER_DATA + 10
EVENTS_LOST = cls.USER_DATA + 11
#CPU_SPEED = cls.USER_DATA + 12
#LOGGER_NAME = cls.USER_DATA + 13
#LOG_FILE_NAME = cls.USER_DATA + 14
#BOOT_TIME = cls.USER_DATA + 15
#PERF_FREQ = cls.USER_DATA + 16
#START_TIME = cls.USER_DATA + 17
#RESERVED_FLAGS = cls.USER_DATA + 18
#BUFFERS_LOST = cls.USER_DATA + 19
#SESSION_NAME_STRING = cls.USER_DATA + 20
#LOG_FILE_NAME_STRING = cls.USER_DATA + 21
if line[EVENTS_LOST] != '0':
raise TracingFailure(
( '%s events were lost during trace, please increase the buffer '
'size from %s') % (line[EVENTS_LOST], line[BUFFER_SIZE]),
None, None, None)
def handle_FileIo_Cleanup(self, line):
"""General wisdom: if a file is closed, it's because it was opened.
Note that FileIo_Close is not used since if a file was opened properly but
not closed before the process exits, only Cleanup will be logged.
"""
#IRP = self.USER_DATA
TTID = self.USER_DATA + 1 # Thread ID, that's what we want.
FILE_OBJECT = self.USER_DATA + 2
#FILE_KEY = self.USER_DATA + 3
proc = self._thread_to_process(line[TTID])
if not proc:
# Not a process we care about.
return
file_object = line[FILE_OBJECT]
if file_object in proc.file_objects:
filepath, access_type = proc.file_objects.pop(file_object)
proc.add_file(filepath, access_type)
def handle_FileIo_Create(self, line):
"""Handles a file open.
All FileIo events are described at
http://msdn.microsoft.com/library/windows/desktop/aa363884.aspx
for some value of 'description'.
" (..) process and thread id values of the IO events (..) are not valid "
http://msdn.microsoft.com/magazine/ee358703.aspx
The FileIo.Create event doesn't tell whether the CreateFile() call
succeeded, so keep track of the file_object and check that it is
eventually closed with FileIo_Cleanup.
"""
#IRP = self.USER_DATA
TTID = self.USER_DATA + 1 # Thread ID, that's what we want.
FILE_OBJECT = self.USER_DATA + 2
#CREATE_OPTIONS = self.USER_DATA + 3
#FILE_ATTRIBUTES = self.USER_DATA + 4
#SHARE_ACCESS = self.USER_DATA + 5
OPEN_PATH = self.USER_DATA + 6
proc = self._thread_to_process(line[TTID])
if not proc:
# Not a process we care about.
return
raw_path = line[OPEN_PATH]
# Ignore directories and bare drive right away.
if raw_path.endswith(os.path.sep):
return
filepath = self._drive_map.to_win32(raw_path)
# Ignore bare drive right away. Some may still fall through with format
# like '\\?\X:'
if len(filepath) == 2:
return
file_object = line[FILE_OBJECT]
if os.path.isdir(filepath):
# There is no O_DIRECTORY equivalent on Windows. The closest is
# FILE_FLAG_BACKUP_SEMANTICS but it's not exactly right either. So
# simply discard directories as they are found.
return
# Override any stale file object.
# TODO(maruel): Figure out a way to detect if the file was opened for
# reading or writing. Sadly CREATE_OPTIONS doesn't seem to be of any help
# here. For now mark as None to make it clear we have no idea what it is
# about.
proc.file_objects[file_object] = (filepath, None)
def handle_FileIo_Rename(self, line):
# TODO(maruel): Handle?
pass
def handle_Process_End(self, line):
pid = line[self.PID]
if self._process_lookup.get(pid):
logging.info('Terminated: %d' % pid)
self._process_lookup[pid] = None
else:
logging.debug('Terminated: %d' % pid)
if self._thunk_process and self._thunk_process.pid == pid:
self._thunk_process = None
def handle_Process_Start(self, line):
"""Handles a new child process started by PID."""
#UNIQUE_PROCESS_KEY = self.USER_DATA
PROCESS_ID = self.USER_DATA + 1
#PARENT_PID = self.USER_DATA + 2
#SESSION_ID = self.USER_DATA + 3
#EXIT_STATUS = self.USER_DATA + 4
#DIRECTORY_TABLE_BASE = self.USER_DATA + 5
#USER_SID = self.USER_DATA + 6
IMAGE_FILE_NAME = self.USER_DATA + 7
COMMAND_LINE = self.USER_DATA + 8
ppid = line[self.PID]
pid = int(line[PROCESS_ID], 16)
command_line = CommandLineToArgvW(line[COMMAND_LINE])
logging.debug(
'New process %d->%d (%s) %s' %
(ppid, pid, line[IMAGE_FILE_NAME], command_line))
if pid == self._thunk_pid:
# Need to ignore processes we don't know about because the log is
# system-wide. self._thunk_pid shall start only one process.
# This is tricky though because Windows *loves* to reuse process id and
# it happens often that the process ID of the thunk script created by
# create_subprocess_thunk() is reused. So just detecting the pid here is
# not sufficient, we must confirm the command line.
if command_line[:len(self._thunk_cmd)] != self._thunk_cmd:
logging.info(
'Ignoring duplicate pid %d for %s: %s while searching for %s',
pid, self._trace_name, command_line, self._thunk_cmd)
return
# TODO(maruel): The check is quite weak. Add the thunk path.
if self._thunk_process:
raise TracingFailure(
( 'Parent process is _thunk_pid(%d) but thunk_process(%d) is '
'already set') % (self._thunk_pid, self._thunk_process.pid),
None, None, None)
proc = self.Process(self.blacklist, pid, None)
self._thunk_process = proc
return
elif ppid == self._thunk_pid and self._thunk_process:
proc = self.Process(self.blacklist, pid, None)
self.root_process = proc
ppid = None
elif self._process_lookup.get(ppid):
proc = self.Process(self.blacklist, pid, None)
self._process_lookup[ppid].children.append(proc)
else:
# Ignore
return
self._process_lookup[pid] = proc
proc.command = command_line
proc.executable = line[IMAGE_FILE_NAME]
# proc.command[0] may be the absolute path of 'executable' but it may be
# anything else too. If it happens that command[0] ends with executable,
# use it, otherwise defaults to the base name.
cmd0 = proc.command[0].lower()
if not cmd0.endswith('.exe'):
# TODO(maruel): That's not strictly true either.
cmd0 += '.exe'
if cmd0.endswith(proc.executable) and os.path.isfile(cmd0):
# Fix the path.
cmd0 = cmd0.replace('/', os.path.sep)
cmd0 = os.path.normpath(cmd0)
proc.executable = get_native_path_case(cmd0)
logging.info(
'New child: %s -> %d %s' % (ppid, pid, proc.executable))
def handle_Thread_End(self, line):
"""Has the same parameters as Thread_Start."""
tid = int(line[self.TID], 16)
self._threads_active.pop(tid, None)
def handle_Thread_Start(self, line):
"""Handles a new thread created.
Do not use self.PID here since a process' initial thread is created by
the parent process.
"""
PROCESS_ID = self.USER_DATA
TTHREAD_ID = self.USER_DATA + 1
#STACK_BASE = self.USER_DATA + 2
#STACK_LIMIT = self.USER_DATA + 3
#USER_STACK_BASE = self.USER_DATA + 4
#USER_STACK_LIMIT = self.USER_DATA + 5
#AFFINITY = self.USER_DATA + 6
#WIN32_START_ADDR = self.USER_DATA + 7
#TEB_BASE = self.USER_DATA + 8
#SUB_PROCESS_TAG = self.USER_DATA + 9
#BASE_PRIORITY = self.USER_DATA + 10
#PAGE_PRIORITY = self.USER_DATA + 11
#IO_PRIORITY = self.USER_DATA + 12
#THREAD_FLAGS = self.USER_DATA + 13
# Do not use self.PID here since a process' initial thread is created by
# the parent process.
pid = int(line[PROCESS_ID], 16)
tid = int(line[TTHREAD_ID], 16)
logging.debug('New thread pid:%d, tid:%d' % (pid, tid))
self._threads_active[tid] = pid
@classmethod
def supported_events(cls):
"""Returns all the procesed events."""
out = []
for member in dir(cls):
match = re.match(r'^handle_([A-Za-z]+)_([A-Za-z]+)$', member)
if match:
out.append(match.groups())
return out
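# supported_events() yields pairs such as [('EventTrace', 'Header'),
# ('FileIo', 'Cleanup'), ('FileIo', 'Create'), ('Process', 'Start'), ...],
# one per handle_<Event>_<Type>() method above; Tracer._trim_log() uses it
# to drop every other ETW event from the converted CSV.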
class Tracer(ApiBase.Tracer):
# The basic headers.
EXPECTED_HEADER = [
u'Event Name',
u'Type',
u'Event ID',
u'Version',
u'Channel',
u'Level', # 5
u'Opcode',
u'Task',
u'Keyword',
u'PID',
u'TID', # 10
u'Processor Number',
u'Instance ID',
u'Parent Instance ID',
u'Activity ID',
u'Related Activity ID', # 15
u'Clock-Time',
u'Kernel(ms)', # Both have a resolution of ~15ms which makes them
u'User(ms)', # pretty much useless.
u'User Data', # Extra arguments that are event-specific.
]
# Only the useful headers common to all entries are listed there. Any column
# at 19 or higher is dependent on the specific event.
EVENT_NAME = 0
TYPE = 1
PID = 9
TID = 10
PROCESSOR_ID = 11
TIMESTAMP = 16
NULL_GUID = '{00000000-0000-0000-0000-000000000000}'
USER_DATA = 19
class CsvReader(object):
"""CSV reader that reads files generated by tracerpt.exe.
csv.reader() fails to read them properly; it mangles file names quoted
with "" that contain a comma.
"""
# 0. Had a ',' or one of the following ' ' after a comma, next should
# be ' ', '"' or string or ',' for an empty field.
( HAD_DELIMITER,
# 1. Processing an unquoted field up to ','.
IN_STR,
# 2. Processing a new field starting with '"'.
STARTING_STR_QUOTED,
# 3. Second quote in a row at the start of a field. It could be either
# '""foo""' or '""'. Who the hell thought it was a great idea to use
# the same character for delimiting and escaping?
STARTING_SECOND_QUOTE,
# 4. A quote inside a quoted string where the previous character was
# not a quote, so the string is not empty. Can be either: end of a
# quoted string (a delimiter) or a quote escape. The next char must be
# either '"' or ','.
HAD_QUOTE_IN_QUOTED,
# 5. Second quote inside a quoted string.
HAD_SECOND_QUOTE_IN_A_ROW_IN_QUOTED,
# 6. Processing a field that started with '"'.
IN_STR_QUOTED) = range(7)
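# Illustrative example (not taken from a real log): the row
#   Foo, "c:\path, with comma", "a ""quoted"" word"
# is split into ['Foo', 'c:\\path, with comma', 'a "quoted" word'],
# which is exactly the case where csv.reader() gets the quoting wrong.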
def __init__(self, f):
self.f = f
def __iter__(self):
return self
def next(self):
"""Splits the line in fields."""
line = self.f.readline()
if not line:
raise StopIteration()
line = line.strip()
fields = []
state = self.HAD_DELIMITER
for i, c in enumerate(line):
if state == self.HAD_DELIMITER:
if c == ',':
# Empty field.
fields.append('')
elif c == ' ':
# Ignore initial whitespaces
pass
elif c == '"':
state = self.STARTING_STR_QUOTED
fields.append('')
else:
# Start of a new field.
state = self.IN_STR
fields.append(c)
elif state == self.IN_STR:
# Do not accept quote inside unquoted field.
assert c != '"', (i, c, line, fields)
if c == ',':
fields[-1] = fields[-1].strip()
state = self.HAD_DELIMITER
else:
fields[-1] = fields[-1] + c
elif state == self.STARTING_STR_QUOTED:
if c == '"':
# Do not store the character yet.
state = self.STARTING_SECOND_QUOTE
else:
state = self.IN_STR_QUOTED
fields[-1] = fields[-1] + c
elif state == self.STARTING_SECOND_QUOTE:
if c == ',':
# It was an empty field. '""' == ''.
state = self.HAD_DELIMITER
else:
fields[-1] = fields[-1] + '"' + c
state = self.IN_STR_QUOTED
elif state == self.HAD_QUOTE_IN_QUOTED:
if c == ',':
# End of the string.
state = self.HAD_DELIMITER
elif c == '"':
state = self.HAD_SECOND_QUOTE_IN_A_ROW_IN_QUOTED
else:
# The previous double-quote was just an unescaped quote.
fields[-1] = fields[-1] + '"' + c
state = self.IN_STR_QUOTED
elif state == self.HAD_SECOND_QUOTE_IN_A_ROW_IN_QUOTED:
if c == ',':
# End of the string.
state = self.HAD_DELIMITER
fields[-1] = fields[-1] + '"'
else:
# That's just how the logger rolls. Revert back to appending the
# char and "guess" it was a quote in a double-quoted string.
state = self.IN_STR_QUOTED
fields[-1] = fields[-1] + '"' + c
elif state == self.IN_STR_QUOTED:
if c == '"':
# Could be a delimiter or an escape.
state = self.HAD_QUOTE_IN_QUOTED
else:
fields[-1] = fields[-1] + c
if state == self.HAD_SECOND_QUOTE_IN_A_ROW_IN_QUOTED:
fields[-1] = fields[-1] + '"'
else:
assert state in (
# Terminated with a normal field.
self.IN_STR,
# Terminated with an empty field.
self.STARTING_SECOND_QUOTE,
# Terminated with a normal quoted field.
self.HAD_QUOTE_IN_QUOTED), (
line, state, fields)
return fields
def __init__(self, logname):
"""Starts the log collection.
Requires administrative access. logman.exe is synchronous so no need for a
"warmup" call. 'Windows Kernel Trace' is *localized* so use its GUID
instead. The GUID constant name is SystemTraceControlGuid. Lovely.
One can get the list of potentially interesting providers with:
"logman query providers | findstr /i file"
"""
super(LogmanTrace.Tracer, self).__init__(logname)
self._signal_script = create_subprocess_thunk()
self._scripts_to_cleanup.append(self._signal_script)
cmd_start = [
'logman.exe',
'start',
'NT Kernel Logger',
'-p', '{9e814aad-3204-11d2-9a82-006008a86939}',
# splitio,fileiocompletion,syscall,file,cswitch,img
'(process,fileio,thread)',
'-o', self._logname + '.etl',
'-ets', # Send directly to kernel
# Values extracted out of thin air.
# Event Trace Session buffer size in kb.
'-bs', '10240',
# Number of Event Trace Session buffers.
'-nb', '16', '256',
]
logging.debug('Running: %s' % cmd_start)
try:
subprocess.check_call(
cmd_start,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e:
if e.returncode == -2147024891:
print >> sys.stderr, 'Please restart with an elevated admin prompt'
elif e.returncode == -2144337737:
print >> sys.stderr, (
'A kernel trace was already running, stop it and try again')
raise
def trace(self, cmd, cwd, tracename, output):
logging.info('trace(%s, %s, %s, %s)' % (cmd, cwd, tracename, output))
assert os.path.isabs(cmd[0]), cmd[0]
assert os.path.isabs(cwd), cwd
assert os.path.normpath(cwd) == cwd, cwd
with self._lock:
if not self._initialized:
raise TracingFailure(
'Called Tracer.trace() on an uninitialized object',
None, None, None, tracename)
assert tracename not in (i['trace'] for i in self._traces)
# Use "logman -?" for help.
stdout = stderr = None
if output:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
# Run the child process.
logging.debug('Running: %s' % cmd)
# Use the temporary script generated with create_subprocess_thunk() so we
# have a clear pid owner. Since trace_inputs.py can be used as a library
# and could trace multiple processes simultaneously, it makes it more
# complex if the executable to be traced is executed directly here. It
# also solves issues related to logman.exe that needs to be executed to
# control the kernel trace.
child_cmd = [
sys.executable,
self._signal_script,
tracename,
]
child = subprocess.Popen(
child_cmd + fix_python_path(cmd),
cwd=cwd,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr)
logging.debug('Started child pid: %d' % child.pid)
out = child.communicate()[0]
# This doesn't mean all the grand-children are done. Sadly, we don't have
# a good way to determine that.
with self._lock:
assert tracename not in (i['trace'] for i in self._traces)
self._traces.append({
'cmd': cmd,
'cwd': cwd,
'output': out,
'pid': child.pid,
# Used to figure out the real process when process ids are reused.
'thunk_cmd': child_cmd,
'trace': tracename,
})
return child.returncode, out
def close(self, _timeout=None):
"""Stops the kernel log collection and converts the traces to text
representation.
"""
with self._lock:
try:
super(LogmanTrace.Tracer, self).close()
finally:
cmd_stop = [
'logman.exe',
'stop',
'NT Kernel Logger',
'-ets', # Sends the command directly to the kernel.
]
logging.debug('Running: %s' % cmd_stop)
subprocess.check_call(
cmd_stop,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def post_process_log(self):
"""Converts the .etl file into .csv then into .json."""
super(LogmanTrace.Tracer, self).post_process_log()
logformat = 'csv'
self._convert_log(logformat)
self._trim_log(logformat)
def _gen_logdata(self):
return {
'format': 'csv',
'traces': self._traces,
}
def _trim_log(self, logformat):
"""Reduces the amount of data in original log by generating a 'reduced'
log.
"""
if logformat == 'csv_utf16':
file_handle = codecs.open(
self._logname + '.' + logformat, 'r', encoding='utf-16')
elif logformat == 'csv':
assert sys.getfilesystemencoding() == 'mbcs'
file_handle = codecs.open(
self._logname + '.' + logformat, 'r',
encoding=get_current_encoding())
supported_events = LogmanTrace.Context.supported_events()
def trim(generator):
"""Loads items from the generator and returns the interesting data.
It filters out any uninteresting line and reduces the amount of data in
the trace.
"""
for index, line in enumerate(generator):
if not index:
if line != self.EXPECTED_HEADER:
raise TracingFailure(
'Found malformed header: %s' % line,
None, None, None)
continue
# As you can see, the CSV is full of useful non-redundant information:
if (line[2] != '0' or # Event ID
line[3] not in ('2', '3') or # Version
line[4] != '0' or # Channel
line[5] != '0' or # Level
line[7] != '0' or # Task
line[8] != '0x0000000000000000' or # Keyword
line[12] != '' or # Instance ID
line[13] != '' or # Parent Instance ID
line[14] != self.NULL_GUID or # Activity ID
line[15] != ''): # Related Activity ID
raise TracingFailure(
'Found unexpected values in line: %s' % ' '.join(line),
None, None, None)
if (line[self.EVENT_NAME], line[self.TYPE]) not in supported_events:
continue
yield [
line[self.EVENT_NAME],
line[self.TYPE],
line[self.PID],
line[self.TID],
line[self.PROCESSOR_ID],
line[self.TIMESTAMP],
] + line[self.USER_DATA:]
# The trim() call must not be converted into a list, since that would use too
# much memory for large traces. Use a CSV file as a workaround, since the JSON
# parser requires a complete in-memory file.
with open('%s.preprocessed' % self._logname, 'wb') as f:
# $ and * can't be used in file names on Windows, reducing the likelihood
# of having to escape a string.
out = csv.writer(
f, delimiter='$', quotechar='*', quoting=csv.QUOTE_MINIMAL)
for line in trim(self.CsvReader(file_handle)):
out.writerow([s.encode('utf-8') for s in line])
def _convert_log(self, logformat):
"""Converts the ETL trace to text representation.
Normally, 'csv' is sufficient. If complex scripts are used (like eastern
languages), use 'csv_utf16'. If localization gets in the way, use 'xml'.
Arguments:
- logformat: Text format to be generated, csv, csv_utf16 or xml.
Use "tracerpt -?" for help.
"""
LOCALE_INVARIANT = 0x7F
windll.kernel32.SetThreadLocale(LOCALE_INVARIANT)
cmd_convert = [
'tracerpt.exe',
'-l', self._logname + '.etl',
'-o', self._logname + '.' + logformat,
'-gmt', # Use UTC
'-y', # No prompt
# Use -of XML to get the header of each item after column 19, e.g. all
# the actual headers of 'User Data'.
]
if logformat == 'csv':
# tracerpt localizes the 'Type' column, which needlessly complicates
# parsing the output.
cmd_convert.extend(['-of', 'CSV'])
elif logformat == 'csv_utf16':
# This causes it to use UTF-16, which doubles the log size but ensures
# the log is readable for non-ASCII characters.
cmd_convert.extend(['-of', 'CSV', '-en', 'Unicode'])
elif logformat == 'xml':
cmd_convert.extend(['-of', 'XML'])
else:
raise ValueError('Unexpected log format \'%s\'' % logformat)
logging.debug('Running: %s' % cmd_convert)
# This can take tens of minutes for large logs.
# Redirects all output to stderr.
subprocess.check_call(
cmd_convert,
stdin=subprocess.PIPE,
stdout=sys.stderr,
stderr=sys.stderr)
def __init__(self, use_sudo=False): # pylint: disable=W0613
super(LogmanTrace, self).__init__()
# Ignore use_sudo. It's irrelevant on Windows but kept to simplify the API.
@staticmethod
def clean_trace(logname):
for ext in ('', '.csv', '.etl', '.json', '.xml', '.preprocessed'):
if os.path.isfile(logname + ext):
os.remove(logname + ext)
@classmethod
def parse_log(cls, logname, blacklist, trace_name):
logging.info('parse_log(%s, ..., %s)', logname, trace_name)
assert os.path.isabs(logname)
def blacklist_more(filepath):
# All the NTFS metadata is in the form x:\$EXTEND or stuff like that.
return blacklist(filepath) or re.match(r'[A-Z]\:\\\$EXTEND', filepath)
# Create a list of (Context, result_dict) tuples. This is necessary because
# the csv file may be larger than the amount of available memory.
contexes = [
(
cls.Context(
blacklist_more, item['pid'], item['trace'], item['thunk_cmd']),
{
'output': item['output'],
'trace': item['trace'],
},
)
for item in read_json(logname)['traces']
if not trace_name or item['trace'] == trace_name
]
# The log may be too large to fit in memory and it is not efficient to read
# it multiple times, so multiplex the contexes instead, which is slightly
# more awkward.
with open('%s.preprocessed' % logname, 'rb') as f:
lines = csv.reader(
f, delimiter='$', quotechar='*', quoting=csv.QUOTE_MINIMAL)
for encoded in lines:
line = [s.decode('utf-8') for s in encoded]
# Convert the PID in-place from hex.
line[cls.Context.PID] = int(line[cls.Context.PID], 16)
for context in contexes:
if 'exception' in context[1]:
continue
try:
context[0].on_line(line)
except TracingFailure:
context[1]['exception'] = sys.exc_info()
for context in contexes:
if 'exception' in context[1]:
continue
context[1]['results'] = context[0].to_results()
return [context[1] for context in contexes]
def get_api(**kwargs):
"""Returns the correct implementation for the current OS."""
if sys.platform == 'cygwin':
raise NotImplementedError(
'Not implemented for cygwin, start the script from Win32 python')
flavors = {
'win32': LogmanTrace,
'darwin': Dtrace,
'sunos5': Dtrace,
'freebsd7': Dtrace,
'freebsd8': Dtrace,
}
# Defaults to strace.
return flavors.get(sys.platform, Strace)(**kwargs)
def extract_directories(root_dir, files, blacklist):
"""Detects if all the files in a directory are in |files| and if so, replace
the individual files by a Results.Directory instance.
Takes a list of Results.File instances and returns a shorter list of
Results.File and Results.Directory instances.
Arguments:
- root_dir: Optional base directory that shouldn't be search further.
- files: list of Results.File instances.
- blacklist: regexp of files to ignore, for example r'.+\.pyc'.
"""
logging.info(
'extract_directories(%s, %d files, ...)' % (root_dir, len(files)))
assert not (root_dir or '').endswith(os.path.sep), root_dir
# It is important for root_dir to not be a symlinked path, make sure to call
# os.path.realpath() as needed.
assert not root_dir or (
os.path.realpath(get_native_path_case(root_dir)) == root_dir)
assert not any(isinstance(f, Results.Directory) for f in files)
# Remove non-existent files.
files = [f for f in files if f.existent]
if not files:
return files
# All files must share the same root, which can be None.
assert len(set(f.root for f in files)) == 1, set(f.root for f in files)
# Creates a {directory: {filename: File}} mapping, up to root.
buckets = {}
if root_dir:
buckets[root_dir] = {}
for fileobj in files:
path = fileobj.full_path
directory = os.path.dirname(path)
assert directory
# Do not use os.path.basename() so trailing os.path.sep is kept.
basename = path[len(directory)+1:]
files_in_directory = buckets.setdefault(directory, {})
files_in_directory[basename] = fileobj
# Add all the directories recursively up to root.
while True:
old_d = directory
directory = os.path.dirname(directory)
if directory + os.path.sep == root_dir or directory == old_d:
break
buckets.setdefault(directory, {})
root_prefix = len(root_dir) + 1 if root_dir else 0
for directory in sorted(buckets, reverse=True):
if not os.path.isdir(directory):
logging.debug(
'%s was a directory but doesn\'t exist anymore; ignoring', directory)
continue
actual = set(f for f in os.listdir(directory) if not blacklist(f))
expected = set(buckets[directory])
if not (actual - expected):
parent = os.path.dirname(directory)
buckets[parent][os.path.basename(directory)] = Results.Directory(
root_dir,
directory[root_prefix:],
False,
sum(f.size for f in buckets[directory].itervalues()),
sum(f.nb_files for f in buckets[directory].itervalues()))
# Remove the whole bucket.
del buckets[directory]
# Reverse the mapping with what remains. The original instances are returned,
# so the cached meta data is kept.
files = sum((x.values() for x in buckets.itervalues()), [])
return sorted(files, key=lambda x: x.path)
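# Illustrative sketch of the directory-bucketing step described in the
# extract_directories() docstring, using plain path strings instead of
# Results.File objects. _example_bucket_by_directory is a hypothetical helper
# and is not used by the tool itself.
def _example_bucket_by_directory(paths):
  """Groups file paths as {directory: set(basenames)}."""
  buckets = {}
  for path in paths:
    directory, basename = os.path.split(path)
    buckets.setdefault(directory, set()).add(basename)
  return buckets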
def trace(logfile, cmd, cwd, api, output):
"""Traces an executable. Returns (returncode, output) from api.
Arguments:
- logfile: file to write to.
- cmd: command to run.
- cwd: current directory to start the process in.
- api: a tracing api instance.
- output: if True, returns output, otherwise prints it at the console.
"""
cmd = fix_python_path(cmd)
api.clean_trace(logfile)
with api.get_tracer(logfile) as tracer:
return tracer.trace(cmd, cwd, 'default', output)
def CMDclean(args):
"""Cleans up traces."""
parser = OptionParserTraceInputs(command='clean')
options, args = parser.parse_args(args)
api = get_api()
api.clean_trace(options.log)
return 0
def CMDtrace(args):
"""Traces an executable."""
parser = OptionParserTraceInputs(command='trace')
parser.allow_interspersed_args = False
parser.add_option(
'-q', '--quiet', action='store_true',
help='Redirects traced executable output to /dev/null')
parser.add_option(
'-s', '--sudo', action='store_true',
help='Use sudo when shelling out the tracer tool (ignored on Windows)')
parser.add_option(
'-n', '--no-sudo', action='store_false',
help='Don\'t use sudo')
options, args = parser.parse_args(args)
if not args:
parser.error('Please provide a command to run')
if not os.path.isabs(args[0]) and os.access(args[0], os.X_OK):
args[0] = os.path.abspath(args[0])
# options.sudo default value is None, which is to do whatever tracer defaults
# do.
api = get_api(use_sudo=options.sudo)
return trace(options.log, args, os.getcwd(), api, options.quiet)[0]
def CMDread(args):
"""Reads the logs and prints the result."""
parser = OptionParserTraceInputs(command='read')
parser.add_option(
'-V', '--variable',
nargs=2,
action='append',
dest='variables',
metavar='VAR_NAME directory',
default=[],
help=('Variables to replace relative directories against. Example: '
'"-v \'$HOME\' \'/home/%s\'" will replace all occurence of your '
'home dir with $HOME') % getpass.getuser())
parser.add_option(
'--root-dir',
help='Root directory to base everything off of. Anything outside of '
'this directory will not be reported')
parser.add_option(
'--trace-name',
help='Only reads one of the traces. Defaults to reading all traces')
parser.add_option(
'-j', '--json', action='store_true',
help='Outputs raw result data as json')
parser.add_option(
'-b', '--blacklist', action='append', default=[],
help='List of regexp to use as blacklist filter')
options, args = parser.parse_args(args)
if options.root_dir:
options.root_dir = get_native_path_case(
unicode(os.path.abspath(options.root_dir)))
variables = dict(options.variables)
api = get_api()
def blacklist(f):
return any(re.match(b, f) for b in options.blacklist)
data = api.parse_log(options.log, blacklist, options.trace_name)
# Process each trace.
output_as_json = []
try:
for item in data:
if 'exception' in item:
# Do not abort the other traces.
print >> sys.stderr, (
'Trace %s: Got an exception: %s' % (
item['trace'], item['exception'][1]))
continue
results = item['results']
if options.root_dir:
results = results.strip_root(options.root_dir)
if options.json:
output_as_json.append(results.flatten())
else:
simplified = extract_directories(
options.root_dir, results.files, blacklist)
simplified = [f.replace_variables(variables) for f in simplified]
if len(data) > 1:
print('Trace: %s' % item['trace'])
print('Total: %d' % len(results.files))
print('Non existent: %d' % len(results.non_existent))
for f in results.non_existent:
print(' %s' % f.path)
print(
'Interesting: %d reduced to %d' % (
len(results.existent), len(simplified)))
for f in simplified:
print(' %s' % f.path)
if options.json:
write_json(sys.stdout, output_as_json, False)
except KeyboardInterrupt:
return 1
except IOError as e:
if e.errno == errno.EPIPE:
# Do not print a stack trace when the output is piped to less and the user
# quits before the whole output was written.
return 1
raise
return 0
class OptionParserWithLogging(optparse.OptionParser):
"""Adds --verbose option."""
def __init__(self, verbose=0, **kwargs):
optparse.OptionParser.__init__(self, **kwargs)
self.add_option(
'-v', '--verbose',
action='count',
default=verbose,
help='Use multiple times to increase verbosity')
def parse_args(self, *args, **kwargs):
options, args = optparse.OptionParser.parse_args(self, *args, **kwargs)
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(
level=levels[min(len(levels)-1, options.verbose)],
format='%(levelname)5s %(module)15s(%(lineno)3d): %(message)s')
return options, args
class OptionParserWithNiceDescription(OptionParserWithLogging):
"""Generates the description with the command's docstring."""
def __init__(self, **kwargs):
"""Sets 'description' and 'usage' if not already specified."""
command = kwargs.pop('command', 'help')
kwargs.setdefault(
'description',
re.sub('[\r\n ]{2,}', ' ', get_command_handler(command).__doc__))
kwargs.setdefault('usage', '%%prog %s [options]' % command)
OptionParserWithLogging.__init__(self, **kwargs)
class OptionParserTraceInputs(OptionParserWithNiceDescription):
"""Adds automatic --log handling."""
def __init__(self, **kwargs):
OptionParserWithNiceDescription.__init__(self, **kwargs)
self.add_option(
'-l', '--log', help='Log file to generate or read, required')
def parse_args(self, *args, **kwargs):
"""Makes sure the paths make sense.
On Windows, / and \ are often mixed together in a path.
"""
options, args = OptionParserWithNiceDescription.parse_args(
self, *args, **kwargs)
if not options.log:
self.error('Must supply a log file with -l')
options.log = os.path.abspath(options.log)
return options, args
def extract_documentation():
"""Returns a dict {command: description} for each of documented command."""
commands = (
fn[3:]
for fn in dir(sys.modules['__main__'])
if fn.startswith('CMD') and get_command_handler(fn[3:]).__doc__)
return dict((fn, get_command_handler(fn).__doc__) for fn in commands)
def CMDhelp(args):
"""Prints list of commands or help for a specific command."""
doc = extract_documentation()
# Calculates the optimal offset.
offset = max(len(cmd) for cmd in doc)
format_str = ' %-' + str(offset + 2) + 's %s'
# Generate a one-line documentation entry for each command.
commands_description = '\n'.join(
format_str % (cmd, doc[cmd].split('\n')[0]) for cmd in sorted(doc))
parser = OptionParserWithNiceDescription(
usage='%prog <command> [options]',
description='Commands are:\n%s\n' % commands_description)
parser.format_description = lambda _: parser.description
# Strip out any -h or --help argument.
_, args = parser.parse_args([i for i in args if i not in ('-h', '--help')])
if len(args) == 1:
if not get_command_handler(args[0]):
parser.error('Unknown command %s' % args[0])
# The command was "%prog help command", replaces ourself with
# "%prog command --help" so help is correctly printed out.
return main(args + ['--help'])
elif args:
parser.error('Unknown argument "%s"' % ' '.join(args))
parser.print_help()
return 0
def get_command_handler(name):
"""Returns the command handler or CMDhelp if it doesn't exist."""
return getattr(sys.modules['__main__'], 'CMD%s' % name, None)
def main_impl(argv):
command = get_command_handler(argv[0] if argv else 'help')
if not command:
return CMDhelp(argv)
return command(argv[1:])
def main(argv):
disable_buffering()
try:
return main_impl(argv)
except TracingFailure, e:
sys.stderr.write('\nError: ')
sys.stderr.write(str(e))
sys.stderr.write('\n')
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
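# Illustrative sketch of the verbosity mapping used by
# OptionParserWithLogging.parse_args() above. _example_verbosity_to_level is a
# hypothetical helper and is not part of the tool.
def _example_verbosity_to_level(verbose_count):
  """Maps 0 -> ERROR, 1 -> INFO, 2 or more -> DEBUG, as parse_args() does."""
  levels = [logging.ERROR, logging.INFO, logging.DEBUG]
  return levels[min(len(levels) - 1, verbose_count)]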
|
windyuuy/opera
|
chromium/src/tools/swarm_client/trace_inputs.py
|
Python
|
bsd-3-clause
| 137,646
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys, random, codecs
tags = []
def random_tag():
return tags[random.randint(0, len(tags)-1)]
def emit(B, tag, word):
if word in B[tag]:
return B[tag][word]
#elif word.replace('.', '0', 1).isdigit():
#return 0 if tag == 'ZZ' else float('-inf')
else:
return float('-inf')
def tag(path):
global tags
pi = dict()
A = dict()
B = dict()
with codecs.open("hmmmodel.txt", 'r', 'utf-8') as fin:
line = fin.readline()
while line:
toks = line.strip().split(' ')
tag = toks[0]
tags.append(tag)
pi[tag] = float(toks[1])
A[tag] = dict()
toks = fin.readline().strip().split(' ')
for tok in toks:
ntag = tok[:tok.rfind(':')]
A[tag][ntag] = float(tok[tok.rfind(':')+1:])
B[tag] = dict()
toks = fin.readline().strip().split(' ')
for tok in toks:
word = tok[:tok.rfind(':')]
B[tag][word] = float(tok[tok.rfind(':')+1:])
line = fin.readline()
with codecs.open(path, 'r', 'utf-8') as fin:
with codecs.open('hmmoutput.txt', 'w', 'utf-8') as fout:
line = fin.readline()
while line:
toks = line.strip().split(' ')
backptrs = []
for idx, word in enumerate(toks):
backptrs.append(dict())
flag = False
for tag in tags:
if emit(B, tag, word) != float('-inf'):
flag = True
break
if 0 == idx:
p = dict((tag, pi[tag]+emit(B, tag, word) if flag else pi[tag]) for tag in tags)
else:
tmp = dict()
for tag in tags:
for ptag, pval in p.items():
val = pval+A[ptag][tag]
if tag not in tmp or tmp[tag] < val:
tmp[tag] = val
backptrs[idx][tag] = ptag
if flag:
tmp[tag] += emit(B, tag, word)
p = tmp
mtag = None
for tag in tags:
if mtag is None or p[mtag] < p[tag]:
mtag = tag
tag_seq = [mtag]
for i in range(-1, -len(toks), -1):
tag_seq.append(backptrs[i][tag_seq[-1]])
tag_seq.reverse()
assert len(tag_seq) == len(toks)
for idx, tag in enumerate(tag_seq):
fout.write(toks[idx]+"/"+tag)
if idx == len(tag_seq)-1:
fout.write('\n')
else:
fout.write(' ')
line = fin.readline()
if __name__ == '__main__':
tag(sys.argv[1])
|
DevinZ1993/Pieces-of-Code
|
python/cs544/hmm/hmmdecode.py
|
Python
|
mpl-2.0
| 3,101
|
from lcc.stars_processing.utilities.base_descriptor import BaseDescriptor
class AbbeValueDescr(BaseDescriptor):
"""
AbbeValueDescr describes stars by Abbe values
Attributes
----------
bins : int
Dimension of reduced light curve from which Abbe value
is calculated
"""
LABEL = "Abbe value"
LC_NEEDED = True
def __init__(self, bins=None):
"""
Parameters
----------
bins : int
Dimension of reduced light curve from which Abbe value
is calculated
"""
self.bins = bins
def getFeatures(self, star):
"""
Get Abbe value
Parameters
-----------
star : lcc.entities.star.Star object
Star to process
Returns
-------
float
Abbe value of the investigated star
"""
if not self.bins:
bins = len(star.lightCurve.time)
else:
bins = self.bins
return star.lightCurve.getAbbe(bins=bins)
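# Illustrative sketch of an Abbe value computation, assuming the standard
# Abbe/von Neumann definition; the class above delegates the real computation
# to star.lightCurve.getAbbe(), which may differ in details.
def _example_abbe_value(values):
    """Ratio of the mean square successive difference to twice the variance."""
    import numpy as np
    values = np.asarray(values, dtype=float)
    n = len(values)
    numerator = n * np.sum(np.diff(values) ** 2)
    denominator = 2.0 * (n - 1) * np.sum((values - values.mean()) ** 2)
    return numerator / denominator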
|
mavrix93/LightCurvesClassifier
|
lcc/stars_processing/descriptors/abbe_value_descr.py
|
Python
|
mit
| 1,050
|
from collections import OrderedDict
from .country import Country
class Index(object):
"""
Generate index based on the position in bytes
of every letter in the alphabet.
The index is stored in an OrderedDict.
It's a lazy index, the index is not generated
till the first call.
"""
def __init__(self, country='us'):
self.indices = []
self.country = Country(country)
def __call__(self, letter):
if len(self.indices) == 0:
self._generate_index()
return self.indices[letter.upper()]
def _generate_index(self):
self.indices = OrderedDict()
with open(self.country.file()) as file:
total = file.readline() # Omit headers line
for line in file:
if line[0] not in self.indices:
self.indices[line[0]] = len(total)
total = total + line
if __name__ == "__main__":
print(__doc__)
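# Illustrative sketch of the same byte-offset idea on in-memory data.
# example_letter_offsets is a hypothetical helper; it assumes the lines are
# grouped by their first letter, as the Index above requires.
def example_letter_offsets(lines):
    """Maps each starting letter to the offset of its first line."""
    offsets = OrderedDict()
    position = 0
    for line in lines:
        letter = line[0].upper()
        if letter not in offsets:
            offsets[letter] = position
        position += len(line)
    return offsets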
|
malev/gender-detector
|
gender_detector/index.py
|
Python
|
gpl-2.0
| 952
|
# Author: Xavier Paredes-Fortuny (xparedesfortuny@gmail.com)
# License: MIT, see LICENSE.md
import numpy as np
def resamp_line(all_lines, resamp, a, CGS_units):
"""Downsamples the line to 'resamp' cells.
All the lines should have the same length with a constant
cell size.
We are not averaging the data within a bin because we are
dealing with relativistic quantities. E.g. to average the
density we should first average the number of particles, then
average the volume in the lab frame, and only then recompute the
density.
If needed, dx and dy of the new sampling can be computed as the
projection of dl, i.e. dl*cos theta, where theta can be
computed from vx and vy.
After resampling you may notice that there is a gap at the top
of the lines map, which is different for each line. The
largest gap corresponds to the largest line (before resampling)
and its size is just the bin size. The reason is that for each
bin we keep the first value of the bin, e.g. x[0]; the
opposite would occur (i.e. a gap at the bottom of the map) if
we kept the last value of the bin, e.g. x[-1]. The gap
looks shorter for the other lines because in that case the gap
continues outside the grid (with 0 values). A simplified 1-D
sketch of this binning idea is given at the end of this module."""
# Length of the largest line and maximum number of points
sc_max = 0.
N_max = 0
for line_values in all_lines:
xc = zip(*line_values)[0]
yc = zip(*line_values)[1]
sc = 0.
for i in range(len(xc)-1):
dxc = xc[i+1]-xc[i]
dyc = yc[i+1]-yc[i]
sc += np.sqrt(dxc**2.+dyc**2.)
if sc > sc_max:
sc_max = sc
if len(xc) > N_max:
N_max = len(xc)
if N_max <= resamp:
raise RuntimeError('Resampling value too big')
# Cell size (fixed for all lines)
dl = sc_max/resamp
#print dl
# Resampled line grid, with resamp+1 vertices
l = [-1.]
l_aux = 0.
for k in range(resamp):
l_aux += dl
l += [l_aux]
l[-1] += dl
# Save line length info
with open('L_dL.dat', 'w') as f:
if CGS_units == 0:
f.write(str(l[-1]-dl) + ' ' + str(dl) + '\n')
else:
f.write(str((l[-1]-dl)*a) + ' ' + str(dl*a) + '\n')
all_lines_new = []
# Regrouping of the physical parameters in the new grid
for line_values in all_lines:
xc, yc, ic, jc, densc, epsc, vxc, vyc, divc, tracerc, tc = zip(*line_values)
# Line grid before resampling
sc = [0.]
sc_aux = 0.
for i in range(len(xc)-1):
dxc = xc[i+1]-xc[i]
dyc = yc[i+1]-yc[i]
sc_aux += np.sqrt(dxc**2.+dyc**2.)
sc += [sc_aux]
# Regrouping
x = []
y = []
ii = []
jj = []
dens = []
eps = []
vx = []
vy = []
div = []
tracer = []
t = []
line_values_new = []
k = 0
i_first = 0
while True:
# Look for the bin fitting the first element
while not (l[k] < sc[i_first] <= l[k+1]):
k += 1
# Look for the other bin elements
for i in range(i_first, len(xc)):
if l[k] < sc[i] <= l[k+1]:
x += [xc[i]]
y += [yc[i]]
ii += [ic[i]]
jj += [jc[i]]
dens += [densc[i]]
eps += [epsc[i]]
vx += [vxc[i]]
vy += [vyc[i]]
div += [divc[i]]
tracer += [tracerc[i]]
t += [tc[i]]
else:
break
# Once the bin is full, save it as the first element
x_new = x[0]#np.average(x)
y_new = y[0]#np.average(y)
ii_new = ii[0]#np.int(np.floor(np.average(ii)))
jj_new = jj[0]#np.int(np.floor(np.average(jj)))
dens_new = dens[0]#np.average(dens)
eps_new = eps[0]#np.average(eps)
vx_new = vx[0]#np.average(vx)
vy_new = vy[0]#np.average(vy)
div_new = div[0]#np.average(div)
tracer_new = tracer[0]#np.average(tracer)
t_new = t[0]#np.average(t)
line_values_new.append([x_new, y_new, ii_new, jj_new,
dens_new, eps_new,
vx_new, vy_new, div_new,
tracer_new, t_new])
# Continue with the next bin
i_first = i
k += 1
x = []
y = []
ii = []
jj = []
dens = []
eps = []
vx = []
vy = []
div = []
tracer = []
t = []
if k >= len(l)-2 or i_first >= len(sc)-1:
x += [xc[i]]
y += [yc[i]]
ii += [ic[i]]
jj += [jc[i]]
dens += [densc[i]]
eps += [epsc[i]]
vx += [vxc[i]]
vy += [vyc[i]]
div += [divc[i]]
tracer += [tracerc[i]]
t += [tc[i]]
x_new = x[0]#np.average(x)
y_new = y[0]#np.average(y)
ii_new = ii[0]#np.int(np.floor(np.average(ii)))
jj_new = jj[0]#np.int(np.floor(np.average(jj)))
dens_new = dens[0]#np.average(dens)
eps_new = eps[0]#np.average(eps)
vx_new = vx[0]#np.average(vx)
vy_new = vy[0]#np.average(vy)
div_new = div[0]#np.average(div)
tracer_new = tracer[0]#np.average(tracer)
t_new = t[0]#np.average(t)
line_values_new.append([x_new, y_new, ii_new, jj_new,
dens_new, eps_new,
vx_new, vy_new, div_new,
tracer_new, t_new])
break
non_zero = len(line_values_new)
for z in range(resamp-non_zero):
x_new = 0
y_new = 0
ii_new = 0
jj_new = 0
dens_new = 0
eps_new = 0
vx_new = 0
vy_new = 0
div_new = 0
tracer_new = 0
t_new = 0
line_values_new.append([x_new, y_new, ii_new, jj_new,
dens_new, eps_new,
vx_new, vy_new, div_new,
tracer_new, t_new])
all_lines_new.append(line_values_new)
return all_lines_new
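# Simplified 1-D sketch of the binning idea described in the resamp_line()
# docstring: keep the first sample falling in each fixed-size cell and pad
# empty cells with 0. example_downsample_first_per_bin is a hypothetical
# helper and is not used above.
def example_downsample_first_per_bin(s, values, n_bins):
    """Keeps the first value of each of n_bins equal cells along the abscissa s."""
    s = np.asarray(s, dtype=float)
    edges = np.linspace(s[0], s[-1], n_bins + 1)
    out = []
    for k in range(n_bins):
        # Include the right edge only for the last bin.
        upper = edges[k + 1] if k < n_bins - 1 else s[-1] + 1.
        inside = np.where((s >= edges[k]) & (s < upper))[0]
        out.append(values[inside[0]] if len(inside) else 0)
    return out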
|
xparedesfortuny/pylines
|
resamp_line.py
|
Python
|
mit
| 6,769
|
from montemodes import classes as res
__author__ = 'abel'
import math
import random
import copy
import numpy as np
def weighted_choice(weights):
total = sum(weights)
threshold = random.uniform(0, total)
for k, weight in enumerate(weights):
total -= weight
if total < threshold:
return k
########### ALTERATION FUNCTIONS ########
def alteration_with_modes(molecule, conditions):
altered_coordinates = molecule.get_coordinates()
random_number = random.uniform(0, 1)
vibration = molecule.get_modes(conditions.energy_method)
normalization = 1
chosen = random.randrange(len(vibration.frequencies))
if conditions.number_of_modes_to_use is not None:
normalization = len(vibration.frequencies)/conditions.number_of_modes_to_use
if conditions.number_of_modes_to_use < len(vibration.frequencies):
chosen = random.randrange(conditions.number_of_modes_to_use)
if abs(vibration.frequencies[chosen]) > 0.01:
altered_coordinates += ( np.sqrt(conditions.expansion_factor*random_number*molecule.get_atomic_masses())
* pow(vibration.frequencies[chosen],-1)
* random.choice([1,-1])
* vibration.normalized_modes[chosen]
* normalization)
# * math.exp(
# -conditions.temperature_frequency_relation * vibration.frequencies[i] / conditions.temperature)
altered_molecule = copy.deepcopy(molecule)
altered_molecule.set_coordinates(altered_coordinates)
return altered_molecule
def alteration_internal_with_weights(molecule, conditions):
altered_coordinates = molecule.get_internal()
weights = molecule.get_int_weights()
altered_coordinates += np.prod([np.random.random((molecule.get_internal().shape[0],))-0.5,
weights],axis=0)[None].T * conditions.expansion_factor
altered_molecule = copy.deepcopy(molecule)
altered_molecule.set_internal(altered_coordinates)
return altered_molecule
def alteration_cartesian(molecule, conditions):
altered_coordinates = molecule.get_coordinates()
altered_coordinates += (np.random.random(molecule.get_coordinates().shape)-0.5) * conditions.expansion_factor
altered_molecule = copy.deepcopy(molecule)
altered_molecule.set_coordinates(altered_coordinates)
return altered_molecule
############ REGULATION FUNCTIONS #########
def average_gradient(vector):
poly_values = (np.polyfit( np.arange(len(vector)),vector,2))
poly_derivative = np.polyder(np.poly1d(poly_values))
return -poly_derivative(len(vector))
def adjust_expansion_factor(acceptation_vector, conditions):
last_acceptation_vector = acceptation_vector[-conditions.number_of_vales_for_average:]
# print(last_acceptation_vector)
if len(last_acceptation_vector) < conditions.number_of_vales_for_average:
return conditions.expansion_factor
current_gradient = average_gradient(last_acceptation_vector)
A = conditions.acceptation_regulator
target_gradient = A * 2 ** 3 * (acceptation_vector[-1] - 0.5) ** 3
# target_derivative = -A*(acceptation_vector[-1])*2
# print('derivative',current_derivative,target_derivative)
final = math.exp(target_gradient - current_gradient)
# print'F:', acceptation_vector[-1],Final
# conditions.expansion_factor *= Final
# print('Grad:',current_gradient,target_gradient,final)
return conditions.expansion_factor * final
##########MONTECARLO ALGORITHM###########
def calculate_MonteCarlo(simulation, conditions, show_text=True, alteration_type='cartesian'):
alteration = { 'cartesian' : alteration_cartesian,
'internal' : alteration_internal_with_weights,
'modes' : alteration_with_modes}
molecule = copy.deepcopy(simulation.trajectory[-1])
print 'Temperature {0}'.format(conditions.temperature)
print('Starting at:{0}'.format(simulation.number_of_cycles))
if show_text:
print(' Energy(cur) Energy(test) Accept cv')
for iteration in range(simulation.number_of_cycles, simulation.number_of_cycles + conditions.number_of_cycles):
simulation.update_acceptation_vector(iteration, conditions)
simulation.append_data_from_molecule(molecule)
conditions.expansion_factor = adjust_expansion_factor(simulation.acceptation_ratio_vector, conditions)
molecule_altered = alteration[alteration_type](molecule, conditions)
if show_text:
print('{0:12.5f} {1:12.5f} {2:2.3f} {3:2.3e} '.format(molecule.get_energy(conditions.energy_method),
molecule_altered.get_energy(conditions.energy_method),
simulation.acceptation_ratio, simulation.get_cv(conditions)))
if molecule.get_energy(conditions.energy_method) < molecule_altered.get_energy(conditions.energy_method):
energy_ratio = math.exp((molecule.get_energy(conditions.energy_method) - molecule_altered.get_energy(conditions.energy_method))
/ (conditions.temperature * conditions.kb))
if energy_ratio < random.random():
continue
molecule = molecule_altered
simulation.add_accepted(iteration,conditions)
simulation.number_of_cycles += conditions.number_of_cycles
return simulation
if __name__ == '__main__':
import montemodes.functions.reading as io_monte
import montemodes.classes.results as res
conditions = res.Conditions(temperature=500,
number_of_cycles=5,
initial_expansion_factor=1,
energy_method=2,
acceptation_regulator=0.1,
number_of_values_for_average=50)
molecule = io_monte.reading_from_gzmat_file('../test.gzmat')
print(molecule.get_coordinates())
molecule2 = alteration_cartesian(molecule, conditions)
print(molecule2.get_coordinates())
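# Standalone sketch of the Metropolis acceptance rule used inside
# calculate_MonteCarlo(). example_metropolis_accept is hypothetical, and kb
# must be given in units consistent with the energies and temperature.
def example_metropolis_accept(e_current, e_trial, temperature, kb):
    """Always accepts downhill moves; accepts uphill moves with Boltzmann probability."""
    if e_trial <= e_current:
        return True
    return random.random() < math.exp((e_current - e_trial) / (kb * temperature))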
|
abelcarreras/MonteModes
|
montemodes/functions/montecarlo.py
|
Python
|
mit
| 6,111
|
#!/usr/bin/env python
# coding: UTF-8
from __future__ import division
from util.hosvd import HOSVD, unfold, reconstruct, frobenius_norm
from util.mobility import trans, init_data, init_data2, trans2
import numpy as np
import math
import pylab
# Step 1: build the transition probability tensor
# tensor = trans(count(init_db()))
# tensor = trans(count([0,1,0,1,0,1,1],2))
from util.tensor import hosvd
from util.tensor_old import hosvd2
from util.util import sparse
def recommend(users, time_slice, train_percent, top_n, use_type):
axis_poi, data_map, predicts, recommends = init_data2(users, train_percent, time_slice)
print "predicts: ", predicts
print "recommends: ", recommends
print "data_map: ", data_map
poi_dimension = len(axis_poi)
tensor = trans2(data_map, poi_dimension, users, time_slice)
print "tensor: ", tensor
# sparse(np.array(tensor))
threshold = 0.8
U, S, D = HOSVD(np.array(tensor), threshold)
# new_T, T, Z, Un, Sn, Vn = hosvd(tensor)
# new_T2, Z2, Un2, Sn2, Vn2 = hosvd2(tensor)
print "the mode-1 unfold of core tensor:"
print unfold(S, 1)
print "The n-mode singular values:"
print D
A = reconstruct(S, U)
print "reconstruct tensor: ", A
print frobenius_norm(tensor-A)
# sparse(A)
#
# print tensor[0][0][6]
# print A[0][0][6]
total = 0
available = 0
sum_precision = 0
sum_recall = 0
sum_f1_score = 0
for user in users:
data = data_map[user]
# print "data: ", data
for slot in range(0, time_slice):
check_list = data[slot]
data = A[users.index(user)][slot]
sort_data = []
for item in range(0, len(data)):
meta_data = (item, data[item])
sort_data.append(meta_data)
sort_data.sort(key=lambda x: x[1], reverse=True)
result_predict = []
result_recommend = []
for item in range(0, len(sort_data)):
if (sort_data[item][0] in set(data_map[user][slot])) and (len(result_predict) < top_n):
result_predict.append(sort_data[item][0])
else:
if len(result_recommend) < top_n:
result_recommend.append(sort_data[item][0])
# 1. precision = number of correct items retrieved / number of items retrieved
# 2. recall = number of correct items retrieved / number of relevant items in the sample
# Both values lie between 0 and 1; the closer to 1, the higher the precision or recall.
# 3. F1 = 2 * precision * recall / (precision + recall), i.e. the harmonic mean of
#    precision and recall (a worked example is given at the end of this file).
check_predict = predicts[user][slot]
check_recommend = recommends[user][slot]
count_predict = 0
count_recommend = 0
for item in result_predict:
if item in check_predict:
count_predict += 1
for item in result_recommend:
if item in check_recommend:
count_recommend += 1
total += 1
if use_type == "recommendation":
if len(result_recommend) == 0:
print "用户"+str(user)+"在时间"+str(slot)+"的f1_score(推荐): 没有生成推荐数据,无法完成推荐"
else:
precision = count_recommend / len(result_recommend)
if len(check_recommend) == 0:
print "用户"+str(user)+"在时间"+str(slot)+"的f1_score(推荐): 校验推荐数据缺失,无法有效计算f1值"
else:
available += 1
recall = count_recommend / len(check_recommend)
if precision + recall == 0:
f1_score = 0
print "用户"+str(user)+"在时间"+str(slot)+"的f1_score(推荐): "+str(f1_score)
else:
f1_score = (2 * precision * recall) / (precision + recall)
print "用户"+str(user)+"在时间"+str(slot)+"的f1_score(推荐): "+str(f1_score)+",准确率为"+\
str(precision)+",召回率为"+str(recall)
sum_precision += precision
sum_recall += recall
sum_f1_score += f1_score
else:
if len(result_predict) == 0:
print "用户"+str(user)+"在时间"+str(slot)+"的f1_score(预测): 没有生成预测数据,无法完成预测"
else:
precision = count_predict / len(result_predict)
if len(check_predict) == 0:
print "用户"+str(user)+"在时间"+str(slot)+"的f1_score(预测): 校验预测数据缺失,无法有效计算f1值"
else:
available += 1
recall = count_predict / len(check_predict)
if precision + recall == 0:
f1_score = 0
print "用户"+str(user)+"在时间"+str(slot)+"的f1_score(预测): "+str(f1_score)
else:
f1_score = (2 * precision * recall) / (precision + recall)
print "用户"+str(user)+"在时间"+str(slot)+"的f1_score(预测): "+str(f1_score)+",准确率为"+\
str(precision)+",召回率为"+str(recall)
sum_precision += precision
sum_recall += recall
sum_f1_score += f1_score
return sum_precision / total, sum_recall / total, sum_f1_score / total, available / total
if __name__ == '__main__':
users = tuple(range(20, 30))
time_slice = 24
train_percent = 0.35
top_n = 3
use_type = "recommendation"
# use_type = "prediction"
train_percent = 0.1
y_values = []
x_values = []
y_values2 = []
y_values3 = []
y_values4 = []
while train_percent <= 0.8:
avg_precision, avg_recall, avg_f1_score, availability = recommend(users, time_slice, train_percent, top_n, use_type)
print "avg_precision: ", avg_precision
print "avg_recall: ", avg_recall
print "avg_f1_score: ", avg_f1_score
print "availability: ", availability
y_values.append(avg_precision)
y_values2.append(avg_recall)
y_values3.append(avg_f1_score)
y_values4.append(availability)
x_values.append(train_percent)
train_percent += 0.01
pylab.plot(x_values, y_values, color="blue", linewidth=1, linestyle="-", label="precision")
pylab.plot(x_values, y_values2, color="red", linewidth=1, linestyle="-", label="recall")
pylab.plot(x_values, y_values3, color="green", linewidth=1, linestyle="-", label="f1_score")
pylab.plot(x_values, y_values4, color="yellow", linewidth=1, linestyle="-", label="availability")
pylab.xlabel("train percent")
pylab.ylabel("result")
pylab.title("relation between train set and result(top_k=3, time_slice=24)")
pylab.legend(loc='upper right')
pylab.show()
# # f1 score peaks around train_percent 0.35
# avg_precision, avg_recall, avg_f1_score, availability = recommend(users, time_slice, train_percent, top_n, use_type)
# print "avg_precision: ", avg_precision
# print "avg_recall: ", avg_recall
# print "avg_f1_score: ", avg_f1_score
# print "availability: ", availability
|
pengyuan/markov2tensor
|
recommendation/fuzzy_recommend.py
|
Python
|
mit
| 7,681
|
from .. import db
import datetime
class Client(db.Model):
__tablename__='client'
id=db.Column(db.Integer,primary_key=True)
name=db.Column(db.Unicode(16))
tel=db.Column(db.Unicode(16))
memo=db.Column(db.Unicode(64))
channel_id=db.Column(db.Integer,db.ForeignKey('channel.id'))
channel=db.relationship('Channel',backref=db.backref('clients'))
def __unicode__(self):
return self.name
|
colaftc/webtool
|
app/client/models.py
|
Python
|
mit
| 394
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TagValue(Model):
"""Tag information.
:param id: The tag ID.
:type id: str
:param tag_value: The tag value.
:type tag_value: str
:param count: The tag value count.
:type count: ~azure.mgmt.resource.resources.v2017_05_10.models.TagCount
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'tag_value': {'key': 'tagValue', 'type': 'str'},
'count': {'key': 'count', 'type': 'TagCount'},
}
def __init__(self, id=None, tag_value=None, count=None):
super(TagValue, self).__init__()
self.id = id
self.tag_value = tag_value
self.count = count
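# Illustrative usage sketch with hypothetical values; count would normally
# carry a TagCount instance from the same models package.
def _example_tag_value():
    tag = TagValue(id='/tagNames/env/tagValues/prod', tag_value='prod')
    return tag.tag_value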
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-resource/azure/mgmt/resource/resources/v2017_05_10/models/tag_value.py
|
Python
|
mit
| 1,165
|
"""
Example of how to generate a Cyclo-Static dataflow graph
"""
from Turbine.generation.generate import generate
from Turbine.param.parameters import Parameters
print "###### Setup the CSDF generation ####"
c_param = Parameters()
# Set the CSDF type for the generation
c_param.set_dataflow_type("CSDF")
# Min/Max phase per task
c_param.set_min_phase_count(1)
c_param.set_max_phase_count(10)
# Min/Max arcs count per task
c_param.set_min_task_degree(1)
c_param.set_max_task_degree(3)
# Number of task in the dataflow
c_param.set_nb_task(100)
print "###### Generate CSDF dataflow #######"
CSDFG = generate("Test_of_SDFG", c_param)
print CSDFG
|
bbodin/turbine
|
Turbine/examples/generate_CSDFG.py
|
Python
|
gpl-2.0
| 649
|
import pytest
import numpy as np
def test_oudin():
from pypet import oudin, daily_extraterrestrial_radiation
doy = 1
latitude = 0.0
def py_oudin(d, l, t, density=1000, latent_heat=2.45):
re = daily_extraterrestrial_radiation(d, l)
if t > -5:
return 1e3 * re * (t + 5) / (100 * latent_heat * density)
return 0.0
py_oudin = np.vectorize(py_oudin)
T = 10
np.testing.assert_allclose(oudin(doy, latitude, T), py_oudin(doy, latitude, T))
T = np.linspace(-10, 10)
np.testing.assert_allclose(oudin(doy, latitude, T), py_oudin(doy, latitude, T))
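# Worked instance of the Oudin relation coded in py_oudin above, with
# hypothetical inputs; independent of the pypet implementation.
def test_oudin_arithmetic_example():
    re, t, density, latent_heat = 30.0, 10.0, 1000.0, 2.45
    pet = 1e3 * re * (t + 5) / (100 * latent_heat * density)
    np.testing.assert_allclose(pet, 450000.0 / 245000.0)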
|
jetuk/pypet
|
tests/test_pet_ufunc.py
|
Python
|
mit
| 618
|
# Copyright (c) 2015 Russell Sim <russell.sim@gmail.com>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Server Specific Configurations
server = {
'port': '8080',
'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
'root': 'fairy_slipper.controllers.root.RootController',
'modules': ['fairy_slipper'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/fairy_slipper/templates',
'api_doc': '%(confdir)s/api_doc',
'debug': True,
'errors': {
404: '/error/404',
'__force_dict__': True
}
}
logging = {
'root': {'level': 'INFO', 'handlers': ['console']},
'loggers': {
'fairy_slipper': {'level': 'DEBUG', 'handlers': ['console']},
'pecan.commands.serve': {'level': 'DEBUG', 'handlers': ['console']},
'py.warnings': {'handlers': ['console']},
'__force_dict__': True
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'color'
}
},
'formatters': {
'simple': {
'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
'[%(threadName)s] %(message)s')
},
'color': {
'()': 'pecan.log.ColorFormatter',
'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
'[%(threadName)s] %(message)s'),
'__force_dict__': True
}
}
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
|
annegentle/fairy-slipper
|
config.py
|
Python
|
apache-2.0
| 2,149
|
"""
WSGI config for supportcenter project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "supportcenter.settings")
from __init__ import setup_paths, connect_db
setup_paths()
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
connect_db()
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
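# Illustrative sketch of the middleware wrapping described in the module
# docstring; example_logging_middleware is hypothetical and intentionally
# not applied here.
def example_logging_middleware(app):
    """Wraps a WSGI app and logs each request path to stderr."""
    import sys
    def wrapped(environ, start_response):
        sys.stderr.write("%s %s\n" % (environ.get("REQUEST_METHOD"), environ.get("PATH_INFO")))
        return app(environ, start_response)
    return wrapped
# To enable it: application = example_logging_middleware(application)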
|
LethusTI/supportcenter
|
supportcenter/wsgi.py
|
Python
|
gpl-3.0
| 1,220
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
import os
from urlparse import urlparse
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_check import OSCheck
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.copy_tarball import STACK_ROOT_PATTERN, STACK_NAME_PATTERN, STACK_VERSION_PATTERN
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries.functions.expect import expect
from resource_management.libraries import functions
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
from resource_management.libraries.functions.get_architecture import get_architecture
from resource_management.core.utils import PasswordString
from resource_management.core.shell import checked_call
from ambari_commons.credential_store_helper import get_password_from_credential_store
# Default log4j version; put config files under /etc/hive/conf
log4j_version = '1'
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
architecture = get_architecture()
sudo = AMBARI_SUDO_BINARY
credential_store_enabled = False
if 'credentialStoreEnabled' in config:
credential_store_enabled = config['credentialStoreEnabled']
stack_root = status_params.stack_root
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']
# node hostname
hostname = config["hostname"]
# This is expected to be of the form #.#.#.#
stack_version_unformatted = status_params.stack_version_unformatted
#stack_version_formatted_major = status_params.stack_version_formatted_major
# this is not available on INSTALL action because <stack-selector-tool> is not available
#stack_version_formatted = functions.get_stack_version('hive-server2')
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)
# current host stack version
current_version = default("/hostLevelParams/current_version", None)
# When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = default("/commandParams/downgrade_from_version", None)
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
# Upgrade direction
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
#force True since check_stack_feature doesn't work
stack_supports_ranger_kerberos=True
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_hive_jdbc_url_change = check_stack_feature(StackFeature.RANGER_HIVE_PLUGIN_JDBC_URL, version_for_stack_feature_checks)
stack_supports_atlas_hook_for_hive_interactive = check_stack_feature(StackFeature.HIVE_INTERACTIVE_ATLAS_HOOK_REQUIRED, version_for_stack_feature_checks)
stack_supports_hive_interactive_ga = True
# component ROLE directory (like hive-metastore or hive-server2-hive2)
component_directory = status_params.component_directory
component_directory_interactive = status_params.component_directory_interactive
hadoop_home = format('/usr/lib/hadoop')
hive_bin = format('/usr/lib/hive/bin')
hive_schematool_ver_bin = format('/usr/lib/hive/bin')
hive_schematool_bin = format('/usr/lib/hive/bin')
hive_lib = format('/usr/lib/hive/lib')
hive_version_lib = format('/usr/lib/hive/lib')
hive_var_lib = '/var/lib/hive'
hive_user_home_dir = "/home/hive"
# starting on stacks where HSI is supported, we need to begin using the 'hive2' schematool
hive_server2_hive2_dir = None
hive_server2_hive2_lib = None
#if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, version_for_stack_feature_checks):
# the name of the hiveserver2-hive2 component
hive_server2_hive2_component = status_params.SERVER_ROLE_DIRECTORY_MAP["HIVE_SERVER_INTERACTIVE"]
# when using the version, we can just specify the component as "hive2"
hive_schematool_ver_bin = format('/usr/lib/hive/bin')
# use the schematool which ships with hive2
hive_schematool_bin = format('/usr/lib/hive/bin')
# <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
hive_server2_hive2_dir = format('/usr/lib/hive')
# <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
hive_server2_hive2_version_dir = format('/usr/lib/hive')
# <stack-root>/current/hive-server2-hive2/lib -> <stack-root>/<version>/hive2/lib
hive_server2_hive2_lib = format('/usr/lib/hive/lib')
# <stack-root>/<version>/hive2/lib
hive_server2_hive2_version_lib = format('/usr/lib/hive/lib')
hive_interactive_bin = format('/usr/lib/hive/bin')
hive_interactive_lib = format('/usr/lib/hive/lib')
# Heap dump related
heap_dump_enabled = default('/configurations/hive-env/enable_heap_dump', None)
heap_dump_opts = "" # Empty if 'heap_dump_enabled' is False.
if heap_dump_enabled:
heap_dump_path = default('/configurations/hive-env/heap_dump_location', "/tmp")
heap_dump_opts = " -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="+heap_dump_path
# Hive Interactive related paths
hive_interactive_var_lib = '/var/lib/hive2'
# These tar folders were used in previous stack versions, e.g., HDP 2.1
hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
pig_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/pig.tar.gz')
hive_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/hive.tar.gz')
sqoop_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/sqoop*.tar.gz')
hive_metastore_site_supported = False
hive_etc_dir_prefix = "/etc/hive"
hive_interactive_etc_dir_prefix = "/etc/hive2"
limits_conf_dir = "/etc/security/limits.d"
hive_user_nofile_limit = default("/configurations/hive-env/hive_user_nofile_limit", "32000")
hive_user_nproc_limit = default("/configurations/hive-env/hive_user_nproc_limit", "16000")
# use the directories from status_params as they are already calculated for
# the correct stack version
hadoop_conf_dir = status_params.hadoop_conf_dir
hadoop_bin_dir = status_params.hadoop_bin_dir
webhcat_conf_dir = status_params.webhcat_conf_dir
hive_conf_dir = status_params.hive_conf_dir
hive_home_dir = status_params.hive_home_dir
hive_config_dir = status_params.hive_config_dir
hive_client_conf_dir = status_params.hive_client_conf_dir
hive_server_conf_dir = status_params.hive_server_conf_dir
hcat_conf_dir = '/etc/hive-hcatalog/conf'
config_dir = '/etc/hive-webhcat/conf'
hcat_lib = '/usr/lib/hive/hcatalog/share/hcatalog'
webhcat_bin_dir = '/usr/lib/hive/hcatalog/sbin'
# --- Tarballs ---
# DON'T CHANGE THESE VARIABLE NAMES
# Values don't change from those in copy_tarball.py
webhcat_apps_dir = "/apps/webhcat"
hive_tar_source = "{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
pig_tar_source = "{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
hive_tar_dest_file = "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
pig_tar_dest_file = "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
hadoop_streaming_tar_source = "{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
sqoop_tar_source = "{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
hadoop_streaming_tar_dest_dir = "/{0}/apps/{1}/mapreduce/".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
sqoop_tar_dest_dir = "/{0}/apps/{1}/sqoop/".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
tarballs_mode = 0444
purge_tables = "false"
# Starting from the stack version that supports the hive_purge_table feature, drop should be executed with purge
if check_stack_feature(StackFeature.HIVE_PURGE_TABLE, version_for_stack_feature_checks):
purge_tables = 'true'
if check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, version_for_stack_feature_checks):
# this is NOT a typo. Configs for hcatalog/webhcat point to a
# specific directory which is NOT called 'conf'
hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
if check_stack_feature(StackFeature.HIVE_METASTORE_SITE_SUPPORT, version_for_stack_feature_checks):
hive_metastore_site_supported = True
execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
jdk_location = config['hostLevelParams']['jdk_location']
if credential_store_enabled:
if 'hadoop.security.credential.provider.path' in config['configurations']['hive-site']:
cs_lib_path = config['configurations']['hive-site']['credentialStoreClassPath']
java_home = config['hostLevelParams']['java_home']
alias = 'javax.jdo.option.ConnectionPassword'
provider_path = config['configurations']['hive-site']['hadoop.security.credential.provider.path']
hive_metastore_user_passwd = PasswordString(get_password_from_credential_store(alias, provider_path, cs_lib_path, java_home, jdk_location))
else:
raise Exception("hadoop.security.credential.provider.path property should be set")
else:
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
hive_metastore_user_passwd = unicode(hive_metastore_user_passwd) if not is_empty(hive_metastore_user_passwd) else hive_metastore_user_passwd
hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
#HACK Temporarily use dbType=azuredb while invoking schematool
if hive_metastore_db_type == "mssql":
hive_metastore_db_type = "azuredb"
#users
hive_user = config['configurations']['hive-env']['hive_user']
# is it a restart command
is_restart_command = False
if 'roleCommand' in config and 'CUSTOM_COMMAND' == config['roleCommand']:
if 'custom_command' in config['hostLevelParams'] and 'RESTART' == config['hostLevelParams']['custom_command']:
is_restart_command = True
#JDBC driver jar name
hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
java_share_dir = '/usr/share/java'
hive_database_name = config['configurations']['hive-env']['hive_database_name']
hive_database = config['configurations']['hive-env']['hive_database']
hive_use_existing_db = hive_database.startswith('Existing')
default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
"com.mysql.jdbc.Driver":"mysql-connector-java.jar",
"org.postgresql.Driver":"postgresql-jdbc.jar",
"oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
"sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
# NOTE: keying on the JDBC driver class name is fragile; keying on the database type
# might be better, since class paths can change.
sqla_db_used = False
hive_previous_jdbc_jar_name = None
if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
elif hive_jdbc_driver == "org.postgresql.Driver":
jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
elif hive_jdbc_driver == "sap.jdbc4.sqlanywhere.IDriver":
jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
sqla_db_used = True
default_mysql_jar_name = "mysql-connector-java.jar"
default_mysql_target = format("{hive_lib}/{default_mysql_jar_name}")
hive_previous_jdbc_jar = format("{hive_lib}/{hive_previous_jdbc_jar_name}")
if not hive_use_existing_db:
jdbc_jar_name = default_mysql_jar_name
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
hive_jdbc_target = format("{hive_lib}/{jdbc_jar_name}")
hive2_jdbc_target = None
if hive_server2_hive2_dir:
hive2_jdbc_target = format("{hive_server2_hive2_lib}/{jdbc_jar_name}")
# during upgrade / downgrade, use the specific version to copy the JDBC JAR to
if upgrade_direction:
hive_jdbc_target = format("{hive_version_lib}/{jdbc_jar_name}")
hive2_jdbc_target = format("{hive_server2_hive2_version_lib}/{jdbc_jar_name}") if hive2_jdbc_target is not None else None
hive2_previous_jdbc_jar = format("{hive_server2_hive2_lib}/{hive_previous_jdbc_jar_name}") if hive_server2_hive2_lib is not None else None
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
# normally, the JDBC driver would be referenced by <stack-root>/current/.../foo.jar
# but in RU if <stack-selector-tool> is called and the restart fails, then this means that current pointer
# is now pointing to the upgraded version location; that's bad for the cp command
source_jdbc_file = format("/usr/lib/hive/lib/{jdbc_jar_name}")
check_db_connection_jar_name = "/DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver",
"org.postgresql.Driver","oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
prepackaged_jdbc_name = "ojdbc6.jar"
prepackaged_ojdbc_symlink = format("{hive_lib}/{prepackaged_jdbc_name}")
templeton_port = config['configurations']['webhcat-site']['templeton.port']
#constants for type2 jdbc
jdbc_libs_dir = format("{hive_lib}/native/lib64")
lib_dir_available = os.path.exists(jdbc_libs_dir)
if sqla_db_used:
jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
libs_in_hive_lib = format("{jdbc_libs_dir}/*")
# Start, Common Hosts and Ports
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
hive_metastore_hosts = default('/clusterHostInfo/hive_metastore_host', [])
hive_metastore_host = hive_metastore_hosts[0] if len(hive_metastore_hosts) > 0 else None
hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris'])
hive_server_hosts = default("/clusterHostInfo/hive_server_host", [])
hive_server_host = hive_server_hosts[0] if len(hive_server_hosts) > 0 else None
hive_server_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
hive_server_interactive_host = hive_server_interactive_hosts[0] if len(hive_server_interactive_hosts) > 0 else None
hive_server_interactive_ha = True if len(hive_server_interactive_hosts) > 1 else False
# End, Common Hosts and Ports
hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
if hive_transport_mode.lower() == "http":
hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
else:
hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
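# Resulting JDBC URL has the shape jdbc:hive2://<host>:<port>, e.g.
# jdbc:hive2://example-host:10000 (host shown here is illustrative only).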
hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
# ssl options
hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
hive_interactive_ssl_keystore_path = default('/configurations/hive-interactive-site/hive.server2.keystore.path', None)
hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
hive_interactive_ssl_keystore_password = default('/configurations/hive-interactive-site/hive.server2.keystore.password', None)
smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
fs_root = config['configurations']['core-site']['fs.defaultFS']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
hive_metastore_principal = config['configurations']['hive-site']['hive.metastore.kerberos.principal']
hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
#hive_env
hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
hive_interactive_pid = status_params.hive_interactive_pid
#Default conf dir for client
hive_conf_dirs_list = [hive_client_conf_dir]
# These are the folders to which the configs will be written to.
ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER']
if status_params.role == "HIVE_METASTORE" and hive_metastore_hosts is not None and hostname in hive_metastore_hosts:
hive_conf_dirs_list.append(hive_server_conf_dir)
elif status_params.role == "HIVE_SERVER" and hive_server_hosts is not None and hostname in hive_server_host:
hive_conf_dirs_list.append(hive_server_conf_dir)
elif status_params.role == "HIVE_SERVER_INTERACTIVE" and hive_server_interactive_hosts is not None and hostname in hive_server_interactive_hosts:
hive_conf_dirs_list.append(status_params.hive_server_interactive_conf_dir)
ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER_INTERACTIVE']
# log4j version is 2 for hive2; put config files under /etc/hive2/conf
if status_params.role == "HIVE_SERVER_INTERACTIVE":
log4j_version = '2'
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh.j2'
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid
# Hive Server Interactive
slider_am_container_mb = default("/configurations/hive-interactive-env/slider_am_container_mb", 341)
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
# Need this for yarn.nodemanager.recovery.dir in yarn-site
yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
target_hive_interactive = format("{hive_interactive_lib}/{jdbc_jar_name}")
hive_intaractive_previous_jdbc_jar = format("{hive_interactive_lib}/{hive_previous_jdbc_jar_name}")
jars_in_hive_lib = format("{hive_lib}/*.jar")
start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
start_metastore_path = format("{tmp_dir}/start_metastore_script")
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE", "HIVE_SERVER_INTERACTIVE"]:
hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
else:
hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
java64_home = config['hostLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
java_version = expect("/hostLevelParams/java_version", int)
##### MYSQL
db_name = config['configurations']['hive-env']['hive_database_name']
mysql_group = 'mysql'
mysql_host = config['clusterHostInfo']['hive_mysql_host']
mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
#### Metastore
# initialize the schema only if not in an upgrade/downgrade
init_metastore_schema = upgrade_direction is None
########## HCAT
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['hive-env']['hcat_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
hcat_env_sh_template = config['configurations']['hcat-env']['content']
#Hive log4j properties
webhcat_log_maxfilesize = default("/configurations/webhcat-log4j/webhcat_log_maxfilesize", 256)
webhcat_log_maxbackupindex = default("/configurations/webhcat-log4j/webhcat_log_maxbackupindex", 20)
hive_log_maxfilesize = default("/configurations/hive-log4j/hive_log_maxfilesize", 256)
hive_log_maxbackupindex = default("/configurations/hive-log4j/hive_log_maxbackupindex", 30)
hive_log_level = default("/configurations/hive-env/hive.log.level", "INFO")
#hive-log4j.properties.template
if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
log4j_props = config['configurations']['hive-log4j']['content']
else:
log4j_props = None
#webhcat-log4j.properties.template
if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
else:
log4j_webhcat_props = None
#hive-exec-log4j.properties.template
if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
else:
log4j_exec_props = None
daemon_name = status_params.daemon_name
process_name = status_params.process_name
hive_env_sh_template = config['configurations']['hive-env']['content']
hive_hdfs_user_dir = format("/user/{hive_user}")
hive_hdfs_user_mode = 0755
hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
#for create_hdfs_directory
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
# Tez-related properties
tez_user = config['configurations']['tez-env']['tez_user']
# Tez jars
tez_local_api_jars = '/usr/lib/tez/tez*.jar'
tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
# Tez libraries
tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
if OSCheck.is_ubuntu_family():
mysql_configname = '/etc/mysql/my.cnf'
else:
mysql_configname = '/etc/my.cnf'
mysql_user = 'mysql'
# Hive security
hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
hive_site_config = dict(config['configurations']['hive-site'])
########################################################
############# AMS related params #####################
########################################################
ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
else:
metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
metric_collector_protocol = 'https'
else:
metric_collector_protocol = 'http'
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
########################################################
############# Atlas related params #####################
########################################################
#region Atlas Hooks
hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
enable_atlas_hook = default('/configurations/hive-env/hive.atlas.hook', False)
atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
#endregion
########################################################
########### WebHCat related params #####################
########################################################
webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
templeton_pid_dir = status_params.hcat_pid_dir
webhcat_pid_file = status_params.webhcat_pid_file
templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
hcat_hdfs_user_dir = format("/user/{hcat_user}")
hcat_hdfs_user_mode = 0755
webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
webhcat_hdfs_user_mode = 0755
#for create_hdfs_directory
security_param = "true" if security_enabled else "false"
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
import functools
#create partial functions with common arguments for every HdfsResource call
#to create hdfs directory we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user = hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
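# Illustrative call shape only -- the actual HdfsResource invocations live in the
# service scripts (e.g. hive.py), not in this params file, and the keyword
# arguments below are assumptions based on the common usage pattern:
#
#   params.HdfsResource(hive_hdfs_user_dir,
#                       type="directory",
#                       action="create_on_execute",
#                       owner=hive_user,
#                       mode=hive_hdfs_user_mode)
#   params.HdfsResource(None, action="execute")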
# Hive Interactive related
hive_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
has_hive_interactive = len(hive_interactive_hosts) > 0
#llap log4j properties
hive_llap_log_maxfilesize = default('/configurations/llap-daemon-log4j/hive_llap_log_maxfilesize', 256)
hive_llap_log_maxbackupindex = default('/configurations/llap-daemon-log4j/hive_llap_log_maxbackupindex', 240)
#hive log4j2 properties
hive2_log_maxfilesize = default('/configurations/hive-log4j2/hive2_log_maxfilesize', 256)
hive2_log_maxbackupindex = default('/configurations/hive-log4j2/hive2_log_maxbackupindex', 30)
#llap cli log4j2 properties
llap_cli_log_maxfilesize = default('/configurations/llap-cli-log4j2/llap_cli_log_maxfilesize', 256)
llap_cli_log_maxbackupindex = default('/configurations/llap-cli-log4j2/llap_cli_log_maxbackupindex', 30)
if has_hive_interactive:
llap_daemon_log4j = config['configurations']['llap-daemon-log4j']['content']
llap_cli_log4j2 = config['configurations']['llap-cli-log4j2']['content']
hive_log4j2 = config['configurations']['hive-log4j2']['content']
hive_exec_log4j2 = config['configurations']['hive-exec-log4j2']['content']
beeline_log4j2 = config['configurations']['beeline-log4j2']['content']
hive_server_interactive_conf_dir = status_params.hive_server_interactive_conf_dir
execute_path_hive_interactive = os.path.join(os.environ['PATH'], hive_interactive_bin, hadoop_bin_dir)
start_hiveserver2_interactive_script = 'startHiveserver2Interactive.sh.j2'
start_hiveserver2_interactive_path = format("{tmp_dir}/start_hiveserver2_interactive_script")
hive_interactive_env_sh_template = config['configurations']['hive-interactive-env']['content']
hive_interactive_enabled = default('/configurations/hive-interactive-env/enable_hive_interactive', False)
llap_app_java_opts = default('/configurations/hive-interactive-env/llap_java_opts', '-XX:+AlwaysPreTouch {% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}')
hive_interactive_heapsize = hive_heapsize
llap_app_name = config['configurations']['hive-interactive-env']['llap_app_name']
# Ambari upgrade may not add this config as it will force restart of HSI (stack upgrade should)
if 'hive_heapsize' in config['configurations']['hive-interactive-env']:
hive_interactive_heapsize = config['configurations']['hive-interactive-env']['hive_heapsize']
# Service check related
if hive_transport_mode.lower() == "http":
hive_server_interactive_port = config['configurations']['hive-interactive-site']['hive.server2.thrift.http.port']
else:
hive_server_interactive_port = default('/configurations/hive-interactive-site/hive.server2.thrift.port',"10500")
# Tez for Hive interactive related
tez_interactive_config_dir = "/etc/tez_hive2/conf"
tez_interactive_user = config['configurations']['tez-env']['tez_user']
num_retries_for_checking_llap_status = default('/configurations/hive-interactive-env/num_retries_for_checking_llap_status', 10)
# Used in LLAP slider package creation
yarn_nm_mem = config['configurations']['yarn-site']['yarn.nodemanager.resource.memory-mb']
if stack_supports_hive_interactive_ga:
num_llap_daemon_running_nodes = config['configurations']['hive-interactive-env']['num_llap_nodes_for_llap_daemons']
num_llap_nodes = config['configurations']['hive-interactive-env']['num_llap_nodes']
llap_daemon_container_size = config['configurations']['hive-interactive-site']['hive.llap.daemon.yarn.container.mb']
llap_log_level = config['configurations']['hive-interactive-env']['llap_log_level']
llap_logger = default('/configurations/hive-interactive-site/hive.llap.daemon.logger', 'query-routing')
hive_aux_jars = default('/configurations/hive-interactive-env/hive_aux_jars', '')
hive_llap_io_mem_size = config['configurations']['hive-interactive-site']['hive.llap.io.memory.size']
llap_heap_size = config['configurations']['hive-interactive-env']['llap_heap_size']
llap_app_name = config['configurations']['hive-interactive-env']['llap_app_name']
llap_extra_slider_opts = default('/configurations/hive-interactive-env/llap_extra_slider_opts', "")
hive_llap_principal = None
if security_enabled:
hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.zk.sm.keytab.file']
hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.zk.sm.principal']).replace('_HOST',hostname.lower())
pass
if len(hive_server_hosts) == 0 and len(hive_server_interactive_hosts) > 0:
hive_server2_zookeeper_namespace = config['configurations']['hive-interactive-site']['hive.server2.zookeeper.namespace']
else:
hive_server2_zookeeper_namespace = config['configurations']['hive-site']['hive.server2.zookeeper.namespace']
hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
if security_enabled:
hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
hive_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
hive_cluster_token_zkstore = default("/configurations/hive-site/hive.cluster.delegation.token.store.zookeeper.znode", None)
jaas_file = os.path.join(hive_config_dir, 'zkmigrator_jaas.conf')
hive_zk_namespace = default("/configurations/hive-site/hive.zookeeper.namespace", None)
# ranger hive plugin section start
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# ranger hive plugin enabled property
enable_ranger_hive = config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger'
# Ranger supports XML configurations; instead of depending on the
# xml_configurations_supported flag from ranger-env / the stack feature, force it to True here.
xml_configurations_supported = True
# get ranger hive properties if enable_ranger_hive is True
if enable_ranger_hive:
# get ranger policy url
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
if xml_configurations_supported:
policymgr_mgr_url = config['configurations']['ranger-hive-security']['ranger.plugin.hive.policy.rest.url']
if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
# ranger audit db user
xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
# ranger hive service name
repo_name = str(config['clusterName']) + '_hive'
repo_name_value = config['configurations']['ranger-hive-security']['ranger.plugin.hive.service.name']
if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
repo_name = repo_name_value
jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
# ranger-env config
ranger_env = config['configurations']['ranger-env']
# create ranger-env config having external ranger credential properties
if not has_ranger_admin and enable_ranger_hive:
external_admin_username = default('/configurations/ranger-hive-plugin-properties/external_admin_username', 'admin')
external_admin_password = default('/configurations/ranger-hive-plugin-properties/external_admin_password', 'admin')
external_ranger_admin_username = default('/configurations/ranger-hive-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
external_ranger_admin_password = default('/configurations/ranger-hive-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
ranger_env = {}
ranger_env['admin_username'] = external_admin_username
ranger_env['admin_password'] = external_admin_password
ranger_env['ranger_admin_username'] = external_ranger_admin_username
ranger_env['ranger_admin_password'] = external_ranger_admin_password
ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
repo_config_password = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
ranger_downloaded_custom_connector = None
ranger_previous_jdbc_jar_name = None
ranger_driver_curl_source = None
ranger_driver_curl_target = None
ranger_previous_jdbc_jar = None
# to get db connector related properties
if has_ranger_admin and stack_supports_ranger_audit_db:
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
ranger_jdbc_jar_name, ranger_previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}")
ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}")
ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}")
ranger_previous_jdbc_jar = format("{hive_lib}/{ranger_previous_jdbc_jar_name}")
sql_connector_jar = ''
ranger_hive_url = format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url
if stack_supports_ranger_hive_jdbc_url_change:
ranger_hive_url = format("jdbc:hive2://{hive_zookeeper_quorum}/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace={hive_server2_zookeeper_namespace}")
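# e.g. jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
# (ZooKeeper hosts and namespace above are illustrative placeholders)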
hive_ranger_plugin_config = {
'username': repo_config_username,
'password': repo_config_password,
'jdbc.driverClassName': jdbc_driver_class_name,
'jdbc.url': ranger_hive_url,
'commonNameForCertificate': common_name_for_certificate
}
hive_ranger_plugin_repo = {
'isActive': 'true',
'config': json.dumps(hive_ranger_plugin_config),
'description': 'hive repo',
'name': repo_name,
'repositoryType': 'hive',
'assetType': '3'
}
if stack_supports_ranger_kerberos and security_enabled:
hive_ranger_plugin_config['policy.download.auth.users'] = hive_user
hive_ranger_plugin_config['tag.download.auth.users'] = hive_user
hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = hive_user
if stack_supports_ranger_kerberos:
hive_ranger_plugin_config['ambari.service.check.user'] = policy_user
hive_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': hive_ranger_plugin_config,
'description': 'hive repo',
'name': repo_name,
'type': 'hive'
}
xa_audit_db_password = ''
if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
xa_audit_db_is_enabled = False
if xml_configurations_supported and stack_supports_ranger_audit_db:
xa_audit_db_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.db']
xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
ssl_keystore_password = config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
ssl_truststore_password = config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
# for SQLA explicitly disable audit to DB for Ranger
if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
xa_audit_db_is_enabled = False
# ranger hive plugin section end
|
arenadata/ambari
|
ambari-server/src/main/resources/stacks/ADH/1.5/services/HIVE/package/scripts/params_linux.py
|
Python
|
apache-2.0
| 42,040
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
import datetime as dt
import io
import pkgutil
import weakref
from six.moves import range
import arduino_helpers.hardware.teensy as teensy
import arduino_helpers.hardware.teensy.adc as adc
import arduino_helpers.hardware.teensy.dma as dma
import arduino_helpers.hardware.teensy.pdb as pdb
import numpy as np
import pandas as pd
import six
import teensy_minimal_rpc.DMA as DMA
import teensy_minimal_rpc.SIM as SIM
def get_adc_configs(F_BUS=48e6, ADC_CLK=22e6):
'''
Returns
-------
pandas.DataFrame
Table containing one ADC configuration per row.
Notes
-----
The conversion time is calculated according to the "ConversionTime"
equation (see 31.4.5.5/681) in `K20P64M72SF1RM` manual.
**TODO** The conversion times calculated in this function seem to differ
from those calculated by the Kinetis ADC conversion time calculator. For
now, we assume that they are close enough for practical use, but there
might be some edge cases where inappropriate ADC settings may be chosen as
a result.
.. versionchanged:: X.X.X
Load table of ADC configurations using ``pkgutil`` to support reading
from ``.zip`` files, e.g., Py2Exe ``library.zip`` packaged modules.
'''
# Read serialized (CSV) table of all possible ADC configurations (not all
# valid).
#
# XXX Load using `pkgutil` to support reading from `.zip` files, e.g.,
# Py2Exe `library.zip` packaged modules.
csv_data = pkgutil.get_data(__name__, 'static/data/adc_configs.csv')
df_adc_configs = pd.read_csv(io.BytesIO(csv_data))
df_adc_configs = (df_adc_configs
.loc[(df_adc_configs['CFG2[ADACKEN]'] == 0) &
(df_adc_configs['CFG1[ADICLK]'] == 1)])
df_adc_configs.insert(3, 'CFG1[ADIV]', 0)
df_adc_configs['ADC_CLK'] = ADC_CLK
# Maximum ADC clock for 16-bit conversion is 11MHz.
df_adc_configs.loc[(df_adc_configs['Bit-width'] >= 16) &
(df_adc_configs['ADC_CLK'] > 11e6),
'ADC_CLK'] = 11e6
# If the ADC clock is 8MHz or higher, ADHSC (high-speed clock) bit
# must be set.
df_adc_configs.loc[df_adc_configs.ADC_CLK >= 8e6, 'CFG2[ADHSC]'] = 1
conversion_time = (df_adc_configs['bus clock'] / F_BUS +
df_adc_configs.extra_us * 1e-6 +
1 / df_adc_configs.ADC_CLK
* (df_adc_configs.ADCK
+ df_adc_configs.AverageNum
* (df_adc_configs.ADCK_bct +
df_adc_configs.ADCK_lst_adder +
df_adc_configs.ADCK_hsc_adder)))
df_adc_configs['conversion_time'] = conversion_time
df_adc_configs['conversion_rate'] = 1 / conversion_time
return (df_adc_configs.reset_index(drop=True).drop_duplicates()
.sort_values('conversion_time'))
DEFAULT_ADC_CONFIGS = get_adc_configs()
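# Illustrative (not executed) query against the configuration table above; the
# column names are the ones used by `AdcDmaMixin.analog_reads_config` below,
# while the specific values here are placeholders:
#
#     cfgs = DEFAULT_ADC_CONFIGS
#     match = cfgs.loc[(cfgs['Bit-width'] == 16) &
#                      (cfgs['AverageNum'] == 4) &
#                      (cfgs['Mode'] == 'single-ended')]
#     # Lowest conversion rate that still matches, i.e. lowest expected noise.
#     best = match.sort_values('conversion_rate').iloc[0]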
HW_TCDS_ADDR = 0x40009000
TCD_RECORD_DTYPE = [('SADDR', 'uint32'),
('SOFF', 'uint16'),
('ATTR', 'uint16'),
('NBYTES', 'uint32'),
('SLAST', 'uint32'),
('DADDR', 'uint32'),
('DOFF', 'uint16'),
('CITER', 'uint16'),
('DLASTSGA', 'uint32'),
('CSR', 'uint16'),
('BITER', 'uint16')]
class AdcSampler(object):
'''
This class manages configuration of an analog-to-digital converter (ADC)
and three DMA channels to sample multiple measurements from one or more
analog input channels.
Parameters
----------
proxy : teensy_minimal_rpc.proxy.Proxy
channels : list
List of labels of analog channels to measure (e.g., ``['A0', 'A3',
'A1']``).
sample_count : int
Number of samples to measure from each channel during each read
operation.
dma_channels : list,optional
List of identifiers of DMA channels to use (default=``[0, 1, 2]``).
adc_number : int
Identifier of ADC to use (default=:data:`teensy.ADC_0`)
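Notes
-----
Minimal usage sketch (assumes ``proxy`` is an already-connected
:class:`teensy_minimal_rpc.proxy.Proxy`; the channel names, sample count,
and sampling rate are illustrative placeholders)::
    sampler = AdcSampler(proxy, ['A0', 'A1'], sample_count=64)
    df_results = (sampler.start_read(sample_rate_hz=1000)
                  .get_results_async(timeout_s=5))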
'''
def __init__(self, proxy, channels, sample_count,
dma_channels=None, adc_number=teensy.ADC_0):
# Use weak reference to prevent zombie `proxy` staying alive even after
# deleting the original `proxy` reference.
self.proxy = weakref.ref(proxy)
if isinstance(channels, six.string_types):
# Single channel was specified. Wrap channel in list.
channels = [channels]
self.channels = channels
# The number of samples to record for each ADC channel.
self.sample_count = sample_count
if dma_channels is None:
dma_channels = pd.Series([0, 1, 2],
index=['scatter', 'adc_channel_configs',
'adc_conversion'])
self.dma_channels = dma_channels
self.adc_number = adc_number
# Map Teensy analog channel labels to channels in
# `ADC_SC1x` format.
self.channel_sc1as = np.array(adc.SC1A_PINS[channels].tolist(),
dtype='uint32')
# Enable PDB clock (DMA and ADC clocks should already be enabled).
self.proxy().update_sim_SCGC6(SIM.R_SCGC6(PDB=True))
self.allocate_device_arrays()
self.reset()
self.configure_adc()
self.configure_dma()
self._sample_rate_hz = None
self._pdb_config = None
@property
def sample_rate_hz(self):
return self._sample_rate_hz
@sample_rate_hz.setter
def sample_rate_hz(self, value):
if self.sample_rate_hz != value:
self._sample_rate_hz = value
self.pdb_config = self.configure_timer(self._sample_rate_hz)
@property
def pdb_config(self):
return self._pdb_config
@pdb_config.setter
def pdb_config(self, value):
self._pdb_config = np.uint32(value)
def configure_dma(self):
self.configure_dma_channel_adc_conversion_mux()
self.assert_no_dma_error()
self.configure_dma_channel_scatter()
self.assert_no_dma_error()
self.configure_dma_channel_adc_channel_configs()
self.assert_no_dma_error()
self.configure_dma_channel_adc_conversion()
self.assert_no_dma_error()
self.configure_dma_channel_adc_channel_configs_mux()
self.assert_no_dma_error()
def assert_no_dma_error(self):
df_dma_registers = self.proxy().DMA_registers()
df_errors = df_dma_registers.loc[(df_dma_registers.full_name == 'ERR')
& (df_dma_registers.value > 0)]
if df_errors.shape[0] > 0:
raise IOError('One or more DMA errors occurred.\n%s' % df_errors)
def allocate_device_arrays(self):
'''
Dynamically allocate (using :func:`malloc`) arrays on Teensy device.
Notes
-----
The :meth:`__del__` method frees the memory allocated by this method.
+-------------+------------------------------+-----------------------+
| Name | Description | Size (bytes) |
+=============+==============================+=======================+
| scan_result | Measurements of single scan | len(:attr:`channels`) |
| | through input channels | * sizeof(uint16) |
+-------------+------------------------------+-----------------------+
| tcds | Transfer Control Descriptors | :attr:`sample_count` |
| | | * sizeof(uint32) |
+-------------+------------------------------+-----------------------+
For each **analog input channel**:
+---------+----------------------------------+----------------------+
| Name | Description | Size (bytes) |
+=========+==================================+======================+
| sc1as | ``SC1A`` register configuration | 1 |
| | specifying input channel address | |
+---------+----------------------------------+----------------------+
| samples | Contiguous measurements for | :attr:`sample_count` |
| | input channel | * sizeof(uint16) |
+---------+----------------------------------+----------------------+
'''
# Calculate total number of bytes for single scan of ADC channels.
self.N = np.dtype('uint16').itemsize * self.channel_sc1as.size
# Use series to store all allocations. This makes it easy
# to, e.g., free allocated device memory on clean up.
self.allocs = pd.Series()
# Allocate device memory for results from single ADC scan.
self.allocs['scan_result'] = self.proxy().mem_alloc(self.N)
# Allocate and copy channel SC1A configurations to device memory.
self.allocs['sc1as'] = (self.proxy()
.mem_aligned_alloc_and_set(4,
self.channel_sc1as
.view('uint8')))
# Allocate device memory for sample buffer for each
# ADC channel.
self.allocs['samples'] = self.proxy().mem_alloc(self.sample_count *
self.N)
# Allocate device memory for DMA TCD configurations. __N.B.,__ Transfer
# control descriptors are 32 bytes each and MUST be aligned to
# 0-modulo-32 address.
self.allocs['tcds'] = self.proxy().mem_aligned_alloc(32,
self.sample_count
* 32)
# Store list of device TCD configuration addresses.
self.tcd_addrs = [self.allocs.tcds + 32 * i
for i in range(self.sample_count)]
# Store list of device TCD register addresses.
# __N.B.,__ There are 16 DMA channels on the device.
# __TODO__ Query `proxy` to determine number of DMA channels.
self.hw_tcd_addrs = [dma.HW_TCDS_ADDR + 32 * i for i in range(16)]
def reset(self):
'''
Fill result arrays with zeros.
See also
--------
:meth:`allocate_device_arrays`
'''
self.proxy().mem_fill_uint8(self.allocs.scan_result, 0, self.N)
self.proxy().mem_fill_uint8(self.allocs.samples, 0, self.sample_count *
self.N)
def configure_dma_channel_adc_channel_configs(self):
'''
Configure DMA channel ``adc_channel_configs`` to copy SC1A
configurations from :attr:`channel_sc1as`, one at a time, to the
:data:`ADC0_SC1A` register (i.e., ADC Status and Control Register 1).
See also
--------
:meth:`configure_dma_channel_adc_conversion`
:meth:`configure_dma_channel_scatter`
Section **ADC Status and Control Registers 1 (ADCx_SC1n) (31.3.1/653)**
in `K20P64M72SF1RM`_ manual.
.. _K20P64M72SF1RM: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
sca1_tcd_msg = \
DMA.TCD(CITER_ELINKNO=
DMA.R_TCD_ITER_ELINKNO(ELINK=False,
ITER=self.channel_sc1as.size),
BITER_ELINKNO=
DMA.R_TCD_ITER_ELINKNO(ELINK=False,
ITER=self.channel_sc1as.size),
ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._32_BIT,
DSIZE=DMA.R_TCD_ATTR._32_BIT),
NBYTES_MLNO=4, # `SC1A` register is 4 bytes (32-bit)
SADDR=int(self.allocs.sc1as),
SOFF=4,
SLAST=-self.channel_sc1as.size * 4,
DADDR=int(adc.ADC0_SC1A),
DOFF=0,
DLASTSGA=0,
CSR=DMA.R_TCD_CSR(START=0, DONE=False))
self.proxy().update_dma_TCD(self.dma_channels.adc_channel_configs,
sca1_tcd_msg)
def configure_dma_channel_adc_channel_configs_mux(self):
'''
Configure DMA channel ``adc_channel_configs`` to trigger based on
programmable delay block timer.
See also
--------
Section **DMA MUX request sources (3.3.8.1/77)** in `K20P64M72SF1RM`_
manual.
Section **DMA channels with periodic triggering capability
(20.4.1/367)** in `K20P64M72SF1RM`_ manual.
Section **Channel Configuration register (``DMAMUX_CHCFGn``)
(20.3.1/366)** in `K20P64M72SF1RM`_ manual.
.. _K20P64M72SF1RM: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
# Configure DMA channel `i` enable to use MUX triggering from
# programmable delay block.
self.proxy().update_dma_mux_chcfg(self.dma_channels.adc_channel_configs,
DMA.MUX_CHCFG(SOURCE=
dma.DMAMUX_SOURCE_PDB,
TRIG=False, ENBL=True))
# Set enable request for DMA channel `i`.
#
# [1]: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
self.proxy().update_dma_registers(
DMA.Registers(SERQ=int(self.dma_channels.adc_channel_configs)))
def configure_dma_channel_adc_conversion_mux(self):
'''
Set mux source for DMA channel ``adc_conversion`` to ADC0 and enable
DMA for ADC0.
See also
--------
Section **Channel Configuration register (``DMAMUX_CHCFGn``)
(20.3.1/366)** in `K20P64M72SF1RM`_ manual.
Section **Status and Control Register 2 (ADCx_SC2) (31.3.6/661)** in
`K20P64M72SF1RM`_ manual.
.. _K20P64M72SF1RM: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
self.proxy().update_dma_mux_chcfg(
self.dma_channels.adc_conversion,
DMA.MUX_CHCFG(
# Route ADC0 as DMA channel source.
SOURCE=dma.DMAMUX_SOURCE_ADC0,
TRIG=False,# Disable periodic trigger.
# Enable the DMAMUX configuration for channel.
ENBL=True))
# Update ADC0_SC2 to enable DMA and assert the ADC DMA request during
# an ADC conversion complete event noted when any of the `SC1n[COCO]`
# (i.e., conversion complete) flags is asserted.
self.proxy().enableDMA(teensy.ADC_0)
def configure_dma_channel_adc_conversion(self):
'''
Configure DMA channel ``adc_conversion`` to:
- Copy result after completion of each ADC conversion to subsequent
locations in :attr:`allocs.scan_result` array.
- Trigger DMA channel ``adc_channel_configs`` after each ADC
conversion to copy next ADC configuration to SC1A register (i.e.,
ADC Status and Control Register 1).
- Trigger DMA channel ``scatter`` to start after completion of major
loop.
Notes
-----
After starting PDB timer with :meth:`start_read`, DMA channel
``adc_conversion`` will continue to scan analog input channels
until the PDB timer is stopped. The handler for the completed
scatter DMA interrupt currently stops the PDB timer.
See also
--------
:meth:`configure_dma_channel_adc_channel_configs`
:meth:`configure_dma_channel_scatter`
Section **ADC Status and Control Registers 1 (ADCx_SC1n) (31.3.1/653)**
in `K20P64M72SF1RM`_ manual.
.. _K20P64M72SF1RM: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
# **NOTE**: "When the CITER field is initially loaded by software, it
# must be set to the same value as that contained in the BITER field."
# CITER is current major iteration count, and BITER is the
# starting/beginning major iteration count.
#
# See `CITER` field section **TCD Current Minor Loop Link, Major Loop
# Count (Channel Linking Disabled) (`DMA_TCDn_CITER_ELINKNO`)
# (21.3.27/423)
tcd_msg = DMA.TCD(
CITER_ELINKYES=
DMA.R_TCD_ITER_ELINKYES(ELINK=True,
LINKCH=1,
ITER=self.channel_sc1as.size),
BITER_ELINKYES=
DMA.R_TCD_ITER_ELINKYES(ELINK=True,
LINKCH=1,
ITER=self.channel_sc1as.size),
ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._16_BIT,
DSIZE=DMA.R_TCD_ATTR._16_BIT),
NBYTES_MLNO=2, # sizeof(uint16)
SADDR=adc.ADC0_RA,
SOFF=0,
SLAST=0,
DADDR=int(self.allocs.scan_result),
DOFF=2,
DLASTSGA=-self.N,
CSR=DMA.R_TCD_CSR(START=0, DONE=False,
# Start `scatter` DMA channel
# after completion of major loop.
MAJORELINK=True,
MAJORLINKCH=
int(self.dma_channels.scatter)))
self.proxy().update_dma_TCD(self.dma_channels.adc_conversion, tcd_msg)
# DMA request input signals and this enable request flag
# must be asserted before a channel’s hardware service
# request is accepted (21.3.3/394).
self.proxy().update_dma_registers(
DMA.Registers(SERQ=int(self.dma_channels.adc_conversion)))
def configure_dma_channel_scatter(self):
'''
Configure a Transfer Control Descriptor structure for *each scan* of
the analog input channels, copy TCD structures to device, and attach
DMA interrupt handler to scatter DMA channel.
Notes
-----
To measure :attr:`sample_count` measurements from each analog input
channel in :attr:`channels`, :attr:`sample_count` consecutive scans
through the analog channels are performed.
A Transfer Control Descriptor structure is configured for *each scan*
to scatter the results from the scan to the next index position in
the samples array of each analog input channel.
See also
--------
:meth:`allocate_device_arrays`
:meth:`configure_dma_channel_adc_channel_configs`
:meth:`configure_dma_channel_adc_conversion`
'''
# Create Transfer Control Descriptor configuration for first chunk, encoded
# as a Protocol Buffer message.
tcd0_msg = DMA.TCD(CITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),
BITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),
ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._16_BIT,
DSIZE=DMA.R_TCD_ATTR._16_BIT),
# N=analog input channels * sizeof(uint16_t)
NBYTES_MLNO=self.N,
SADDR=int(self.allocs.scan_result),
SOFF=2,
SLAST=-self.N,
DADDR=int(self.allocs.samples),
DOFF=2 * self.sample_count,
DLASTSGA=int(self.tcd_addrs[1]),
CSR=DMA.R_TCD_CSR(START=0, DONE=False, ESG=True))
# Convert Protocol Buffer encoded TCD to bytes structure (see
# `TCD_RECORD_DTYPE`).
tcd0 = self.proxy().tcd_msg_to_struct(tcd0_msg)
# Create binary TCD struct for each TCD protobuf message and copy to
# device memory.
for i in range(self.sample_count):
tcd_i = tcd0.copy()
# Copy from `scan_result` array.
tcd_i['SADDR'] = self.allocs.scan_result
# Perform strided copy to next available `samples` location for
# each analog input channel.
tcd_i['DADDR'] = self.allocs.samples + 2 * i
# After copying is finished, load Transfer Control Descriptor for
# next sample scan.
tcd_i['DLASTSGA'] = self.tcd_addrs[(i + 1)
% len(self.tcd_addrs)]
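# Set `ESG` (enable scatter/gather, TCD CSR bit 4) so that, when this
# descriptor completes, the next TCD is loaded from `DLASTSGA`.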
tcd_i['CSR'] |= (1 << 4)
if i == (self.sample_count - 1):
# Last sample, so trigger major loop interrupt
tcd_i['CSR'] |= (1 << 1) # Set `INTMAJOR` (21.3.29/426)
# Copy TCD for sample number `i` to device.
self.proxy().mem_cpy_host_to_device(self.tcd_addrs[i],
tcd_i.tostring())
# Load initial TCD in scatter chain to DMA channel chosen to handle
# scattering.
self.proxy().mem_cpy_host_to_device(self.hw_tcd_addrs
[self.dma_channels.scatter],
tcd0.tostring())
# Attach interrupt handler to scatter DMA channel.
self.proxy().attach_dma_interrupt(self.dma_channels.scatter)
def configure_adc(self):
'''
Select ``b`` input for ADC MUX.
Notes
-----
It seems like this has to do with chip pinout configuration, where
on the Teensy 3.2, the ``b`` ADC inputs are used?
See also
--------
Section **ADC Configuration Register 2 (``ADCx_CFG2``) (31.3.3/658)**
in `K20P64M72SF1RM`_ manual.
Section **ADC0 Channel Assignment for 64-Pin Package (3.7.1.3.1.1/98)**
in `K20P64M72SF1RM`_ manual.
Section **K20 Signal Multiplexing and Pin Assignments (10.3.1/207)** in
`K20P64M72SF1RM`_ manual.
.. _K20P64M72SF1RM: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
import teensy_minimal_rpc.ADC as ADC
self.proxy().update_adc_registers(
self.adc_number,
ADC.Registers(CFG2=ADC.R_CFG2(MUXSEL=ADC.R_CFG2.B)))
def configure_timer(self, sample_rate_hz):
'''
Configure programmable delay block according to specified sampling
rate, but **do not** copy configuration to ``PDB_CONFIG`` register.
Notes
-----
Enable software trigger, continuous mode, and **generate a DMA
request** instead of an interrupt **when the programmed delay has
passed**.
Parameters
----------
sample_rate_hz : int
Sample rate in Hz.
Returns
-------
int
Programmable delay block configuration
See also
--------
Chapter 35 **Programmable Delay Block (PDB) (35.1/747)** in
`K20P64M72SF1RM`_ manual.
.. _K20P64M72SF1RM: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
# Set PDB interrupt to occur when IDLY is equal to CNT + 1.
# PDB0_IDLY = 1
self.proxy().mem_cpy_host_to_device(pdb.PDB0_IDLY,
np.uint32(1).tostring())
clock_divide = pdb.get_pdb_divide_params(sample_rate_hz).iloc[0]
# PDB0_MOD = (uint16_t)(mod-1);
self.proxy().mem_cpy_host_to_device(pdb.PDB0_MOD,
np.uint32(clock_divide.clock_mod)
.tostring())
PDB_CONFIG = (pdb.PDB_SC_TRGSEL(15) # Software trigger
| pdb.PDB_SC_PDBEN # Enable PDB
| pdb.PDB_SC_CONT # Continuous
| pdb.PDB_SC_LDMOD(0)
| pdb.PDB_SC_PRESCALER(clock_divide.prescaler)
| pdb.PDB_SC_MULT(clock_divide.mult_)
| pdb.PDB_SC_DMAEN # Enable DMA
| pdb.PDB_SC_LDOK) # Load all new values
return PDB_CONFIG
def start_read(self, sample_rate_hz=None, stream_id=0):
'''
**TODO** Throw exception if previous read has not completed yet.
Otherwise, this method may clobber a currently running DMA ADC read
operation, potentially rendering the microcontroller unresponsive.
Trigger start of ADC sampling at the specified sampling rate.
1. Start PDB timer according to specified sample rate (in Hz).
2. Each completion of the PDB timer triggers DMA channel
``adc_channel_configs`` request.
3. Each request to DMA channel ``adc_channel_configs`` copies the
configuration for an analog input channel to SC1A register (i.e.,
ADC Status and Control Register 1), which triggers analog
conversion.
4. Completion of analog conversion triggers DMA channel
``adc_conversion`` to copy result from ``ADC0_RA`` to the
respective position in the :data:`scan_result` array.
5. After each scan through **all channels** (i.e., after each DMA
channel ``adc_conversion`` major loop completion), trigger for DMA
channel ``scatter``.
6. Each request to the ``scatter`` DMA channel scatters results from
one scan pass to append onto a separate array for each analog input
channel.
7. Steps 2-6 repeat :attr:`sample_count` times.
Parameters
----------
sample_rate_hz : int, optional
Sample rate in Hz.
If not specified, use ``sample_rate_hz`` setting from previous call.
stream_id : int, optional
Stream identifier.
May be passed to :meth:`get_results_async` to filter related DMA
data.
Returns
-------
AdcSampler
Returns reference to ``self`` to enable method call chaining, e.g.,
``adc_sampler.start_read(...).get_results_async(...)``.
See also
--------
:meth:`get_results`
:meth:`get_results_async`
Section **ADC Status and Control Registers 1 (ADCx_SC1n) (31.3.1/653)**
in `K20P64M72SF1RM`_ manual.
.. _K20P64M72SF1RM: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
self.proxy().attach_dma_interrupt(self.dma_channels.scatter)
if sample_rate_hz is None and self.sample_rate_hz is None:
raise ValueError('No cached sampling rate available. Must specify'
' `sample_rate_hz` (can be omitted on subsequent '
'calls).')
if sample_rate_hz is not None:
    self.sample_rate_hz = sample_rate_hz
# Copy configured PDB register state to device hardware register.
result = self.proxy().start_dma_adc(self.pdb_config,
self.allocs.samples,
self.sample_count * self.N,
stream_id)
if not result:
raise RuntimeError('Previous DMA ADC operation in progress.')
return self
def get_results(self):
'''
Returns
-------
pandas.DataFrame
Table containing :attr:`sample_count` ADC readings for each analog
input channel.
Notes
-----
**Does not guarantee result is ready!**
**TODO** Provide mechanism to poll status of previously started
read.
'''
data = self.proxy().mem_cpy_device_to_host(self.allocs.samples,
self.sample_count * self.N)
df_adc_results = pd.DataFrame(data.view('uint16')
.reshape(-1, self.sample_count).T,
columns=self.channels)
return df_adc_results
def get_results_async(self, timeout_s=None):
'''
Returns
-------
pandas.DataFrame
Table containing :attr:`sample_count` ADC readings for each analog
input channel, indexed by:
- ADC DMA stream identifier (i.e., ``stream_id``).
- Measurement timestamp.
Notes
-----
**Does not guarantee result is ready!**
'''
stream_queue = self.proxy()._packet_watcher.queues.stream
frames = []
start_time = dt.datetime.now()
while stream_queue.qsize() < 1:
if (timeout_s is not None and (timeout_s <
(dt.datetime.now() -
start_time).total_seconds())):
raise IOError('Timed out waiting for streamed result.')
# At least one packet is available in the ADC stream queue.
packet_count = stream_queue.qsize()
for i in range(packet_count):
datetime_i, packet_i = stream_queue.get_nowait()
datetimes_i = [datetime_i + dt.timedelta(seconds=t_j)
for t_j in np.arange(self.sample_count) *
1. / self.sample_rate_hz]
df_adc_results_i = pd.DataFrame(np.fromstring(packet_i.data(),
dtype='uint16')
.reshape(-1, self.sample_count).T,
columns=self.channels,
index=datetimes_i)
df_adc_results_i.index.name = 'timestamp'
# Mark the frame with the corresponding stream identifier.
df_adc_results_i.insert(0, 'stream_id', packet_i.iuid)
frames.append(df_adc_results_i)
return (pd.concat(frames).set_index('stream_id', append=True)
.reorder_levels(['stream_id', 0]))
def __del__(self):
self.allocs[['scan_result', 'samples']].map(self.proxy().mem_free)
self.allocs[['sc1as', 'tcds']].map(self.proxy().mem_aligned_free)
class AdcDmaMixin(object):
'''
This mixin class implements DMA-enabled analog-to-digital converter (ADC)
methods.
'''
def init_dma(self):
'''
Initialize eDMA engine. This includes:
- Enabling clock gating for DMA and DMA mux.
- Resetting all DMA channel transfer control descriptors.
See the following sections in [K20P64M72SF1RM][1] for more
information:
- (12.2.13/256) System Clock Gating Control Register 6 (SIM_SCGC6)
- (12.2.14/259) System Clock Gating Control Register 7 (SIM_SCGC7)
- (21.3.17/415) TCD registers
[1]: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
from teensy_minimal_rpc.SIM import R_SCGC6, R_SCGC7
# Enable DMA-related clocks in clock-gating configuration
# registers.
# SIM_SCGC6 |= SIM_SCGC6_DMAMUX;
self.update_sim_SCGC6(R_SCGC6(DMAMUX=True))
# SIM_SCGC7 |= SIM_SCGC7_DMA;
self.update_sim_SCGC7(R_SCGC7(DMA=True))
# Reset all DMA transfer control descriptor registers (i.e., set to
# 0).
for i in range(self.dma_channel_count()):
self.reset_dma_TCD(i)
def DMA_TCD(self, dma_channel):
'''
Parameters
----------
dma_channel : int
DMA channel for which to read the Transfer Control Descriptor.
Returns
-------
pandas.DataFrame
Table of Transfer Control Descriptor fields with corresponding
values.
In addition, several reference columns are included. For
example, the ``page`` column indicates the page in the
[reference manual][1] where the respective register field is
described.
[1]: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
from arduino_rpc.protobuf import resolve_field_values
import arduino_helpers.hardware.teensy.dma as dma
from teensy_minimal_rpc.DMA import TCD
tcd = TCD.FromString(self.read_dma_TCD(dma_channel).tostring())
df_tcd = resolve_field_values(tcd)
return (df_tcd[['full_name', 'value']].dropna()
.join(dma.TCD_DESCRIPTIONS, on='full_name')
[['full_name', 'value', 'short_description', 'page']]
.sort_values(['page','full_name']))
def DMA_registers(self):
'''
Returns
-------
pandas.DataFrame
Table of DMA configuration fields with corresponding values.
In addition, several reference columns are included. For
example, the ``page`` column indicates the page in the
[reference manual][1] where the respective register field is
described.
[1]: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
from arduino_rpc.protobuf import resolve_field_values
import arduino_helpers.hardware.teensy.dma as dma
from teensy_minimal_rpc.DMA import Registers
dma_registers = (Registers
.FromString(self.read_dma_registers().tostring()))
df_dma = resolve_field_values(dma_registers)
return (df_dma.dropna(subset=['value'])
.join(dma.REGISTERS_DESCRIPTIONS, on='full_name')
[['full_name', 'value', 'short_description', 'page']])
def tcd_msg_to_struct(self, tcd_msg):
'''
Convert Transfer Control Descriptor from Protocol Buffer message
encoding to raw structure, i.e., 32 bytes starting from `SADDR`
field.
See 21.3.17/415 in the [manual][1] for more details.
Parameters
----------
tcd_msg : teensy_minimal_rpc.DMA.TCD
Transfer Control Descriptor Protocol Buffer message.
Returns
-------
numpy.ndarray(dtype=TCD_RECORD_DTYPE)
Raw Transfer Control Descriptor structure (i.e., 32 bytes
starting from ``SADDR`` field) as a :class:`numpy.ndarray` of
type :data:`TCD_RECORD_DTYPE`.
See also
--------
:meth:`tcd_struct_to_msg`
[1]: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
# Copy TCD to device so that we can extract the raw TCD bytes back out
# of device memory.
#
# **TODO**:
# - Modify `TeensyMinimalRpc/DMA.h::serialize_TCD` and
# `TeensyMinimalRpc/DMA.h::update_TCD` functions to work offline.
# * Operate on variable by reference, on-device use actual register.
# - Add `arduino_helpers.hardware.teensy` function to convert
# between TCD protobuf message and binary TCD struct.
self.update_dma_TCD(0, tcd_msg)
return (self.mem_cpy_device_to_host(HW_TCDS_ADDR, 32)
.view(TCD_RECORD_DTYPE)[0])
def tcd_struct_to_msg(self, tcd_struct):
'''
Convert Transfer Control Descriptor from raw structure (i.e., 32
bytes starting from ``SADDR`` field) to Protocol Buffer message
encoding.
See 21.3.17/415 in the [manual][1] for more details.
Parameters
----------
tcd_struct : numpy.ndarray
Raw Transfer Control Descriptor structure (i.e., 32 bytes
starting from ``SADDR`` field)
Returns
-------
teensy_minimal_rpc.DMA.TCD
Transfer Control Descriptor Protocol Buffer message.
See also
--------
:meth:`tcd_msg_to_struct`
[1]: https://www.pjrc.com/teensy/K20P64M72SF1RM.pdf
'''
from teensy_minimal_rpc.DMA import TCD
# Copy TCD structure to device so that we can extract the
# serialized protocol buffer representation from the device.
#
# **TODO**:
# - Modify `TeensyMinimalRpc/DMA.h::serialize_TCD` and
# `TeensyMinimalRpc/DMA.h::update_TCD` functions to operate on
# arbitrary source address (instead of hard-coded address of TCD
# register).
# * Operate on variable by reference, on-device use actual register.
# - Add `arduino_helpers.hardware.teensy` function to convert between TCD
# protobuf message and binary TCD struct.
self.mem_cpy_host_to_device(HW_TCDS_ADDR, tcd_struct.tostring())
return TCD.FromString(self.read_dma_TCD(0).tostring())
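# Round-trip note: for a 32-byte TCD struct `t`,
# `tcd_msg_to_struct(tcd_struct_to_msg(t))` is expected to reproduce `t`,
# since both directions pass through the device TCD registers above.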
def analog_reads_config(self, adc_channels, sample_count,
resolution=None, average_count=1,
sampling_rate_hz=None, differential=False,
gain_power=0, adc_num=teensy.ADC_0):
'''
Configure ADC sampler to read multiple samples from a single ADC
channel, using the minimum conversion rate for specified sampling
parameters.
The reasoning behind selecting the minimum conversion rate is that
we expect it to lead to the lowest noise possible while still
matching the specified requirements.
**TODO** Should this be handled differently?
This function uses the following mutator methods:
- :meth:`setReference` (C++)
* Set the reference voltage based on whether or not
differential is selected.
On the Teensy 3.2 architecture, the 1.2V reference voltage
*must* be used when operating in differential (as opposed to
single-ended) mode.
- :meth:`update_adc_registers` (C++)
* Apply ADC ``CFG*`` register settings.
ADC ``CFG*`` register settings are determined by:
- :data:`average_count`
- :data:`differential`
- :data:`gain_power` (PGA only)
- :data:`resolution`
- :data:`sampling_rate_hz`
- :meth:`setAveraging` (C++)
* Apply ADC hardware averaging setting using Teensy ADC
library API.
We use the Teensy API here because it handles calibration,
etc. automatically.
- :meth:`setResolution` (C++)
* Apply ADC resolution setting using Teensy ADC library API.
We use the Teensy API here because it handles calibration,
etc. automatically.
Parameters
----------
adc_channels : string or list
ADC channel to measure (e.g., ``'A0'``, ``'PGA0'``, etc.).
Multiple channels may be specified to perform interleaved reads.
sample_count : int
Number of samples to measure.
resolution : int,optional
Bit resolution to sample at. Must be one of: 8, 10, 12, 16.
average_count : int,optional
Hardware average count.
sampling_rate_hz : int,optional
Sampling rate. If not specified, sampling rate will be based
on minimum conversion rate based on remaining ADC settings.
differential : bool,optional
If ``True``, use differential mode. Otherwise, use single-ended
mode.
.. note::
**Differential mode** is automatically enabled for ``PGA*``
measurements (e.g., when ``'PGA0'`` is included in :data:`adc_channels`).
gain_power : int,optional
When measuring a ``'PGA*'`` channel (also implies differential
mode), apply a gain of ``2^gain_power`` using the hardware
programmable amplifier gain.
adc_num : int,optional
The ADC to use for the measurement (default is
``teensy.ADC_0``).
Returns
-------
sampling_rate_hz : int
Number of samples per second.
adc_settings : pandas.Series
ADC settings used.
adc_sampler : teensy_minimal_rpc.adc_sampler.AdcSampler
An ADC sampler object.
Calls to the
:meth:`teensy_minimal_rpc.adc_sampler.AdcSampler.start_read`
method asynchronously initiate reading of the configured
channels/sample count, at a specified sampling rate.
The
:meth:`teensy_minimal_rpc.adc_sampler.AdcSampler.get_results_async`
method may be called to fetch results from a previously
initiated read operation.
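Notes
-----
Illustrative call pattern (channel list, sample count, and resolution are
placeholders; ``self`` is whatever proxy class mixes in
:class:`AdcDmaMixin`)::
    rate_hz, settings, sampler = self.analog_reads_config(['A0'], 256,
                                                          resolution=16)
    df_samples = (sampler.start_read(rate_hz)
                  .get_results_async(timeout_s=5))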
'''
# XXX Import here, since importing at top of file seems to cause a
# cyclic import error when importing `__init__`.
import teensy_minimal_rpc.ADC as ADC
# Select ADC settings to achieve minimum conversion rate for
# specified resolution, mode (i.e., single-ended or differential),
# and number of samples to average per conversion (i.e., average
# count).
bit_width = resolution
if isinstance(adc_channels, six.string_types):
# Single channel was specified. Wrap channel in list.
adc_channels = [adc_channels]
# Enable programmable gain if `'PGA'` is part of any selected channel
# name.
enabled_programmable_gain = any('PGA' in channel_i
for channel_i in adc_channels)
if enabled_programmable_gain:
differential = True
if differential:
if (resolution is not None) and ((resolution < 16) and not
(resolution & 0x01)):
# An even number of bits was specified for resolution in
# differential mode. However, bit-widths are actually
# increased by one bit, where the additional bit indicates
# the sign of the result.
bit_width += 1
elif gain_power > 0:
raise ValueError('Programmable gain amplification is only '
'valid in differential mode.')
mode = 'differential' if differential else 'single-ended'
# Build up a query mask based on specified options.
query = ((DEFAULT_ADC_CONFIGS.AverageNum == average_count) &
(DEFAULT_ADC_CONFIGS.Mode == mode))
if resolution is not None:
query &= (DEFAULT_ADC_CONFIGS['Bit-width'] == bit_width)
if sampling_rate_hz is not None:
query &= (DEFAULT_ADC_CONFIGS.conversion_rate >=
sampling_rate_hz)
# Use prepared query mask to select matching settings from the
# table of valid ADC configurations.
matching_settings = DEFAULT_ADC_CONFIGS.loc[query]
# Find and select the ADC configuration with the minimum conversion
# rate.
# **TODO** The reasoning behind selecting the minimum conversion
# rate is that we expect it to lead to the lowest noise possible
# while still matching the specified requirements.
min_match_index = matching_settings['conversion_rate'].idxmin()
adc_settings = matching_settings.loc[min_match_index].copy()
if resolution is None:
resolution = int(adc_settings['Bit-width'])
# Set the reference voltage based on whether or not differential is
# selected.
if differential:
# On the Teensy 3.2 architecture, the 1.2V reference voltage
# *must* be used when operating in differential (as opposed to
# singled-ended) mode.
self.setReference(teensy.ADC_REF_1V2, adc_num)
reference_V = 1.2
else:
self.setReference(teensy.ADC_REF_3V3, adc_num)
reference_V = 3.3
adc_settings['reference_V'] = reference_V
adc_settings['resolution'] = resolution
adc_settings['differential'] = differential
# Verify that a valid gain value has been specified.
assert(gain_power >= 0 and gain_power < 8)
adc_settings['gain_power'] = int(gain_power)
# Construct a Protocol Buffer message according to the selected ADC
# configuration settings.
adc_registers = \
ADC.Registers(CFG2=
ADC.R_CFG2(MUXSEL=ADC.R_CFG2.B,
ADACKEN=
int(adc_settings['CFG2[ADACKEN]']),
ADLSTS=
int(adc_settings['CFG2[ADLSTS]']),
ADHSC=
int(adc_settings['CFG2[ADHSC]'])),
CFG1=
ADC.R_CFG1(ADLSMP=
int(adc_settings['CFG1[ADLSMP]']),
ADICLK=
int(adc_settings['CFG1[ADICLK]']),
ADIV=
int(adc_settings['CFG1[ADIV]'])))
if enabled_programmable_gain:
adc_registers.PGA.PGAG = adc_settings.gain_power
adc_registers.PGA.PGAEN = True
# Apply ADC `CFG*` register settings.
self.update_adc_registers(adc_num, adc_registers)
# Apply non-`CFG*` ADC settings using Teensy ADC library API.
# We use the Teensy API here because it handles calibration, etc.
# automatically.
self.setAveraging(average_count, adc_num)
self.setResolution(resolution, adc_num)
if sampling_rate_hz is None:
# By default, use a sampling rate that is 90% of the maximum
# conversion rate for the selected ADC settings.
#
# XXX We arrived at this after a handful of empirical
# tests. We think this is necessary due to the slight deviation
# of the computed conversion rates (see TODO in
# `adc_sampler.get_adc_configs`) from those computed by the
# [Freescale ADC calculator][1].
#
# [1]: http://www.freescale.com/products/arm-processors/kinetis-cortex-m/adc-calculator:ADC_CALCULATOR
sampling_rate_hz = (int(.9 * adc_settings.conversion_rate) &
0xFFFFFFFE)
# Create `AdcSampler` for:
# - The specified sample count.
        #  - The specified channel(s).
        #    * **N.B.,** The `AdcSampler` supports scanning multiple
        #      channels; when more than one channel is given, the reads
        #      are interleaved across them.
# **Creation of `AdcSampler` object initializes DMA-related
# registers**.
adc_sampler = AdcSampler(self, adc_channels, sample_count)
return sampling_rate_hz, adc_settings, adc_sampler
def analog_reads(self, adc_channels, sample_count, resolution=None,
average_count=1, sampling_rate_hz=None,
differential=False, gain_power=0,
adc_num=teensy.ADC_0, timeout_s=None):
'''
        Read multiple samples from one or more ADC channels, using the
        minimum conversion rate for the specified sampling parameters.
Parameters
----------
adc_channels : string or list
ADC channel to measure (e.g., ``'A0'``, ``'PGA0'``, etc.).
Multiple channels may be specified to perform interleaved reads.
sample_count : int
Number of samples to measure.
        resolution : int, optional
Bit resolution to sample at. Must be one of: 8, 10, 12, 16.
        average_count : int, optional
Hardware average count.
        sampling_rate_hz : int, optional
            Sampling rate. If not specified, the sampling rate is derived
            from the minimum conversion rate for the remaining ADC settings.
        differential : bool, optional
If ``True``, use differential mode. Otherwise, use single-ended
mode.
.. note::
**Differential mode** is automatically enabled for ``PGA*``
measurements (e.g., :data:`adc_channel=='PGA0'`).
        gain_power : int, optional
When measuring a ``'PGA*'`` channel (also implies differential
mode), apply a gain of ``2^gain_power`` using the hardware
programmable amplifier gain.
        adc_num : int, optional
The ADC to use for the measurement (default is
``teensy.ADC_0``).
Returns
-------
sampling_rate_hz : int
Number of samples per second.
adc_settings : pandas.Series
ADC settings used.
df_volts : pandas.DataFrame
Voltage readings (based on reference voltage and gain).
df_adc_results : pandas.DataFrame
Raw ADC values (range depends on resolution, i.e.,
``adc_settings['Bit-width']``).
See also
--------
:meth:`analog_reads_config`
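        Examples
        --------
        Illustrative call (``proxy`` is a placeholder for a connected object
        exposing this method; it is not defined in this module)::

            results = proxy.analog_reads('A0', sample_count=100,
                                         resolution=12)
            rate_hz, adc_settings, df_volts, df_adc_results = results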
'''
sample_rate_hz, adc_settings, adc_sampler = \
self.analog_reads_config(adc_channels, sample_count,
resolution=resolution,
average_count=average_count,
sampling_rate_hz=sampling_rate_hz,
differential=differential,
gain_power=gain_power,
adc_num=adc_num)
adc_sampler.start_read(sample_rate_hz=sample_rate_hz)
start_time = dt.datetime.now()
while self._packet_watcher.queues.stream.qsize() < 1:
if (timeout_s is not None and (timeout_s <
(dt.datetime.now() -
start_time).total_seconds())):
raise IOError('Timed out waiting for streamed result.')
df_adc_results = adc_sampler.get_results_async()
return (sample_rate_hz, adc_settings) + \
format_adc_results(df_adc_results, adc_settings)
def format_adc_results(df_adc_results, adc_settings):
'''
Convert raw ADC readings to actual voltages.
Parameters
----------
adc_settings : pandas.Series
ADC settings used.
df_adc_results : pandas.DataFrame
Raw ADC values (range depends on resolution, i.e.,
``adc_settings['Bit-width']``).
Returns
-------
df_volts : pandas.DataFrame
Voltage readings (based on reference voltage and gain).
df_adc_results : pandas.DataFrame
Raw ADC values (from input argument).
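    Examples
    --------
    Worked scaling example (illustrative numbers only): with
    ``reference_V=3.3``, ``resolution=12`` and ``gain_power=0``, a raw
    reading of 2048 maps to ``3.3 / 2 ** 12 * 2048 = 1.65`` volts.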
'''
dtype = 'int16' if adc_settings.differential else 'uint16'
df_adc_results = df_adc_results.astype(dtype)
scale = adc_settings.reference_V / (1 << (adc_settings.resolution +
adc_settings.gain_power))
df_volts = scale * df_adc_results
return df_volts, df_adc_results
|
wheeler-microfluidics/teensy-minimal-rpc
|
teensy_minimal_rpc/adc_sampler.py
|
Python
|
gpl-3.0
| 51,254
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2012,2013,2014,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq manage --list`."""
from collections import defaultdict
import os.path
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import Domain, Sandbox, Host
from aquilon.aqdb.model.feature import hardware_features, host_features
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.branch import get_branch_and_author
from aquilon.worker.dbwrappers.feature import check_feature_template
from aquilon.worker.dbwrappers.host import (hostlist_to_hosts,
check_hostlist_size,
validate_branch_author)
from aquilon.worker.formats.branch import AuthoredSandbox
from aquilon.worker.locks import CompileKey
from aquilon.worker.processes import GitRepo
from aquilon.worker.templates import TemplateDomain
from aquilon.worker.dbwrappers.change_management import ChangeManagement
def validate_branch_commits(dbsource, dbsource_author,
dbtarget, dbtarget_author, logger, config):
"""
Verify that we're not leaving changes behind.
This function verifies that we're not losing changes in the source sandbox
or domain that were not committed, not published, or not merged into the
target sandbox or domain.
"""
domainsdir = config.get('broker', 'domainsdir')
if isinstance(dbsource, Sandbox):
authored_sandbox = AuthoredSandbox(dbsource, dbsource_author)
source_path = authored_sandbox.path
else:
source_path = os.path.join(domainsdir, dbsource.name)
if isinstance(dbtarget, Sandbox):
authored_sandbox = AuthoredSandbox(dbtarget, dbtarget_author)
target_path = authored_sandbox.path
else:
target_path = os.path.join(domainsdir, dbtarget.name)
source_repo = GitRepo(source_path, logger)
target_repo = GitRepo(target_path, logger)
kingrepo = GitRepo.template_king(logger)
# Check if the source has anything uncommitted
git_status = source_repo.run(["status", "--porcelain"])
if git_status:
raise ArgumentError("The source {0:l} contains uncommitted files."
.format(dbsource))
# Get latest source commit and tree ID
source_commit = source_repo.ref_commit()
source_tree = source_repo.ref_tree()
if not source_commit or not source_tree: # pragma: no cover
raise ArgumentError("Unable to retrieve the last commit information "
"from source {0:l}.".format(dbsource))
# Verify that the source is fully published, i.e. template-king has the same
# commit
king_commit = kingrepo.ref_commit(dbsource.name, compel=False)
if king_commit != source_commit:
raise ArgumentError("The latest commit of {0:l} has not been "
"published to template-king yet.".format(dbsource))
# Check if target branch has the latest source commit
found = target_repo.ref_contains_commit(source_commit)
if not found:
# If the commit itself was not found, try to check if the two heads
# point to the same tree object, and the difference is only in history
# (e.g. merging the same sandbox into different domains will create
# different merge commits).
target_tree = target_repo.ref_tree()
found = target_tree == source_tree
if not found:
raise ArgumentError("The target {0:l} does not contain the latest "
"commit from source {1:l}.".format(dbtarget,
dbsource))
class CommandManageList(BrokerCommand):
requires_plenaries = True
required_parameters = ["list"]
def get_objects(self, session, list, **_):
check_hostlist_size(self.command, self.config, list)
dbhosts = hostlist_to_hosts(session, list)
failed = []
dbsource, dbsource_author = validate_branch_author(dbhosts)
for dbhost in dbhosts:
# check if any host in the list is a cluster node
if dbhost.cluster:
failed.append("Cluster nodes must be managed at the "
"cluster level; {0} is a member of {1:l}."
.format(dbhost.fqdn, dbhost.cluster))
if failed:
raise ArgumentError("Cannot modify the following hosts:\n%s" %
"\n".join(failed))
return (dbsource, dbsource_author, dbhosts)
def render(self, session, logger, plenaries, domain, sandbox, force,
skip_auto_compile, user, justification, reason, **arguments):
dbbranch, dbauthor = get_branch_and_author(session, domain=domain,
sandbox=sandbox, compel=True)
if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
raise ArgumentError("Managing objects to {0:l} is not allowed."
.format(dbbranch))
dbsource, dbsource_author, objects = self.get_objects(session,
logger=logger,
**arguments)
cm = ChangeManagement(session, user, justification, reason, logger, self.command, **arguments)
for obj in objects:
if isinstance(obj, Host):
cm.consider(obj)
cm.validate()
if isinstance(dbsource, Sandbox) and not dbsource_author and not force:
raise ArgumentError("Unable to determine location of sandbox due to "
"missing user record. Please manually verify "
"there are no uncommitted and unpublished "
"changes and then re-run using --force.")
auto_compile = False
# If target is a sandbox
if sandbox and isinstance(dbsource, Sandbox) and not skip_auto_compile:
auto_compile = True
# If target is a domain
elif domain and dbbranch.auto_compile and not skip_auto_compile:
auto_compile = True
if not force:
validate_branch_commits(dbsource, dbsource_author,
dbbranch, dbauthor, logger, self.config)
if isinstance(dbbranch, Domain):
features = defaultdict(set)
personality_stages = set()
for dbobj in objects:
dbstage = dbobj.personality_stage
personality_stages.add(dbstage)
if hasattr(dbobj, 'hardware_entity'):
feats = hardware_features(dbstage,
dbobj.hardware_entity.model)
features[dbstage.archetype].update(feats)
for dbstage in personality_stages:
pre, post = host_features(dbstage)
features[dbstage.archetype].update(pre)
features[dbstage.archetype].update(post)
for dbarch, featureset in features.items():
for dbfeature in featureset:
check_feature_template(self.config, dbarch, dbfeature,
dbbranch)
for dbobj in objects:
if dbsource != dbbranch:
logger.client_info("Moving {0:l} from {1:l} to {2:l}"
.format(dbobj, dbsource, dbbranch))
plenaries.add(dbobj)
dbobj.branch = dbbranch
dbobj.sandbox_author = dbauthor
session.flush()
# We're crossing domains, need to lock everything.
with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
CompileKey(domain=dbbranch.name, logger=logger)]):
plenaries.stash()
try:
plenaries.write(locked=True)
if auto_compile:
td = TemplateDomain(dbbranch, dbauthor, logger=logger)
td.compile(session, only=plenaries.object_templates)
except:
plenaries.restore_stash()
raise
return
|
quattor/aquilon
|
lib/aquilon/worker/commands/manage_list.py
|
Python
|
apache-2.0
| 9,010
|
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE',
'DIS', 'RAD', 'TAX', 'PTRATIO',
'B', 'LSTAT', 'MEDV']
data = pd.read_csv('housing.data', delim_whitespace=True, names=names)
array = data.values
x = array[:, 0:13]
y = array[:, 13]
kfold = KFold(n_splits=10, shuffle=True, random_state=7)  # shuffle so random_state has an effect
model = LinearRegression()
scoring = 'neg_mean_absolute_error' # 'accuracy' is default
result = cross_val_score(model, x, y, cv=kfold, scoring=scoring)
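# Note: for 'neg_mean_absolute_error' scikit-learn reports the *negated* MAE,
# so the mean printed below is expected to be negative (closer to 0 is better).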
print(result.mean(), result.std())
|
Swaraj1998/MyCode
|
ML-Workshop/mean_abs_error.py
|
Python
|
mit
| 675
|
#!/usr/bin/env python3
from distutils.core import setup
setup(
name='figurePXJ',
version='0.0.0',
author='Xingjie Pan',
author_email='xingjiepan@gmail.com',
url='https://github.com/xingjiepan/figurePXJ',
packages=[
'figurePXJ',
],
install_requires=[
'numpy',
'scipy',
'matplotlib',
],
entry_points={
'console_scripts': [
],
},
description='A toolkit for making figures with styles that PXJ likes.',
long_description=open('README.rst').read(),
classifiers=[
'Programming Language :: Python :: 3',
'Intended Audience :: Science/Research',
],
)
|
xingjiepan/figurePXJ
|
setup.py
|
Python
|
gpl-3.0
| 668
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'demo.views.index', name='index'),
url(r'^all_events/', 'demo.views.all_events', name='all_events'),
url(r'^admin/', include(admin.site.urls)),
)
|
rodrigoamaral/django-fullcalendar
|
demo/demo/urls.py
|
Python
|
mit
| 320
|
import pygame
from snake.resource import R
from snake.scene import Scene
from snake.ui.information import Information
class MenuScene(Scene):
def setup(self):
self.information_ui = Information()
R.play_music("menu")
def update(self, delta):
self.information_ui.update(delta)
def render(self, screen):
screen.fill((0, 0, 0))
self.information_ui.render(screen)
def on_key_down(self, key):
if key == pygame.K_SPACE:
R.get_sound("enter").play()
self.start_and_reset_scene("game")
|
ccmikechen/snake_game
|
snake/scenes/menu_scene.py
|
Python
|
mit
| 568
|
from mig_app import app, db
from models import Foo
@app.route('/')
def index():
return "this is the index"
@app.route('/query')
def query_all():
result = Foo.query.filter_by(id=1).first()
return result.fooname
|
Skablam/Flask-Migration-Example
|
mig_app/server.py
|
Python
|
mit
| 224
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import importlib
import unittest
from unittest import mock
from azure.cli.core.profiles import ResourceType
from azure.cli.command_modules.acs import _loadbalancer as loadbalancer
class TestLoadBalancer(unittest.TestCase):
def test_configure_load_balancer_profile(self):
managed_outbound_ip_count = 5
outbound_ips = None
outbound_ip_prefixes = None
outbound_ports = 80
idle_timeout = 3600
# load models directly (instead of through the `get_sdk` method provided by the cli component)
from azure.cli.core.profiles._shared import AZURE_API_PROFILES
sdk_profile = AZURE_API_PROFILES["2020-09-01-hybrid"][
ResourceType.MGMT_CONTAINERSERVICE
]
api_version = sdk_profile.default_api_version
module_name = "azure.mgmt.containerservice.v{}.models".format(
api_version.replace("-", "_")
)
module = importlib.import_module(module_name)
ManagedClusterLoadBalancerProfile = getattr(
module, "ManagedClusterLoadBalancerProfile"
)
ManagedClusterLoadBalancerProfileManagedOutboundIPs = getattr(
module, "ManagedClusterLoadBalancerProfileManagedOutboundIPs"
)
ManagedClusterLoadBalancerProfileOutboundIPs = getattr(
module, "ManagedClusterLoadBalancerProfileOutboundIPs"
)
ManagedClusterLoadBalancerProfileOutboundIPPrefixes = getattr(
module, "ManagedClusterLoadBalancerProfileOutboundIPPrefixes"
)
ResourceReference = getattr(module, "ResourceReference")
lb_models = {
"ManagedClusterLoadBalancerProfile": ManagedClusterLoadBalancerProfile,
"ManagedClusterLoadBalancerProfileManagedOutboundIPs": ManagedClusterLoadBalancerProfileManagedOutboundIPs,
"ManagedClusterLoadBalancerProfileOutboundIPs": ManagedClusterLoadBalancerProfileOutboundIPs,
"ManagedClusterLoadBalancerProfileOutboundIPPrefixes": ManagedClusterLoadBalancerProfileOutboundIPPrefixes,
"ResourceReference": ResourceReference,
}
profile = ManagedClusterLoadBalancerProfile()
profile.managed_outbound_i_ps = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count=2
)
profile.outbound_i_ps = ManagedClusterLoadBalancerProfileOutboundIPs(
public_i_ps="public_i_ps"
)
profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
public_ip_prefixes="public_ip_prefixes"
)
p = loadbalancer.configure_load_balancer_profile(
managed_outbound_ip_count,
outbound_ips,
outbound_ip_prefixes,
outbound_ports,
idle_timeout,
profile,
lb_models,
)
self.assertEqual(p.managed_outbound_i_ps.count, 5)
self.assertEqual(p.outbound_i_ps, None)
self.assertEqual(p.outbound_ip_prefixes, None)
self.assertEqual(p.allocated_outbound_ports, 80)
self.assertEqual(p.idle_timeout_in_minutes, 3600)
if __name__ == '__main__':
unittest.main()
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/acs/tests/hybrid_2020_09_01/test_loadbalancer.py
|
Python
|
mit
| 3,509
|
#!/usr/bin/env python3
# fileencoding: utf-8
import os
import sys
import unittest
cafepypath = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(cafepypath)
from cafepy.files import Ninfo
class TestReadNinfo(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testdata = os.path.join(os.path.dirname(__file__), 'data/test.ninfo')
def setUp(self):
pass
def test_read_bond_from_ninfo_file(self):
tmp = Ninfo(self.testdata)
self.assertEqual([1, 1, 1, 1, 2, 1, 2, 3.8223, 1.0, 1.0, 110.4, 'pp'], tmp['bond'][0])
def test_read_angl_from_ninfo_file(self):
tmp = Ninfo(self.testdata)
self.assertEqual([1, 1, 1, 1, 2, 3, 1, 2, 3, 131.492, 0.0, 1.0, 0.0, 'ppp'], tmp['angl'][0])
def test_read_aicg13_from_ninfo_file(self):
tmp = Ninfo(self.testdata)
self.assertEqual([1, 1, 1, 1, 2, 3, 1, 2, 3, 6.9701, 1.0, 1.0, 1.3709, 0.15, 'ppp'], tmp['aicg13'][0])
def test_read_dihd_from_ninfo_file(self):
tmp = Ninfo(self.testdata)
self.assertEqual([1, 1, 1, 1, 2, 3, 4, 1, 2, 3, 4, -126.5627, 0.0, 1.0, 0.0, 0.0, 'pppp'], tmp['dihd'][0])
def test_read_aicgdih_from_ninfo_file(self):
tmp = Ninfo(self.testdata)
self.assertEqual([1, 1, 1, 1, 2, 3, 4, 1, 2, 3, 4, -126.5627, 1.0, 1.0, 0.435, 0.15, 'pppp'], tmp['aicgdih'][0])
def test_read_contact_from_ninfo_file(self):
tmp = Ninfo(self.testdata)
self.assertEqual([1, 1, 1, 1, 23, 1, 23, 7.2644, 1.0, 1, 0.16, 'p-p'], tmp['contact'][0])
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
|
Moguf/cafepy
|
test/test_ninfofile.py
|
Python
|
gpl-3.0
| 1,662
|
import urllib
counts = dict()
fhand = urllib.urlopen('http://www.py4inf.com/code/romeo.txt')
for line in fhand:
words = line.split()
for word in words:
counts[word] = counts.get(word,0) + 1
print counts
|
annelida/stuff
|
Scripts/urlwords.py
|
Python
|
mit
| 219
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-12 08:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organisation', '0029_auto_20170707_1224'),
]
operations = [
migrations.AddField(
model_name='departmentuser',
name='azure_guid',
field=models.CharField(blank=True, help_text='Azure AD GUID.', max_length=48, null=True, unique=True),
),
]
|
rockychen-dpaw/oim-cms
|
organisation/migrations/0030_departmentuser_azure_guid.py
|
Python
|
apache-2.0
| 530
|
import argparse
from rdflib import Graph
def simple_merge(sources):
graph = Graph()
for source in sources:
print("Loading %s..." % source)
graph.parse(source)
return graph
def verbose_merge(sources):
# Prints out every newly merged triple and keep count of how many were already there.
graph = Graph()
print("Loading %s..." % sources[0])
graph.parse(sources[0])
for source in sources[1:]:
newgraph = Graph()
newgraph.parse(source)
added = 0
for triple in newgraph:
if triple in graph:
continue
print("Adding %s" % (triple, ))
graph.add(triple)
added += 1
print("Added %d triples. Ignored %d." % (added, len(newgraph) - added))
return graph
def main(sources, dest, verbose):
if verbose:
graph = verbose_merge(sources)
else:
graph = simple_merge(sources)
print("Saving to %s" % dest)
graph.serialize(dest)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Merge multiple source RDFs into one")
parser.add_argument('sources', help="Source RDF paths to merge together", nargs='+')
parser.add_argument('dest', help="Dest RDF path to save merged graph into")
parser.add_argument('--verbose', action='store_true', help="Activate verbose merge")
args = parser.parse_args()
main(args.sources, args.dest, args.verbose)
|
hsoft/dgeq-rdf
|
merge.py
|
Python
|
bsd-3-clause
| 1,448
|
import sys
def add_appsever_import_paths():
from dev_appserver import EXTRA_PATHS
for extra_path in EXTRA_PATHS:
if extra_path not in sys.path:
sys.path = [extra_path] + sys.path
def initialize_service_apis():
from google.appengine.tools import dev_appserver
from google.appengine.tools.dev_appserver_main import ParseArguments
args, option_dict = ParseArguments(sys.argv) # Otherwise the option_dict isn't populated.
dev_appserver.SetupStubs('local', **option_dict)
|
blairconrad/LibraryHippo
|
App/gael/testing.py
|
Python
|
mit
| 517
|
## -*- coding: utf-8 -*-
from .vendor.Qt import QtCore, QtGui, QtWidgets
from . import lib
class PartitionWidget(QtWidgets.QWidget):
def __init__(self, parent, data):
self.parent = parent
self.data = data
super(PartitionWidget, self).__init__(parent)
def mouseMoveEvent(self, event):
        # Only allow drag & drop with the middle mouse button
if event.buttons() != QtCore.Qt.MidButton:
return
        # Set the data format that will be dragged and dropped
mimedata = QtCore.QMimeData()
drag = QtGui.QDrag(self)
drag.setMimeData(mimedata)
drag.exec_(QtCore.Qt.MoveAction)
def paintEvent(self, event):
        # Use the style sheet
super(PartitionWidget, self).paintEvent(event)
opt = QtWidgets.QStyleOption()
opt.initFrom(self)
p = QtGui.QPainter(self)
s = self.style()
s.drawPrimitive(QtWidgets.QStyle.PE_Widget, opt, p, self)
        # Draw the label and the line
painter = QtGui.QPainter(self)
color = QtGui.QColor(self.data.color)
pen = QtGui.QPen(color, self.data.line_width)
painter.setPen(pen)
font = QtGui.QFont()
font.setPointSize(self.data.label_font_size_view)
painter.setFont(font)
        # Compute the widget size, taking the top/bottom/left/right margins into account
_w = self.data.line_length
_h = self.data.margin + int(self.data.line_width * 1.5)
if self.data.use_label is True:
fm = painter.fontMetrics()
if _w < fm.width(self.data.label):
_w = fm.width(self.data.label)
if _h < fm.height():
_h = fm.height()
_w += self.data.margin * 2
_h += self.data.margin * 2
        # Compute the placement points for the line
_line_start_point = self.data.margin
_line_end_point = self.data.line_length + self.data.margin
if self.data.style == 0:
            # Horizontal
self.resize(_w, _h)
if self.data.use_label is True:
_line_height_point = self.data.label_font_size + round(self.data.margin + self.data.line_width / 2)
else:
_line_height_point = self.data.margin + round(self.data.line_width / 2)
line = QtCore.QLine(
QtCore.QPoint(_line_start_point * self.data.temp_scale, _line_height_point * self.data.temp_scale),
QtCore.QPoint(_line_end_point * self.data.temp_scale, _line_height_point * self.data.temp_scale)
)
painter.drawLine(line)
if self.data.use_label is True:
painter.drawText(QtCore.QPoint(0, self.data.label_font_size), self.data.label)
elif self.data.style == 1:
            # Vertical
self.resize(_h, _w)
line = QtCore.QLine(
QtCore.QPoint(self.data.margin * self.data.temp_scale, _line_start_point * self.data.temp_scale),
QtCore.QPoint(self.data.margin * self.data.temp_scale, _line_end_point * self.data.temp_scale)
)
painter.drawLine(line)
if self.data.use_label is True:
painter.rotate(90)
_p = QtCore.QPoint(self.data.margin * self.data.temp_scale, (-self.data.margin * 2 - round(self.data.line_width / 2)) * self.data.temp_scale)
painter.drawText(_p, self.data.label)
class PartitionData(lib.PartsData):
def __init__(self):
super(PartitionData, self).__init__()
self.color = '#aaaaaa'
        self.style = 0  # 0: horizontal, 1: vertical
self.line_width = 1
self.line_length = 150
self.margin = 2
def create(parent, data):
widget = PartitionWidget(parent, data)
widget.setObjectName(lib.random_string(15))
widget.show()
widget.move(data.position)
return widget
def update(widget, data):
font = widget.font()
font.setPointSize(data.label_font_size)
widget.move(data.position)
def get_default():
path = lib.get_partition_default_filepath()
data = PartitionData()
js = lib.not_escape_json_load(path)
if js is not None:
for k, v in js.items():
setattr(data, k, v)
return data
#-----------------------------------------------------------------------------
# EOF
#-----------------------------------------------------------------------------
|
mochio326/SiShelf
|
Contents/scripts/sishelf/partition.py
|
Python
|
mit
| 4,465
|
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import copy
from operator import attrgetter
from ryu import cfg
from ryu.base import app_manager
from ryu.base.app_manager import lookup_service_brick
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
from ryu.lib.packet import packet
import setting
CONF = cfg.CONF
class NetworkMonitor(app_manager.RyuApp):
"""
NetworkMonitor is a Ryu app for collecting traffic information.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(NetworkMonitor, self).__init__(*args, **kwargs)
self.name = 'monitor'
self.datapaths = {}
self.port_stats = {}
self.port_speed = {}
self.flow_stats = {}
self.flow_speed = {}
self.stats = {}
self.port_features = {}
self.free_bandwidth = {}
self.awareness = lookup_service_brick('awareness')
self.graph = None
self.capabilities = None
self.best_paths = None
# Start to green thread to monitor traffic and calculating
# free bandwidth of links respectively.
self.monitor_thread = hub.spawn(self._monitor)
self.save_freebandwidth_thread = hub.spawn(self._save_bw_graph)
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Record datapath's info
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def _monitor(self):
"""
Main entry method of monitoring traffic.
"""
while CONF.weight == 'bw':
self.stats['flow'] = {}
self.stats['port'] = {}
for dp in self.datapaths.values():
self.port_features.setdefault(dp.id, {})
self._request_stats(dp)
# refresh data.
self.capabilities = None
self.best_paths = None
hub.sleep(setting.MONITOR_PERIOD)
if self.stats['flow'] or self.stats['port']:
self.show_stat('flow')
self.show_stat('port')
hub.sleep(1)
def _save_bw_graph(self):
"""
Save bandwidth data into networkx graph object.
"""
while CONF.weight == 'bw':
self.graph = self.create_bw_graph(self.free_bandwidth)
self.logger.debug("save_freebandwidth")
hub.sleep(setting.MONITOR_PERIOD)
def _request_stats(self, datapath):
"""
Sending request msg to datapath
"""
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
def get_min_bw_of_links(self, graph, path, min_bw):
"""
        Get the bandwidth of a path. The minimum bandwidth over its links is
        the path bandwidth, because that link is the bottleneck of the path.
"""
_len = len(path)
if _len > 1:
minimal_band_width = min_bw
for i in xrange(_len-1):
pre, curr = path[i], path[i+1]
if 'bandwidth' in graph[pre][curr]:
bw = graph[pre][curr]['bandwidth']
minimal_band_width = min(bw, minimal_band_width)
else:
continue
return minimal_band_width
return min_bw
def get_best_path_by_bw(self, graph, paths):
"""
Get best path by comparing paths.
"""
capabilities = {}
best_paths = copy.deepcopy(paths)
for src in paths:
for dst in paths[src]:
if src == dst:
best_paths[src][src] = [src]
capabilities.setdefault(src, {src: setting.MAX_CAPACITY})
capabilities[src][src] = setting.MAX_CAPACITY
continue
max_bw_of_paths = 0
best_path = paths[src][dst][0]
for path in paths[src][dst]:
min_bw = setting.MAX_CAPACITY
min_bw = self.get_min_bw_of_links(graph, path, min_bw)
if min_bw > max_bw_of_paths:
max_bw_of_paths = min_bw
best_path = path
best_paths[src][dst] = best_path
capabilities.setdefault(src, {dst: max_bw_of_paths})
capabilities[src][dst] = max_bw_of_paths
self.capabilities = capabilities
self.best_paths = best_paths
return capabilities, best_paths
def create_bw_graph(self, bw_dict):
"""
Save bandwidth data into networkx graph object.
"""
try:
graph = self.awareness.graph
link_to_port = self.awareness.link_to_port
for link in link_to_port:
(src_dpid, dst_dpid) = link
(src_port, dst_port) = link_to_port[link]
if src_dpid in bw_dict and dst_dpid in bw_dict:
bw_src = bw_dict[src_dpid][src_port]
bw_dst = bw_dict[dst_dpid][dst_port]
bandwidth = min(bw_src, bw_dst)
# add key:value of bandwidth into graph.
graph[src_dpid][dst_dpid]['bandwidth'] = bandwidth
else:
graph[src_dpid][dst_dpid]['bandwidth'] = 0
return graph
except:
self.logger.info("Create bw graph exception")
if self.awareness is None:
self.awareness = lookup_service_brick('awareness')
return self.awareness.graph
def _save_freebandwidth(self, dpid, port_no, speed):
# Calculate free bandwidth of port and save it.
port_state = self.port_features.get(dpid).get(port_no)
if port_state:
capacity = port_state[2]
curr_bw = self._get_free_bw(capacity, speed)
self.free_bandwidth[dpid].setdefault(port_no, None)
self.free_bandwidth[dpid][port_no] = curr_bw
else:
self.logger.info("Fail in getting port state")
def _save_stats(self, _dict, key, value, length):
if key not in _dict:
_dict[key] = []
_dict[key].append(value)
if len(_dict[key]) > length:
_dict[key].pop(0)
def _get_speed(self, now, pre, period):
if period:
return (now - pre) / (period)
else:
return 0
def _get_free_bw(self, capacity, speed):
        # Free bandwidth in Mbit/s: capacity is reported in Kbit/s and
        # speed in bytes/s, hence the conversions below.
return max(capacity/10**3 - speed * 8/10**6, 0)
def _get_time(self, sec, nsec):
return sec + nsec / (10 ** 9)
def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):
return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
"""
Save flow stats reply info into self.flow_stats.
Calculate flow speed and Save it.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['flow'][dpid] = body
self.flow_stats.setdefault(dpid, {})
self.flow_speed.setdefault(dpid, {})
for stat in sorted([flow for flow in body if flow.priority == 1],
key=lambda flow: (flow.match.get('in_port'),
flow.match.get('ipv4_dst'))):
key = (stat.match['in_port'], stat.match.get('ipv4_dst'),
stat.instructions[0].actions[0].port)
value = (stat.packet_count, stat.byte_count,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.flow_stats[dpid], key, value, 5)
# Get flow's speed.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.flow_stats[dpid][key]
if len(tmp) > 1:
pre = tmp[-2][1]
period = self._get_period(tmp[-1][2], tmp[-1][3],
tmp[-2][2], tmp[-2][3])
speed = self._get_speed(self.flow_stats[dpid][key][-1][1],
pre, period)
self._save_stats(self.flow_speed[dpid], key, speed, 5)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
"""
Save port's stats info
Calculate port's speed and save it.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['port'][dpid] = body
self.free_bandwidth.setdefault(dpid, {})
for stat in sorted(body, key=attrgetter('port_no')):
port_no = stat.port_no
if port_no != ofproto_v1_3.OFPP_LOCAL:
key = (dpid, port_no)
value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.port_stats, key, value, 5)
# Get port speed.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.port_stats[key]
if len(tmp) > 1:
pre = tmp[-2][0] + tmp[-2][1]
period = self._get_period(tmp[-1][3], tmp[-1][4],
tmp[-2][3], tmp[-2][4])
speed = self._get_speed(
self.port_stats[key][-1][0] + self.port_stats[key][-1][1],
pre, period)
self._save_stats(self.port_speed, key, speed, 5)
self._save_freebandwidth(dpid, port_no, speed)
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
"""
Save port description info.
"""
msg = ev.msg
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
config_dict = {ofproto.OFPPC_PORT_DOWN: "Down",
ofproto.OFPPC_NO_RECV: "No Recv",
                       ofproto.OFPPC_NO_FWD: "No Forward",
ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"}
state_dict = {ofproto.OFPPS_LINK_DOWN: "Down",
ofproto.OFPPS_BLOCKED: "Blocked",
ofproto.OFPPS_LIVE: "Live"}
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x curr=0x%08x advertised=0x%08x '
'supported=0x%08x peer=0x%08x curr_speed=%d '
'max_speed=%d' %
(p.port_no, p.hw_addr,
p.name, p.config,
p.state, p.curr, p.advertised,
p.supported, p.peer, p.curr_speed,
p.max_speed))
if p.config in config_dict:
config = config_dict[p.config]
else:
config = "up"
if p.state in state_dict:
state = state_dict[p.state]
else:
state = "up"
port_feature = (config, state, p.curr_speed)
self.port_features[dpid][p.port_no] = port_feature
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
"""
Handle the port status changed event.
"""
msg = ev.msg
reason = msg.reason
port_no = msg.desc.port_no
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
reason_dict = {ofproto.OFPPR_ADD: "added",
ofproto.OFPPR_DELETE: "deleted",
ofproto.OFPPR_MODIFY: "modified", }
if reason in reason_dict:
print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no)
else:
print "switch%d: Illeagal port state %s %s" % (port_no, reason)
def show_stat(self, type):
'''
Show statistics info according to data type.
type: 'port' 'flow'
'''
if setting.TOSHOW is False:
return
bodys = self.stats[type]
if(type == 'flow'):
print('datapath '' in-port ip-dst '
'out-port packets bytes flow-speed(B/s)')
print('---------------- '' -------- ----------------- '
'-------- -------- -------- -----------')
for dpid in bodys.keys():
for stat in sorted(
[flow for flow in bodys[dpid] if flow.priority == 1],
key=lambda flow: (flow.match.get('in_port'),
flow.match.get('ipv4_dst'))):
print('%016x %8x %17s %8x %8d %8d %8.1f' % (
dpid,
stat.match['in_port'], stat.match['ipv4_dst'],
stat.instructions[0].actions[0].port,
stat.packet_count, stat.byte_count,
abs(self.flow_speed[dpid][
(stat.match.get('in_port'),
stat.match.get('ipv4_dst'),
stat.instructions[0].actions[0].port)][-1])))
print '\n'
if(type == 'port'):
print('datapath port ''rx-pkts rx-bytes rx-error '
'tx-pkts tx-bytes tx-error port-speed(B/s)'
' current-capacity(Kbps) '
'port-stat link-stat')
print('---------------- -------- ''-------- -------- -------- '
'-------- -------- -------- '
'---------------- ---------------- '
' ----------- -----------')
format = '%016x %8x %8d %8d %8d %8d %8d %8d %8.1f %16d %16s %16s'
for dpid in bodys.keys():
for stat in sorted(bodys[dpid], key=attrgetter('port_no')):
if stat.port_no != ofproto_v1_3.OFPP_LOCAL:
print(format % (
dpid, stat.port_no,
stat.rx_packets, stat.rx_bytes, stat.rx_errors,
stat.tx_packets, stat.tx_bytes, stat.tx_errors,
abs(self.port_speed[(dpid, stat.port_no)][-1]),
self.port_features[dpid][stat.port_no][2],
self.port_features[dpid][stat.port_no][0],
self.port_features[dpid][stat.port_no][1]))
print '\n'
|
muzixing/ryu
|
ryu/app/network_awareness/network_monitor.py
|
Python
|
apache-2.0
| 16,706
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Salactus, eater of s3 buckets.
"""
from __future__ import print_function
from collections import Counter
import csv
import functools
import json
import logging
import operator
import click
from rq.registry import FinishedJobRegistry, StartedJobRegistry
from rq.queue import Queue
from rq.worker import Worker
from c7n_salactus import worker, db
def debug(f):
def _f(*args, **kw):
try:
f(*args, **kw)
except (SystemExit, KeyboardInterrupt) as e:
raise
except:
import traceback, sys, pdb
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
functools.update_wrapper(_f, f)
return _f
@click.group()
def cli():
"""Salactus, eater of s3 buckets"""
@cli.command()
@click.option('--config', help='config file for accounts/buckets')
@click.option('--tag', help='filter accounts by tag')
@click.option('--account', '-a',
help='scan only the given accounts', multiple=True)
@click.option('--bucket', '-b',
help='scan only the given buckets', multiple=True)
@click.option('--debug', is_flag=True,
help='synchronous scanning, no workers')
def run(config, tag, bucket, account, debug=False):
"""Run across a set of accounts and buckets."""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger('botocore').setLevel(level=logging.WARNING)
if debug:
def invoke(f, *args, **kw):
if f.func_name == 'process_keyset':
print("skip keyset")
return
return f(*args, **kw)
worker.invoke = invoke
with open(config) as fh:
data = json.load(fh)
for account_info in data:
if tag and tag not in account_info.get('tags', ()):
continue
if account and account_info['name'] not in account:
continue
if bucket:
account_info['buckets'] = bucket
worker.invoke(worker.process_account, account_info)
@cli.command()
@click.option('--dbpath', help='path to json file')
def save(dbpath):
"""Save the current state to a json file
"""
d = db.db()
d.save(dbpath)
@cli.command()
@click.option('--dbpath', help='path to json file')
def reset(dbpath):
"""Save the current state to a json file
"""
click.echo('Delete db? Are you Sure? [yn] ', nl=False)
c = click.getchar()
click.echo()
if c == 'y':
click.echo('Wiping database')
worker.connection.flushdb()
elif c == 'n':
click.echo('Abort!')
else:
click.echo('Invalid input :(')
@cli.command()
def workers():
counter = Counter()
for w in Worker.all(connection=worker.connection):
for q in w.queues:
counter[q.name] += 1
import pprint
pprint.pprint(dict(counter))
@cli.command()
@click.option('--dbpath', '-f', help='json stats db')
@click.option('--account', '-a',
help="stats on a particular account", multiple=True)
def accounts(dbpath, account):
"""Report on stats by account"""
d = db.db(dbpath)
def _repr(a):
return "name:%s, matched:%d percent:%0.2f scanned:%d size:%d buckets:%d" % (
a.name,
a.matched,
a.percent_scanned,
a.scanned,
a.size,
len(a.buckets))
for a in sorted(d.accounts(), key=operator.attrgetter('name')):
click.echo(_repr(a))
def format_plain(buckets, fh):
def _repr(b):
return (
"account:%s name:%s percent:%0.2f matched:%d "
"scanned:%d size:%d kdenied:%d errors:%d partitions:%d") % (
b.account,
b.name,
b.percent_scanned,
b.matched,
b.scanned,
b.size,
b.keys_denied,
b.error_count,
b.partitions)
for b in buckets:
print(_repr(b), file=fh)
def format_csv(buckets, fh):
field_names = ['account', 'name', 'matched', 'scanned',
'size', 'keys_denied', 'error_count', 'partitions']
totals = Counter()
skip = set(('account', 'name', 'percent'))
for b in buckets:
for n in field_names:
if n in skip:
continue
totals[n] += getattr(b, n)
totals['account'] = 'Total'
totals['name'] = ''
writer = csv.DictWriter(fh, fieldnames=field_names, extrasaction='ignore')
writer.writerow(dict(zip(field_names, field_names)))
writer.writerow(totals)
for b in buckets:
bd = {n: getattr(b, n) for n in field_names}
writer.writerow(bd)
@cli.command()
@click.option('--dbpath', '-f', help="json stats db")
@click.option('--output', '-o', type=click.File('wb'), default='-',
help="file to to output to (default stdout)")
@click.option('--format', help="format for output",
type=click.Choice(['plain', 'csv']), default='plain')
@click.option('--bucket', '-b',
help="stats on a particular bucket", multiple=True)
@click.option('--account', '-a',
help="stats on a particular account", multiple=True)
@click.option('--matched', is_flag=True,
help="filter to buckets with matches")
@click.option('--kdenied', is_flag=True,
help="filter to buckets w/ denied key access")
@click.option('--denied', is_flag=True,
help="filter to buckets denied access")
@click.option('--errors', is_flag=True,
help="filter to buckets with errors")
@click.option('--size', type=int,
help="filter to buckets with at least size")
def buckets(bucket=None, account=None, matched=False, kdenied=False,
errors=False, dbpath=None, size=None, denied=False,
format=None, output=None):
"""Report on stats by bucket"""
d = db.db(dbpath)
buckets = []
for b in sorted(d.buckets(account),
key=operator.attrgetter('bucket_id')):
if bucket and b.name not in bucket:
continue
if matched and not b.matched:
continue
if kdenied and not b.keys_denied:
continue
if errors and not b.errors:
continue
if size and b.size < size:
continue
if denied and not b.denied:
continue
buckets.append(b)
formatter = format == 'csv' and format_csv or format_plain
formatter(buckets, output)
@cli.command()
def queues():
"""Reeport on progress by queues."""
conn = worker.connection
failure_q = None
def _repr(q):
return "running:%d pending:%d finished:%d" % (
StartedJobRegistry(q.name, conn).count,
q.count,
FinishedJobRegistry(q.name, conn).count)
for q in Queue.all(conn):
if q.name == 'failed':
failure_q = q
continue
click.echo("%s %s" % (q.name, _repr(q)))
if failure_q:
click.echo(
click.style(failure_q.name, fg='red') + ' %s' % _repr(failure_q))
@cli.command()
def failures():
"""Show any unexpected failures"""
q = Queue('failed', connection=worker.connection)
for i in q.get_jobs():
click.echo("%s on %s" % (i.func_name, i.origin))
click.echo(i.exc_info)
if __name__ == '__main__':
cli()
|
andrewalexander/cloud-custodian
|
tools/c7n_salactus/c7n_salactus/cli.py
|
Python
|
apache-2.0
| 8,030
|
# __init__.py for gempy.eti module
__version__ = '0.1.0 (May 2010)'
import gemcombineeti
import gireduceeti
import gmosaiceti
import gsappwaveeti
import gscrrejeti
import gsextracteti
import gsflateti
import gsskysubeti
import gstransformeti
import gswavelengtheti
import sploteti
|
pyrrho314/recipesystem
|
trunk/gempy/gemini/eti/__init__.py
|
Python
|
mpl-2.0
| 283
|
"""
Based on Parsedom for XBMC plugins
Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from collections import namedtuple
DomMatch = namedtuple('DOMMatch', ['attrs', 'content'])
re_type = type(re.compile(''))
def __get_dom_content(html, name, match):
if match.endswith('/>'): return ''
# override tag name with tag from match if possible
tag = re.match('<([^\s/>]+)', match)
if tag: name = tag.group(1)
start_str = '<%s' % name
end_str = "</%s" % name
# start/end tags without matching case cause issues
start = html.find(match)
end = html.find(end_str, start)
pos = html.find(start_str, start + 1)
while pos < end and pos != -1: # Ignore too early </endstr> return
tend = html.find(end_str, end + len(end_str))
if tend != -1:
end = tend
pos = html.find(start_str, pos + 1)
if start == -1 and end == -1:
result = ''
elif start > -1 and end > -1:
result = html[start + len(match):end]
elif end > -1:
result = html[:end]
elif start > -1:
result = html[start + len(match):]
else:
result = ''
return result
def __get_dom_elements(item, name, attrs):
if not attrs:
pattern = '(<%s(?:\s[^>]*>|/?>))' % name
this_list = re.findall(pattern, item, re.M | re.S | re.I)
else:
last_list = None
for key, value in attrs.iteritems():
value_is_regex = isinstance(value, re_type)
value_is_str = isinstance(value, basestring)
pattern = '''(<{tag}[^>]*\s{key}=(?P<delim>['"])(.*?)(?P=delim)[^>]*>)'''.format(tag=name, key=key)
re_list = re.findall(pattern, item, re.M | re.S | re.I)
if value_is_regex:
this_list = [r[0] for r in re_list if re.match(value, r[2])]
else:
temp_value = [value] if value_is_str else value
this_list = [r[0] for r in re_list if set(temp_value) <= set(r[2].split(' '))]
if not this_list:
has_space = (value_is_regex and ' ' in value.pattern) or (value_is_str and ' ' in value)
if not has_space:
pattern = '''(<{tag}[^>]*\s{key}=([^\s/>]*)[^>]*>)'''.format(tag=name, key=key)
re_list = re.findall(pattern, item, re.M | re.S | re.I)
if value_is_regex:
this_list = [r[0] for r in re_list if re.match(value, r[1])]
else:
this_list = [r[0] for r in re_list if value == r[1]]
if last_list is None:
last_list = this_list
else:
last_list = [item for item in this_list if item in last_list]
this_list = last_list
return this_list
def __get_attribs(element):
attribs = {}
for match in re.finditer('''\s+(?P<key>[^=]+)=\s*(?:(?P<delim>["'])(?P<value1>.*?)(?P=delim)|(?P<value2>[^"'][^>\s]*))''', element):
match = match.groupdict()
value1 = match.get('value1')
value2 = match.get('value2')
value = value1 if value1 is not None else value2
if value is None: continue
attribs[match['key'].lower().strip()] = value
return attribs
def parse_dom(html, name='', attrs=None, req=False):
if attrs is None: attrs = {}
name = name.strip()
if isinstance(html, unicode) or isinstance(html, DomMatch):
html = [html]
elif isinstance(html, str):
try:
html = [html.decode("utf-8")] # Replace with chardet thingy
except:
try:
html = [html.decode("utf-8", "replace")]
except:
html = [html]
elif not isinstance(html, list):
return ''
if not name:
return ''
if not isinstance(attrs, dict):
return ''
if req:
if not isinstance(req, list):
req = [req]
req = set([key.lower() for key in req])
all_results = []
for item in html:
if isinstance(item, DomMatch):
item = item.content
results = []
for element in __get_dom_elements(item, name, attrs):
attribs = __get_attribs(element)
if req and not req <= set(attribs.keys()): continue
temp = __get_dom_content(item, name, element).strip()
results.append(DomMatch(attribs, temp))
item = item[item.find(temp, item.find(element)):]
all_results += results
return all_results
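# Illustrative usage of parse_dom (hypothetical input; added as documentation
# only and never executed by this module):
#
#   matches = parse_dom('<a class="x" href="/y">hi</a>', 'a', attrs={'class': 'x'})
#   matches[0].attrs['href']  # -> '/y'
#   matches[0].content        # -> 'hi'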
|
felipenaselva/felipe.repository
|
plugin.video.streamhub/resources/lib/modules/dom_parser.py
|
Python
|
gpl-2.0
| 5,193
|
import os
import os.path
from amuse.units import units
from amuse.datamodel import Particle
from amuse.ext.star_to_sph import pickle_stellar_model
from amuse.community.mesa.interface import MESA as stellar_evolution_code
from xiTau_parameters import triple_parameters
def evolve_giant(giant, stop_radius):
stellar_evolution = stellar_evolution_code()
giant_in_code = stellar_evolution.particles.add_particle(giant)
while (giant_in_code.radius < 0.7 | units.AU):
giant_in_code.evolve_one_step()
print "Giant starts to ascend the giant branch, now saving model every step..."
print giant_in_code.as_set()
i = 0
while (giant_in_code.radius < stop_radius):
giant_in_code.evolve_one_step()
print giant_in_code.radius, giant_in_code.age
pickle_file_name = "./model_{0:=04}_".format(i) + "%0.1f"%(giant_in_code.radius.value_in(units.AU))
pickle_stellar_model(giant_in_code, pickle_file_name)
i += 1
if __name__ == "__main__":
model_directory = os.path.join("../../../../../BIGDATA/code/amuse-10.0", "giant_models")
if not os.path.exists(model_directory):
os.mkdir(model_directory)
os.chdir(model_directory)
giant = Particle(mass = triple_parameters["mass_out"])
print "\nEvolving with", stellar_evolution_code.__name__
evolve_giant(giant, 1.0 | units.AU)
print "Done"
|
hilaglanz/TCE
|
articles/A_evolve_outer_star_to_giant.py
|
Python
|
gpl-2.0
| 1,405
|
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def imbalance():
print("Test checks if Deep Learning works fine with an imbalanced dataset")
covtype = h2o.upload_file(pyunit_utils.locate("smalldata/covtype/covtype.20k.data"))
covtype[54] = covtype[54].asfactor()
hh_imbalanced = H2ODeepLearningEstimator(l1=1e-5, activation="Rectifier",
loss="CrossEntropy", hidden=[200,200], epochs=1,
balance_classes=False,reproducible=True,
seed=1234)
hh_imbalanced.train(x=list(range(54)),y=54, training_frame=covtype)
print(hh_imbalanced)
hh_balanced = H2ODeepLearningEstimator(l1=1e-5, activation="Rectifier",
loss="CrossEntropy", hidden=[200,200], epochs=1,
balance_classes=True,reproducible=True,
seed=1234)
hh_balanced.train(x=list(range(54)),y=54,training_frame=covtype)
print(hh_balanced)
#compare overall logloss
class_6_err_imbalanced = hh_imbalanced.logloss()
class_6_err_balanced = hh_balanced.logloss()
if class_6_err_imbalanced < class_6_err_balanced:
print("--------------------")
print("")
print("FAIL, balanced error greater than imbalanced error")
print("")
print("")
print("class_6_err_imbalanced")
print(class_6_err_imbalanced)
print("")
print("class_6_err_balanced")
print(class_6_err_balanced)
print("")
print("--------------------")
assert class_6_err_imbalanced >= class_6_err_balanced, "balance_classes makes it worse!"
if __name__ == "__main__":
pyunit_utils.standalone_test(imbalance)
else:
imbalance()
|
mathemage/h2o-3
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_imbalance_deeplearning_large.py
|
Python
|
apache-2.0
| 1,947
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from datetime import datetime
from .forms import TaskForm
from django.template import RequestContext, loader
from django.template.context_processors import csrf
from .models import Activity, TaskItem
import logging
from django.contrib.auth.decorators import login_required,user_passes_test
import pytz
from datetime import datetime
timesheet_logger = logging.getLogger("timesheetlog")
@login_required
@user_passes_test(lambda u: u.groups.filter(name="normal_user").count()!=0)
def activity_view(request):
"""
    View function for recording a new timesheet activity for the logged-in user.
"""
# todo: need code to verity input is valid.
user_id = request.user.id
add_activity_msg = None
add_activity_error = []
bj_tz = pytz.timezone(pytz.country_timezones("cn")[0])
if request.method == 'POST':
task_form = TaskForm(request, request.POST)
if task_form.is_valid():
cd = task_form.cleaned_data
task_name = cd['selected_task']
task_model = TaskItem.objects.filter(task_name=task_name)
start_time = datetime.strptime(cd["datetime_start"],'%m/%d/%Y %I:%M %p')
logging.error(start_time)
start_time = bj_tz.localize(start_time)
logging.debug(start_time)
start_time = bj_tz.normalize(start_time)
end_time = datetime.strptime(cd['datetime_end'], '%m/%d/%Y %I:%M %p')
end_time = bj_tz.localize(end_time)
end_time = bj_tz.normalize(end_time)
# check if time slot is already taken
# 1. check if current start_time is in any token duration. if yes, then ERROR. if NOT , go on.
# 2. get first item which start_time is larger than current start_time, if NO such item, then OK. OR GO ON.
# 3. check if current end_time is larger than first_item.start. if YES, ERROR.
all_activity_item = Activity.objects.all()
for item in all_activity_item:
if start_time >= item.start_time and start_time < item.end_time:
add_activity_error.append("start time is in token time interval, current activity: {0} -- {1} existing activity: {2} -- {3} : {4}".format(
start_time,end_time,item.start_time.astimezone(bj_tz),item.end_time.astimezone(bj_tz),item.task_name))
break
if len(add_activity_error) == 0:
gt_item = Activity.objects.filter(start_time__gt=start_time).order_by("start_time")
if len(gt_item) != 0:
first_gt_item = gt_item[0]
if end_time > first_gt_item.start_time:
add_activity_error.append("end time is too wide, current activity: {0} -- {1} existing activity: {2} -- {3} : {4}".format(
start_time,end_time,first_gt_item.start_time.astimezone(bj_tz),first_gt_item.end_time.astimezone(bj_tz),first_gt_item.task_name))
rest_duration = int(cd['rest_duration'])
total_duration = (end_time - start_time).seconds / 60
total_duration = total_duration - rest_duration
if total_duration <= 0:
add_activity_error.append("Rest time is bigger than time duration")
if len(add_activity_error) == 0:
activity = Activity(task_name=task_model[0], start_time=start_time, end_time=end_time,\
rest_duration=rest_duration, total_duration=total_duration,user_id=user_id)
activity.save()
add_activity_msg = "Successfully add activity:\n {0} TO {1} : {2} (Rest: {3} Total Time {4} (minutes))".format(
start_time, end_time, task_name, rest_duration, total_duration)
task_form = TaskForm(request)
# context = RequestContext(request, {'task_form': task_form, "add_activity_msg":add_activity_msg,\
# "add_activity_error": add_activity_error, 'user_name': request.user.username})
context = {'task_form': task_form, "add_activity_msg":add_activity_msg,\
"add_activity_error": add_activity_error, 'user_name': request.user.username}
context.update(csrf(request))
# timesheet_logger.debug(context)
template = loader.get_template('activity_view.html')
return HttpResponse(template.render(context))
|
scotthuang1989/timesheet
|
activity/views.py
|
Python
|
apache-2.0
| 4,632
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
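# A typical invocation (paths are illustrative, not required by the script):
#   python3 generate-seeds.py contrib/seeds > src/chainparamsseeds.h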
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8168)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 18168)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
hexxcointakeover/hexxcoin
|
contrib/seeds/generate-seeds.py
|
Python
|
mit
| 4,332
|
# -*- coding: utf-8 -*-
import os
from math import factorial
import functools
def memoize(func):
cache = {}
def memoized(key):
# Returned, new, memoized version of decorated function
if key not in cache:
cache[key] = func(key)
return cache[key]
return functools.update_wrapper(memoized, func)
@memoize
def fact(n):
return factorial(n)
def cat_direct(n):
return fact(2 * n) // fact(n + 1) // fact(n)
@memoize
def catR1(n):
return (1 if n == 0
else sum(catR1(i) * catR1(n - 1 - i)
for i in range(n)))
@memoize
def catR2(n):
return (1 if n == 0
else ((4 * n - 2) * catR2(n - 1)) // (n + 1))
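# For reference, the standard Catalan-number identities used above
# (general facts, not specific to this file):
#   closed form:    C(n) = (2n)! / ((n+1)! * n!)              -> cat_direct
#   convolution:    C(n) = sum_{i=0}^{n-1} C(i) * C(n-1-i)    -> catR1
#   multiplicative: C(n) = ((4n - 2) / (n + 1)) * C(n-1)      -> catR2
# All three give C(0..4) = 1, 1, 2, 5, 14.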
if __name__ == '__main__':
def pr(results):
fmt = '%-10s %-10s %-10s'
print((fmt % tuple(c.__name__ for c in defs)).upper())
print(fmt % (('=' * 10,) * 3))
for r in zip(*results):
print(fmt % r)
defs = (cat_direct, catR1, catR2)
results = [tuple(c(i) for i in range(15)) for c in defs]
pr(results)
os.system("pause")
|
NicovincX2/Python-3.5
|
Algorithmique/Mathématiques discrètes/Combinatoire/Nombre de Catalan/nb_catalan_3methods.py
|
Python
|
gpl-3.0
| 1,091
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon import tabs
from openstack_dashboard.dashboards.admin.info import tabs as project_tabs
class IndexView(tabs.TabbedTableView):
tab_group_class = project_tabs.SystemInfoTabs
template_name = 'admin/info/index.html'
|
Havate/havate-openstack
|
proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/admin/info/views.py
|
Python
|
apache-2.0
| 1,045
|
import time
from pigredients.ics import ws2801 as ws2801
if __name__ == '__main__':
led_chain = ws2801.WS2801_Chain()
led_chain.all_off()
led_chain.write()
led_chain.set_ic(ic_id=24, rgb_value=[255,0,255])
led_chain.write()
time.sleep(2)
led_chain.set_ic(ic_id=24, rgb_value=[0,0,0])
led_chain.write()
time.sleep(0.5)
led_chain.set_white()
led_chain.write()
time.sleep(0.5)
led_chain.set_red()
led_chain.write()
time.sleep(0.5)
led_chain.set_green()
led_chain.write()
time.sleep(0.5)
led_chain.set_blue()
led_chain.write()
time.sleep(0.5)
led_chain.set_white()
led_chain.write()
time.sleep(0.5)
led_chain.cycle()
led_chain.set_off()
led_chain.write()
|
rasathus/pigredients
|
examples/ws2801.py
|
Python
|
mit
| 795
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [None,
nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]),
op.inputs[2], op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d(grad, op.inputs[1], op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
return [
nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]), grad, op.inputs[2],
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
None,
nn_ops.conv2d(
op.inputs[0], grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))
]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding")),
nn_ops.conv3d_backprop_filter_v2(op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"))]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
return [None,
nn_ops.conv3d_backprop_filter_v2(grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
strides=op.get_attr("strides"),
padding=op.get_attr("padding")),
nn_ops.conv3d(grad,
op.inputs[1],
strides=op.get_attr("strides"),
padding=op.get_attr("padding"))]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
strides=op.get_attr("strides"),
padding=op.get_attr("padding")),
None,
nn_ops.conv3d(op.inputs[0],
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"))]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return nn_ops.avg_pool3d_grad(
array_ops.shape(op.inputs[0]),
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
return nn_ops.max_pool3d_grad(op.inputs[0],
op.outputs[0],
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
We assume that probs is of shape [batch_size * dim]
The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the
softmax output.
Returns:
gradient w.r.t the input to the softmax
"""
# TODO(ilyasu): assert that the tensor has two dimensions at
# graph-construction time? Alternatively: do different things
# depending on the dimensionality of the input tensors.
softmax = op.outputs[0]
grad_x = ((grad_softmax - array_ops.reshape(
math_ops.reduce_sum(grad_softmax * softmax, [1]), [-1, 1])) * softmax)
return grad_x
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
log_softmax = input - log(sum(exp(input))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
data_format=data_format))
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
"""Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
is the gradient of the bias term in BiasAdd)
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
rank = array_ops.rank(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
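  # Shape bookkeeping (illustrative shapes): for an NCHW input of shape
  # [N, C, H, W] the received grad has shape [C]; it is reshaped to
  # [1, C, 1, 1] and tiled back to [N, C, H, W]. For the default NHWC
  # layout the bias sits on the last axis instead.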
if data_format == b"NCHW":
expanded_shape = array_ops.concat(
0,
[array_ops.ones_like(shape[:-3]), bias_shape, array_ops.ones_like(shape[-2:])]
)
tile_mults = array_ops.concat(0, [shape[:-3], [1], shape[-2:]])
else:
expanded_shape = array_ops.concat(0, [array_ops.ones_like(shape[:-1]), bias_shape])
tile_mults = array_ops.concat(0, [shape[:-1], [1]])
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad,
reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._elu_grad(grad, op.outputs[0]),
array_ops.where(
x < 0., gen_nn_ops._elu_grad(grad, op.outputs[0] + 1),
array_ops.zeros(shape = array_ops.shape(x), dtype = x.dtype)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._relu_grad(grad, x), array_ops.zeros(
shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
return [nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d_backprop_filter(op.inputs[0],
array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]), op.inputs[1], grad,
op.get_attr("strides"), op.get_attr("padding")),
nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0], array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"), op.get_attr("padding"))
]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
return [nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding")),
nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius,
bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops._avg_pool_grad(
array_ops.shape(op.inputs[0]),
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops._max_pool_grad(op.inputs[0],
op.outputs[0],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalMaxPool.
Since FractionalMaxPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalMaxPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalMaxPool op.
"""
# pylint: disable=protected-access
return gen_nn_ops._fractional_max_pool_grad(op.inputs[0], op.outputs[0],
grad_0, op.outputs[1],
op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalAvgPool.
Since FractionalAvgPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalAvgPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalAvgPool op.
"""
# pylint: disable=protected-access
return gen_nn_ops._fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
"""Return the gradients for the 3 inputs of BatchNorm.
Args:
op: The BatchNormOp for which we need to compute gradients.
*grad: An argument list for tensors of gradients wrt the outputs
with grad[0] as grad_y.
Returns:
grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
[grad_y - mean(grad_y) - (x - mean(x)) *
mean(grad_y * (x - mean(x))) / (variance + epsilon)]
grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
rsqrt(variance + epsilon))
grad_offset: gradient for offset, which is sum(grad_y)
"""
return gen_nn_ops.fused_batch_norm_grad(
grad[0],
op.inputs[0],
op.inputs[1],
op.outputs[3],
op.outputs[4],
epsilon=op.get_attr("epsilon"),
data_format=op.get_attr("data_format"),
is_training=op.get_attr("is_training"))
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
A list of two tensors, the first being the gradient w.r.t to the input and
TopK, and the second being the gradient w.r.t. to the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.pack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [array_ops.reshape(
sparse_ops.sparse_to_dense(ind,
array_ops.reshape(
math_ops.reduce_prod(in_shape), [1]),
array_ops.reshape(grad, [-1]),
validate_indices=False),
in_shape), array_ops.zeros(
[], dtype=dtypes.int32)]
|
laosiaudi/tensorflow
|
tensorflow/python/ops/nn_grad.py
|
Python
|
apache-2.0
| 20,435
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
This file implement the tcp server for account-manager.
Multi Thread.
"""
__version__ = "0.1"
DEFAULT_ERROR_MESSAGE = """
Error Code:%(code)d.
Message:%(message)s.
Code Explain:%(explain)s.
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html"
import sys
import time
import socket # For gethostbyaddr()
import json
from warnings import filterwarnings, catch_warnings
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
from config import *
from socket_svr import *
class AMRequestHandler(StreamRequestHandler):
"""account-manager(AM) svr request handler base class.
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseAM/" + __version__
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to AM 0.1, i.e. don't send a status line.
default_request_version = "AM/0.1"
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = 1
requestline = self.raw_requestline
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
command, path, version = words
            if version[:3] != 'AM/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "AM/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid AM Version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = 1
if command != 'GET':
                self.send_error(400,
                                "Bad AM/0.1 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive
self.headers = self.MessageClass(self.rfile, 0)
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "AM/1.1"):
self.close_connection = 0
return True
def handle_one_request(self):
"""Handle a single AM request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific AM
commands such as GET and POST.
"""
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
self.wfile.flush() #actually send the response if not already done.
except socket.timeout, e:
#a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
        if self.request_version != 'AM/0.1':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
        if self.request_version != 'AM/0.1':
self.wfile.write("%s: %s\r\n" % (keyword, value))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
        if self.request_version != 'AM/0.1':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(format, *args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client ip address and current date/time are prefixed to every
message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.client_address[0],
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the AM protocol we support.
# Set this to AM/1.1 to enable automatic keepalive
protocol_version = "AM/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('AM Version Not Supported', 'Cannot fulfill request.'),
}
def send_head(self):
        """Send a minimal 200 response head for a JSON reply.
        The body is produced later (see make_real_result / send_result), so only
        the status line and the Content-Type header are written here.
        """
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
def parse_real_request(self):
""" Parse json request string (key, value) dict (Internal).
return dict.
"""
pass
    def append_result(self, key, value):
        """ Append key&value to the result dict, creating it on first use.
        """
        if not hasattr(self, 'result'):
            self.result = {}
        self.result[key] = value
def make_real_result(self):
""" Make obj string to json string.
return json string.
"""
self.json_result = json.dumps(self.result, sort_keys=True, separators=(',',':'))
def send_result(self):
""" Send json_result to client.
"""
self.wfile.write(self.json_result)
def do_VERSION(self):
self.send_head()
self.append_result("VERSION", AMRequestHandler.server_version)
self.make_real_result()
self.send_result()
def do_REGISTER(self):
self.send_head()
self.append_result("ERRNO", "{0}".format(self.errno))
self.make_real_result()
self.send_result()
def do_LOGIN(self):
pass
def do_LOGOUT(self):
pass
def do_GET(self):
pass
def do_POST(self):
pass
def do_DEL(self):
pass
def do_MOD(self):
pass
class AccountManagerSvr(ThreadingTCPServer):
""" TCP Svr of account-manager.
"""
allow_reuse_address = 1 # Seems to make sense in testing environment
"""
def __init__(self, config_file):
#super(ThreadingTCPServer, self).__init__(address, AMRequestHandler)
#ThreadingTCPServer.__init__(address, AMRequestHandler)
if __debug__ :
print "Address is %s:%d.", address
"""
def server_bind(self):
"""Override server_bind to store the server name."""
TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
def test(config_file):
"""Test the account-manager svr.
"""
config(config_file)
address = (config.config_tcp_svr_addr, config.config_tcp_svr_port)
svr = AccountManagerSvr(address, AMRequestHandler)
sa = svr.socket.getsockname()
print "account-manager Tcp Server Serving on ", sa[0], " port ", sa[1], " ..."
svr.serve_forever()
if __name__ == '__main__':
test("_config.json")
|
lifulong/account-manager
|
src/core/svr.py
|
Python
|
gpl-2.0
| 15,228
|
import os
import sys
BASE_DIR = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, BASE_DIR)
import argparse
import time
import datetime
from flask import Flask, g, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from server.data import Contest
import lib.models as models
from server.util import context_processor
from server.routes.default import default
from server.routes.judge import judge
# REDIRECT_SUB = re.compile('^http://localhost(:[0-9]+)?')
# REDIRECT_SUB_FOR = 'http://localhost/fk_2013_beta'
# class MyFlask(Flask):
# def process_response(self, response):
# global opts
# if opts.prefix is not None and opts.hostname is not None and response.status_code == 301:
# response.headers['Location'] = re.sub(REDIRECT_SUB, 'http://' + opts.hostname + opts.prefix, response.headers['Location'])
# return response
# app = MyFlask(__name__)
class ReverseProxied(object):
def __init__(self, app, script_name):
self.app = app
self.script_name = script_name
def __call__(self, environ, start_response):
if self.script_name:
environ['SCRIPT_NAME'] = self.script_name
path_info = environ['PATH_INFO']
if path_info.startswith(self.script_name):
environ['PATH_INFO'] = path_info[len(self.script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
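# Intended use (mirrors the --prefix handling in main() below): wrap the WSGI
# app so it can be served under a URL prefix behind a reverse proxy, e.g.
#   app.wsgi_app = ReverseProxied(app.wsgi_app, '/epsilon')
# where '/epsilon' stands in for whatever prefix is passed on the command line.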
app = Flask(__name__)
app.secret_key = "V=7Km+XXkg:}>4dT0('cV>Rp1TG82QEjah+X'v;^w:)a']y)^%"
db = app.db = SQLAlchemy(app)
models.register_base(db)
opts = app.opts = None
contest = app.contest = None
app.context_processor(context_processor)
app.register_blueprint(default)
app.register_blueprint(judge, url_prefix="/judge")
@app.before_request
def before_request():
g.request_start_time = time.time()
g.request_time = lambda: time.time() - g.request_start_time
@app.template_filter('format_time')
def template_format_time(time):
# return '%02d:%02d:%02d' % (int(time//60//60), int(time//60)%60, int(time)%60)
if isinstance(time, datetime.datetime):
time = (time - app.contest.start).total_seconds()
return '%02d:%02d' % (int(time // 60), int(time) % 60)
@app.route('/')
def index():
return render_template('index.html')
def main(argv):
global app
parser = argparse.ArgumentParser(description='A minimalistic programming contest environment.')
parser.add_argument('contest', help='the contest directory')
parser.add_argument('-p', '--port', default=31415, type=int, help='the port to listen on')
parser.add_argument('-H', '--host', default='', help='the host to listen on')
parser.add_argument('-d', '--debug', default=False, action='store_true', help='run in debug mode')
parser.add_argument('--prefix', default=None, help='run under prefix')
parser.add_argument('--server_name', default=None, help='server name')
# parser.add_argument('--hostname', default=None, help='run with the specified hostname')
parser.add_argument('--droptables', default=False, action='store_true', help='drop database tables and exit')
opts = app.opts = parser.parse_args(argv)
contest = app.contest = Contest.load(opts.contest)
app.config['SQLALCHEMY_DATABASE_URI'] = contest.db
if opts.prefix:
# app.config['APPLICATION_ROOT'] = opts.prefix
app.wsgi_app = ReverseProxied(app.wsgi_app, opts.prefix)
if opts.server_name:
app.config['SERVER_NAME'] = opts.server_name
models.set_contest_id(contest.id)
db.init_app(app)
if opts.droptables:
print('You are about to drop the database tables for contest %s!!!' % contest.id)
if input('Are you sure you want to continue? (y/N) ').lower() == 'y':
db.drop_all(app=app)
return 0
db.create_all(app=app)
app.run(host=opts.host, port=opts.port, debug=opts.debug)
if __name__ == '__main__':
main(sys.argv[1:])
|
SuprDewd/epsilon
|
server/epsilon.py
|
Python
|
mit
| 4,043
|
"""
:synopsis: most ajax processors for askbot
This module contains most (but not all) processors for Ajax requests.
Not so clear if this subdivision was necessary as separation of Ajax and non-ajax views
is not always very clean.
"""
import datetime
import logging
from bs4 import BeautifulSoup
from django.conf import settings as django_settings
from django.core import exceptions
#from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseRedirect
from django.http import HttpResponseForbidden
from django.forms import ValidationError, IntegerField, CharField
from django.shortcuts import get_object_or_404
from django.views.decorators import csrf
from django.utils import simplejson
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.translation import string_concat
from askbot.utils.slug import slugify
from askbot import models
from askbot import forms
from askbot.conf import should_show_sort_by_relevance
from askbot.conf import settings as askbot_settings
from askbot.models.tag import get_global_group
from askbot.utils import category_tree
from askbot.utils import decorators
from askbot.utils import url_utils
from askbot.utils.forms import get_db_object_or_404
from askbot import mail
from django.template import Context
from askbot.skins.loaders import render_into_skin, get_template
from askbot.skins.loaders import render_into_skin_as_string
from askbot.skins.loaders import render_text_into_skin
from askbot import const
@csrf.csrf_exempt
def manage_inbox(request):
"""delete, mark as new or seen user's
response memo objects, excluding flags
request data is memo_list - list of integer id's of the ActivityAuditStatus items
and action_type - string - one of delete|mark_new|mark_seen
"""
response_data = dict()
try:
if request.is_ajax():
if request.method == 'POST':
post_data = simplejson.loads(request.raw_post_data)
if request.user.is_authenticated():
activity_types = const.RESPONSE_ACTIVITY_TYPES_FOR_DISPLAY
activity_types += (
const.TYPE_ACTIVITY_MENTION,
const.TYPE_ACTIVITY_MARK_OFFENSIVE,
const.TYPE_ACTIVITY_MODERATED_NEW_POST,
const.TYPE_ACTIVITY_MODERATED_POST_EDIT
)
user = request.user
memo_set = models.ActivityAuditStatus.objects.filter(
id__in = post_data['memo_list'],
activity__activity_type__in = activity_types,
user = user
)
action_type = post_data['action_type']
if action_type == 'delete':
memo_set.delete()
elif action_type == 'mark_new':
memo_set.update(status = models.ActivityAuditStatus.STATUS_NEW)
elif action_type == 'mark_seen':
memo_set.update(status = models.ActivityAuditStatus.STATUS_SEEN)
elif action_type == 'remove_flag':
for memo in memo_set:
activity_type = memo.activity.activity_type
if activity_type == const.TYPE_ACTIVITY_MARK_OFFENSIVE:
request.user.flag_post(
post = memo.activity.content_object,
cancel_all = True
)
elif activity_type in \
(
const.TYPE_ACTIVITY_MODERATED_NEW_POST,
const.TYPE_ACTIVITY_MODERATED_POST_EDIT
):
post_revision = memo.activity.content_object
request.user.approve_post_revision(post_revision)
memo.delete()
#elif action_type == 'close':
# for memo in memo_set:
# if memo.activity.content_object.post_type == "exercise":
# request.user.close_exercise(exercise = memo.activity.content_object, reason = 7)
# memo.delete()
elif action_type == 'delete_post':
for memo in memo_set:
content_object = memo.activity.content_object
if isinstance(content_object, models.PostRevision):
post = content_object.post
else:
post = content_object
request.user.delete_post(post)
reject_reason = models.PostFlagReason.objects.get(
id = post_data['reject_reason_id']
)
template = get_template('email/rejected_post.html')
data = {
'post': post.html,
'reject_reason': reject_reason.details.html
}
body_text = template.render(Context(data))
mail.send_mail(
subject_line = _('your post was not accepted'),
body_text = unicode(body_text),
recipient_list = [post.author.email,]
)
memo.delete()
user.update_response_counts()
response_data['success'] = True
data = simplejson.dumps(response_data)
return HttpResponse(data, mimetype="application/json")
else:
raise exceptions.PermissionDenied(
_('Sorry, but anonymous users cannot access the inbox')
)
else:
raise exceptions.PermissionDenied('must use POST request')
else:
#todo: show error page but no-one is likely to get here
return HttpResponseRedirect(reverse('index'))
except Exception, e:
message = unicode(e)
if message == '':
message = _('Oops, apologies - there was some error')
response_data['message'] = message
response_data['success'] = False
data = simplejson.dumps(response_data)
return HttpResponse(data, mimetype="application/json")
def process_vote(user = None, vote_direction = None, post = None):
"""function (non-view) that actually processes user votes
- i.e. up- or down- votes
in the future this needs to be converted into a real view function
for that url and javascript will need to be adjusted
also in the future make keys in response data be more meaningful
right now they are kind of cryptic - "status", "count"
"""
if user.is_anonymous():
raise exceptions.PermissionDenied(_(
'Sorry, anonymous users cannot vote'
))
user.assert_can_vote_for_post(post = post, direction = vote_direction)
vote = user.get_old_vote_for_post(post)
response_data = {}
if vote != None:
user.assert_can_revoke_old_vote(vote)
score_delta = vote.cancel()
        response_data['count'] = post.points + score_delta
response_data['status'] = 1 #this means "cancel"
else:
#this is a new vote
votes_left = user.get_unused_votes_today()
if votes_left <= 0:
raise exceptions.PermissionDenied(
_('Sorry you ran out of votes for today')
)
votes_left -= 1
if votes_left <= \
askbot_settings.VOTES_LEFT_WARNING_THRESHOLD:
msg = _('You have %(votes_left)s votes left for today') \
% {'votes_left': votes_left }
response_data['message'] = msg
if vote_direction == 'up':
vote = user.upvote(post = post)
else:
vote = user.downvote(post = post)
response_data['count'] = post.points
response_data['status'] = 0 #this means "not cancel", normal operation
response_data['success'] = 1
return response_data
@csrf.csrf_exempt
def vote(request, id):
"""
todo: this subroutine needs serious refactoring it's too long and is hard to understand
vote_type:
acceptProblem : 0,
exerciseUpVote : 1,
exerciseDownVote : 2,
favorite : 4,
problemUpVote: 5,
problemDownVote:6,
offensiveExercise : 7,
remove offensiveExercise flag : 7.5,
remove all offensiveExercise flag : 7.6,
offensiveProblem:8,
remove offensiveProblem flag : 8.5,
remove all offensiveProblem flag : 8.6,
removeExercise: 9,
removeProblem:10
exerciseSubscribeUpdates:11
exerciseUnSubscribeUpdates:12
accept problem code:
response_data['allowed'] = -1, Accept his own problem 0, no allowed - Anonymous 1, Allowed - by default
response_data['success'] = 0, failed 1, Success - by default
response_data['status'] = 0, By default 1, Problem has been accepted already(Cancel)
vote code:
allowed = -3, Don't have enough votes left
-2, Don't have enough reputation score
-1, Vote his own post
0, no allowed - Anonymous
1, Allowed - by default
status = 0, By default
1, Cancel
2, Vote is too old to be canceled
offensive code:
allowed = -3, Don't have enough flags left
-2, Don't have enough reputation score to do this
0, not allowed
1, allowed
status = 0, by default
1, can't do it again
"""
response_data = {
"allowed": 1,
"success": 1,
"status" : 0,
"count" : 0,
"message" : ''
}
try:
if request.is_ajax() and request.method == 'POST':
vote_type = request.POST.get('type')
else:
raise Exception(_('Sorry, something is not right here...'))
if vote_type == '0':
if request.user.is_authenticated():
problem_id = request.POST.get('postId')
problem = get_object_or_404(models.Post, post_type='problem', id = problem_id)
# make sure exercise author is current user
if problem.accepted():
request.user.unaccept_best_problem(problem)
response_data['status'] = 1 #cancelation
else:
request.user.accept_best_problem(problem)
####################################################################
problem.thread.update_summary_html() # regenerate exercise/thread summary html
####################################################################
else:
raise exceptions.PermissionDenied(
_('Sorry, but anonymous users cannot accept problems')
)
elif vote_type in ('1', '2', '5', '6'):#Q&A up/down votes
###############################
# all this can be avoided with
# better query parameters
vote_direction = 'up'
if vote_type in ('2','6'):
vote_direction = 'down'
if vote_type in ('5', '6'):
#todo: fix this weirdness - why postId here
#and not with exercise?
id = request.POST.get('postId')
post = get_object_or_404(models.Post, post_type='problem', id=id)
else:
post = get_object_or_404(models.Post, post_type='exercise', id=id)
#
######################
response_data = process_vote(
user = request.user,
vote_direction = vote_direction,
post = post
)
####################################################################
if vote_type in ('1', '2'): # up/down-vote exercise
post.thread.update_summary_html() # regenerate exercise/thread summary html
####################################################################
elif vote_type in ['7', '8']:
#flag exercise or problem
if vote_type == '7':
post = get_object_or_404(models.Post, post_type='exercise', id=id)
if vote_type == '8':
id = request.POST.get('postId')
post = get_object_or_404(models.Post, post_type='problem', id=id)
request.user.flag_post(post)
response_data['count'] = post.offensive_flag_count
response_data['success'] = 1
elif vote_type in ['7.5', '8.5']:
#flag exercise or problem
if vote_type == '7.5':
post = get_object_or_404(models.Post, post_type='exercise', id=id)
if vote_type == '8.5':
id = request.POST.get('postId')
post = get_object_or_404(models.Post, post_type='problem', id=id)
request.user.flag_post(post, cancel = True)
response_data['count'] = post.offensive_flag_count
response_data['success'] = 1
elif vote_type in ['7.6', '8.6']:
#flag exercise or problem
if vote_type == '7.6':
post = get_object_or_404(models.Post, id=id)
if vote_type == '8.6':
id = request.POST.get('postId')
post = get_object_or_404(models.Post, id=id)
request.user.flag_post(post, cancel_all = True)
response_data['count'] = post.offensive_flag_count
response_data['success'] = 1
elif vote_type in ['9', '10']:
#delete exercise or problem
post = get_object_or_404(models.Post, post_type='exercise', id=id)
if vote_type == '10':
id = request.POST.get('postId')
post = get_object_or_404(models.Post, post_type='problem', id=id)
if post.deleted == True:
request.user.restore_post(post = post)
else:
request.user.delete_post(post = post)
elif request.is_ajax() and request.method == 'POST':
if not request.user.is_authenticated():
response_data['allowed'] = 0
response_data['success'] = 0
exercise = get_object_or_404(models.Post, post_type='exercise', id=id)
vote_type = request.POST.get('type')
#accept problem
if vote_type == '4':
fave = request.user.toggle_favorite_exercise(exercise)
response_data['count'] = models.FavoriteExercise.objects.filter(thread = exercise.thread).count()
if fave == False:
response_data['status'] = 1
elif vote_type == '11':#subscribe q updates
user = request.user
if user.is_authenticated():
if user not in exercise.thread.followed_by.all():
user.follow_exercise(exercise)
if askbot_settings.EMAIL_VALIDATION == True \
and user.email_isvalid == False:
response_data['message'] = \
_(
'Your subscription is saved, but email address '
'%(email)s needs to be validated, please see '
'<a href="%(details_url)s">more details here</a>'
) % {'email':user.email,'details_url':reverse('faq') + '#validate'}
subscribed = user.subscribe_for_followed_exercise_alerts()
if subscribed:
if 'message' in response_data:
response_data['message'] += '<br/>'
response_data['message'] += _('email update frequency has been set to daily')
#response_data['status'] = 1
#responst_data['allowed'] = 1
else:
pass
#response_data['status'] = 0
#response_data['allowed'] = 0
elif vote_type == '12':#unsubscribe q updates
user = request.user
if user.is_authenticated():
user.unfollow_exercise(exercise)
else:
response_data['success'] = 0
response_data['message'] = u'Request mode is not supported. Please try again.'
        if vote_type not in ('1', '2', '4', '5', '6', '11', '12'):
#favorite or subscribe/unsubscribe
#upvote or downvote exercise or problem - those
#are handled within user.upvote and user.downvote
post = models.Post.objects.get(id = id)
post.thread.invalidate_cached_data()
data = simplejson.dumps(response_data)
except Exception, e:
response_data['message'] = unicode(e)
response_data['success'] = 0
data = simplejson.dumps(response_data)
return HttpResponse(data, mimetype="application/json")
#internally grouped views - used by the tagging system
@csrf.csrf_exempt
@decorators.post_only
@decorators.ajax_login_required
def mark_tag(request, **kwargs):#tagging system
action = kwargs['action']
post_data = simplejson.loads(request.raw_post_data)
raw_tagnames = post_data['tagnames']
reason = post_data['reason']
assert reason in ('good', 'bad', 'subscribed')
#separate plain tag names and wildcard tags
tagnames, wildcards = forms.clean_marked_tagnames(raw_tagnames)
cleaned_tagnames, cleaned_wildcards = request.user.mark_tags(
tagnames,
wildcards,
reason = reason,
action = action
)
#lastly - calculate tag usage counts
tag_usage_counts = dict()
for name in tagnames:
if name in cleaned_tagnames:
tag_usage_counts[name] = 1
else:
tag_usage_counts[name] = 0
for name in wildcards:
if name in cleaned_wildcards:
tag_usage_counts[name] = models.Tag.objects.filter(
name__startswith = name[:-1]
).count()
else:
tag_usage_counts[name] = 0
return HttpResponse(simplejson.dumps(tag_usage_counts), mimetype="application/json")
#@decorators.ajax_only
@decorators.get_only
def get_tags_by_wildcard(request):
"""returns an json encoded array of tag names
in the response to a wildcard tag name
"""
wildcard = request.GET.get('wildcard', None)
if wildcard is None:
raise Http404
matching_tags = models.Tag.objects.get_by_wildcards( [wildcard,] )
count = matching_tags.count()
names = matching_tags.values_list('name', flat = True)[:20]
re_data = simplejson.dumps({'tag_count': count, 'tag_names': list(names)})
return HttpResponse(re_data, mimetype = 'application/json')
@decorators.get_only
def get_thread_shared_users(request):
"""returns snippet of html with users"""
thread_id = request.GET['thread_id']
thread_id = IntegerField().clean(thread_id)
thread = models.Thread.objects.get(id=thread_id)
users = thread.get_users_shared_with()
data = {
'users': users,
}
html = render_into_skin_as_string('widgets/user_list.html', data, request)
re_data = simplejson.dumps({
'html': html,
'users_count': users.count(),
'success': True
})
return HttpResponse(re_data, mimetype='application/json')
@decorators.get_only
def get_thread_shared_groups(request):
"""returns snippet of html with groups"""
thread_id = request.GET['thread_id']
thread_id = IntegerField().clean(thread_id)
thread = models.Thread.objects.get(id=thread_id)
groups = thread.get_groups_shared_with()
data = {'groups': groups}
html = render_into_skin_as_string('widgets/groups_list.html', data, request)
re_data = simplejson.dumps({
'html': html,
'groups_count': groups.count(),
'success': True
})
return HttpResponse(re_data, mimetype='application/json')
@decorators.ajax_only
def get_html_template(request):
"""returns rendered template"""
template_name = request.REQUEST.get('template_name', None)
allowed_templates = (
'widgets/tag_category_selector.html',
)
#have allow simple context for the templates
if template_name not in allowed_templates:
raise Http404
return {
'html': get_template(template_name).render()
}
@decorators.get_only
def get_tag_list(request):
"""returns tags to use in the autocomplete
function
"""
tags = models.Tag.objects.filter(
deleted = False,
status = models.Tag.STATUS_ACCEPTED
)
tag_names = tags.values_list(
'name', flat = True
)
output = '\n'.join(map(escape, tag_names))
return HttpResponse(output, mimetype = 'text/plain')
@decorators.get_only
def load_object_description(request):
"""returns text of the object description in text"""
obj = get_db_object_or_404(request.GET)#askbot forms utility
text = getattr(obj.description, 'text', '').strip()
return HttpResponse(text, mimetype = 'text/plain')
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def save_object_description(request):
"""if object description does not exist,
creates a new record, otherwise edits an existing
one"""
obj = get_db_object_or_404(request.POST)
text = request.POST['text']
if obj.description:
request.user.edit_post(obj.description, body_text=text)
else:
request.user.post_object_description(obj, body_text=text)
return {'html': obj.description.html}
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def rename_tag(request):
if request.user.is_anonymous() \
or not request.user.is_administrator_or_moderator():
raise exceptions.PermissionDenied()
post_data = simplejson.loads(request.raw_post_data)
to_name = forms.clean_tag(post_data['to_name'])
from_name = forms.clean_tag(post_data['from_name'])
path = post_data['path']
#kwargs = {'from': old_name, 'to': new_name}
#call_command('rename_tags', **kwargs)
tree = category_tree.get_data()
category_tree.rename_category(
tree,
from_name = from_name,
to_name = to_name,
path = path
)
category_tree.save_data(tree)
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def delete_tag(request):
"""todo: actually delete tags
now it is only deletion of category from the tree"""
if request.user.is_anonymous() \
or not request.user.is_administrator_or_moderator():
raise exceptions.PermissionDenied()
post_data = simplejson.loads(request.raw_post_data)
tag_name = forms.clean_tag(post_data['tag_name'])
path = post_data['path']
tree = category_tree.get_data()
category_tree.delete_category(tree, tag_name, path)
category_tree.save_data(tree)
return {'tree_data': tree}
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def add_tag_category(request):
"""adds a category at the tip of a given path expects
the following keys in the ``request.POST``
* path - array starting with zero giving path to
the category page where to add the category
* new_category_name - string that must satisfy the
      same requirements as a tag
return json with the category tree data
todo: switch to json stored in the live settings
now we have indented input
"""
if request.user.is_anonymous() \
or not request.user.is_administrator_or_moderator():
raise exceptions.PermissionDenied()
post_data = simplejson.loads(request.raw_post_data)
category_name = forms.clean_tag(post_data['new_category_name'])
path = post_data['path']
tree = category_tree.get_data()
if category_tree.path_is_valid(tree, path) == False:
raise ValueError('category insertion path is invalid')
new_path = category_tree.add_category(tree, category_name, path)
category_tree.save_data(tree)
return {
'tree_data': tree,
'new_path': new_path
}
@decorators.get_only
def get_groups_list(request):
"""returns names of group tags
for the autocomplete function"""
global_group = get_global_group()
groups = models.Group.objects.exclude_personal()
group_names = groups.exclude(
name=global_group.name
).values_list(
'name', flat = True
)
output = '\n'.join(group_names)
return HttpResponse(output, mimetype = 'text/plain')
@csrf.csrf_protect
def subscribe_for_tags(request):
"""process subscription of users by tags"""
#todo - use special separator to split tags
tag_names = request.REQUEST.get('tags','').strip().split()
pure_tag_names, wildcards = forms.clean_marked_tagnames(tag_names)
if request.user.is_authenticated():
if request.method == 'POST':
if 'ok' in request.POST:
request.user.mark_tags(
pure_tag_names,
wildcards,
reason = 'good',
action = 'add'
)
request.user.message_set.create(
message = _('Your tag subscription was saved, thanks!')
)
else:
message = _(
'Tag subscription was canceled (<a href="%(url)s">undo</a>).'
) % {'url': request.path + '?tags=' + request.REQUEST['tags']}
request.user.message_set.create(message = message)
return HttpResponseRedirect(reverse('index'))
else:
data = {'tags': tag_names}
return render_into_skin('subscribe_for_tags.html', data, request)
else:
all_tag_names = pure_tag_names + wildcards
message = _('Please sign in to subscribe for: %(tags)s') \
% {'tags': ', '.join(all_tag_names)}
request.user.message_set.create(message = message)
request.session['subscribe_for_tags'] = (pure_tag_names, wildcards)
return HttpResponseRedirect(url_utils.get_login_url())
@decorators.get_only
def api_get_exercises(request):
"""json api for retrieving exercises"""
query = request.GET.get('query', '').strip()
if not query:
return HttpResponseBadRequest('Invalid query')
if askbot_settings.GROUPS_ENABLED:
threads = models.Thread.objects.get_visible(user=request.user)
else:
threads = models.Thread.objects.all()
threads = models.Thread.objects.get_for_query(
search_query=query,
qs=threads
)
if should_show_sort_by_relevance():
threads = threads.extra(order_by = ['-relevance'])
#todo: filter out deleted threads, for now there is no way
threads = threads.distinct()[:30]
thread_list = [{
'title': escape(thread.title),
'url': thread.get_absolute_url(),
'problem_count': thread.get_problem_count(request.user)
} for thread in threads]
json_data = simplejson.dumps(thread_list)
return HttpResponse(json_data, mimetype = "application/json")
@csrf.csrf_exempt
@decorators.post_only
@decorators.ajax_login_required
def set_tag_filter_strategy(request):
"""saves data in the ``User.[email/display]_tag_filter_strategy``
for the current user
"""
filter_type = request.POST['filter_type']
filter_value = int(request.POST['filter_value'])
assert(filter_type in ('display', 'email'))
if filter_type == 'display':
assert(filter_value in dict(const.TAG_DISPLAY_FILTER_STRATEGY_CHOICES))
request.user.display_tag_filter_strategy = filter_value
else:
assert(filter_value in dict(const.TAG_EMAIL_FILTER_STRATEGY_CHOICES))
request.user.email_tag_filter_strategy = filter_value
request.user.save()
return HttpResponse('', mimetype = "application/json")
@login_required
@csrf.csrf_protect
def close(request, id):#close exercise
"""view to initiate and process
exercise close
"""
exercise = get_object_or_404(models.Post, post_type='exercise', id=id)
try:
if request.method == 'POST':
form = forms.CloseForm(request.POST)
if form.is_valid():
reason = form.cleaned_data['reason']
request.user.close_exercise(
exercise = exercise,
reason = reason
)
return HttpResponseRedirect(exercise.get_absolute_url())
else:
request.user.assert_can_close_exercise(exercise)
form = forms.CloseForm()
data = {
'exercise': exercise,
'form': form,
}
return render_into_skin('close.html', data, request)
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(exercise.get_absolute_url())
@login_required
@csrf.csrf_protect
def reopen(request, id):#re-open exercise
"""view to initiate and process
    exercise reopen
this is not an ajax view
"""
exercise = get_object_or_404(models.Post, post_type='exercise', id=id)
# open exercise
try:
if request.method == 'POST' :
request.user.reopen_exercise(exercise)
return HttpResponseRedirect(exercise.get_absolute_url())
else:
request.user.assert_can_reopen_exercise(exercise)
closed_by_profile_url = exercise.thread.closed_by.get_profile_url()
closed_by_username = exercise.thread.closed_by.username
data = {
'exercise' : exercise,
'closed_by_profile_url': closed_by_profile_url,
'closed_by_username': closed_by_username,
}
return render_into_skin('reopen.html', data, request)
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(exercise.get_absolute_url())
@csrf.csrf_exempt
@decorators.ajax_only
def swap_exercise_with_problem(request):
"""receives two json parameters - problem id
and new exercise title
the view is made to be used only by the site administrator
or moderators
"""
if request.user.is_authenticated():
if request.user.is_administrator() or request.user.is_moderator():
problem = models.Post.objects.get_problems(request.user).get(id = request.POST['problem_id'])
new_exercise = problem.swap_with_exercise(new_title = request.POST['new_title'])
return {
'id': new_exercise.id,
'slug': new_exercise.slug
}
raise Http404
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def upvote_comment(request):
if request.user.is_anonymous():
raise exceptions.PermissionDenied(_('Please sign in to vote'))
form = forms.VoteForm(request.POST)
if form.is_valid():
comment_id = form.cleaned_data['post_id']
cancel_vote = form.cleaned_data['cancel_vote']
comment = get_object_or_404(models.Post, post_type='comment', id=comment_id)
process_vote(
post = comment,
vote_direction = 'up',
user = request.user
)
else:
raise ValueError
#FIXME: rename js
return {'score': comment.points}
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def delete_post(request):
if request.user.is_anonymous():
raise exceptions.PermissionDenied(_('Please sign in to delete/restore posts'))
form = forms.VoteForm(request.POST)
if form.is_valid():
post_id = form.cleaned_data['post_id']
post = get_object_or_404(
models.Post,
post_type__in = ('exercise', 'problem'),
id = post_id
)
if form.cleaned_data['cancel_vote']:
request.user.restore_post(post)
else:
request.user.delete_post(post)
else:
raise ValueError
return {'is_deleted': post.deleted}
#askbot-user communication system
@csrf.csrf_exempt
def read_message(request):#marks message a read
if request.method == "POST":
if request.POST['formdata'] == 'required':
request.session['message_silent'] = 1
if request.user.is_authenticated():
request.user.delete_messages()
return HttpResponse('')
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def edit_group_membership(request):
#todo: this call may need to go.
#it used to be the one creating groups
#from the user profile page
#we have a separate method
form = forms.EditGroupMembershipForm(request.POST)
if form.is_valid():
group_name = form.cleaned_data['group_name']
user_id = form.cleaned_data['user_id']
try:
user = models.User.objects.get(id=user_id)
except models.User.DoesNotExist:
raise exceptions.PermissionDenied(
'user with id %d not found' % user_id
)
action = form.cleaned_data['action']
#warning: possible race condition
if action == 'add':
group_params = {'name': group_name, 'user': user}
group = models.Group.objects.get_or_create(**group_params)
request.user.edit_group_membership(user, group, 'add')
template = get_template('widgets/group_snippet.html')
return {
'name': group.name,
'description': getattr(group.tag_wiki, 'text', ''),
'html': template.render({'group': group})
}
elif action == 'remove':
try:
                group = models.Group.objects.get(name = group_name)
request.user.edit_group_membership(user, group, 'remove')
except models.Group.DoesNotExist:
raise exceptions.PermissionDenied()
else:
raise exceptions.PermissionDenied()
else:
raise exceptions.PermissionDenied()
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def save_group_logo_url(request):
"""saves urls for the group logo"""
form = forms.GroupLogoURLForm(request.POST)
if form.is_valid():
group_id = form.cleaned_data['group_id']
image_url = form.cleaned_data['image_url']
group = models.Group.objects.get(id = group_id)
group.logo_url = image_url
group.save()
else:
raise ValueError('invalid data found when saving group logo')
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def add_group(request):
group_name = request.POST.get('group')
if group_name:
group = models.Group.objects.get_or_create(
name=group_name,
openness=models.Group.OPEN,
user=request.user,
)
url = reverse('users_by_group', kwargs={'group_id': group.id,
'group_slug': slugify(group_name)})
response_dict = dict(group_name = group_name,
url = url )
return response_dict
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def delete_group_logo(request):
group_id = IntegerField().clean(int(request.POST['group_id']))
group = models.Group.objects.get(id = group_id)
group.logo_url = None
group.save()
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def delete_post_reject_reason(request):
reason_id = IntegerField().clean(int(request.POST['reason_id']))
reason = models.PostFlagReason.objects.get(id = reason_id)
reason.delete()
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def toggle_group_profile_property(request):
#todo: this might be changed to more general "toggle object property"
group_id = IntegerField().clean(int(request.POST['group_id']))
property_name = CharField().clean(request.POST['property_name'])
assert property_name in (
'moderate_email',
'moderate_problems_to_enquirers',
'is_vip'
)
group = models.Group.objects.get(id = group_id)
new_value = not getattr(group, property_name)
setattr(group, property_name, new_value)
group.save()
return {'is_enabled': new_value}
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def set_group_openness(request):
group_id = IntegerField().clean(int(request.POST['group_id']))
value = IntegerField().clean(int(request.POST['value']))
group = models.Group.objects.get(id=group_id)
group.openness = value
group.save()
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.admins_only
def edit_object_property_text(request):
model_name = CharField().clean(request.REQUEST['model_name'])
object_id = IntegerField().clean(request.REQUEST['object_id'])
property_name = CharField().clean(request.REQUEST['property_name'])
accessible_fields = (
('Group', 'preapproved_emails'),
('Group', 'preapproved_email_domains')
)
if (model_name, property_name) not in accessible_fields:
raise exceptions.PermissionDenied()
obj = models.get_model(model_name).objects.get(id=object_id)
if request.method == 'POST':
text = CharField().clean(request.POST['text'])
setattr(obj, property_name, text)
obj.save()
elif request.method == 'GET':
return {'text': getattr(obj, property_name)}
else:
raise exceptions.PermissionDenied()
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def join_or_leave_group(request):
"""called when user wants to join/leave
ask to join/cancel join request, depending
    on the group's acceptance level for the given user
returns resulting "membership_level"
"""
if request.user.is_anonymous():
raise exceptions.PermissionDenied()
Group = models.Group
Membership = models.GroupMembership
group_id = IntegerField().clean(request.POST['group_id'])
group = Group.objects.get(id=group_id)
membership = request.user.get_group_membership(group)
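    #toggle membership: with no membership yet, join or request to join (depending on the group's openness); with an existing membership or pending request, leave / cancel it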
if membership is None:
membership = request.user.join_group(group)
new_level = membership.get_level_display()
else:
membership.delete()
new_level = Membership.get_level_value_display(Membership.NONE)
return {'membership_level': new_level}
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def save_post_reject_reason(request):
"""saves post reject reason and returns the reason id
if reason_id is not given in the input - a new reason is created,
otherwise a reason with the given id is edited and saved
"""
form = forms.EditRejectReasonForm(request.POST)
if form.is_valid():
title = form.cleaned_data['title']
details = form.cleaned_data['details']
if form.cleaned_data['reason_id'] is None:
reason = request.user.create_post_reject_reason(
title = title, details = details
)
else:
reason_id = form.cleaned_data['reason_id']
reason = models.PostFlagReason.objects.get(id = reason_id)
request.user.edit_post_reject_reason(
reason, title = title, details = details
)
return {
'reason_id': reason.id,
'title': title,
'details': details
}
else:
raise Exception(forms.format_form_errors(form))
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
@decorators.admins_only
def moderate_suggested_tag(request):
"""accepts or rejects a suggested tag
if thread id is given, then tag is
applied to or removed from only one thread,
otherwise the decision applies to all threads
"""
form = forms.ModerateTagForm(request.POST)
if form.is_valid():
tag_id = form.cleaned_data['tag_id']
thread_id = form.cleaned_data.get('thread_id', None)
try:
tag = models.Tag.objects.get(id=tag_id)#can tag not exist?
except models.Tag.DoesNotExist:
return
if thread_id:
threads = models.Thread.objects.filter(id = thread_id)
else:
threads = tag.threads.all()
if form.cleaned_data['action'] == 'accept':
#todo: here we lose ability to come back
#to the tag moderation and approve tag to
#other threads later for the case where tag.used_count > 1
tag.status = models.Tag.STATUS_ACCEPTED
tag.save()
for thread in threads:
thread.add_tag(
tag_name = tag.name,
user = tag.created_by,
timestamp = datetime.datetime.now(),
silent = True
)
else:
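            #rejecting: if the tag is also used on threads not being moderated, only detach it from these threads and recount its usage; a tag that is still merely a suggestion is deleted outright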
if tag.threads.count() > len(threads):
for thread in threads:
thread.tags.remove(tag)
tag.used_count = tag.threads.count()
tag.save()
elif tag.status == models.Tag.STATUS_SUGGESTED:
tag.delete()
else:
raise Exception(forms.format_form_errors(form))
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def save_draft_exercise(request):
"""saves draft exercises"""
#todo: allow drafts for anonymous users
if request.user.is_anonymous():
return
form = forms.DraftExerciseForm(request.POST)
if form.is_valid():
title = form.cleaned_data.get('title', '')
text = form.cleaned_data.get('text', '')
tagnames = form.cleaned_data.get('tagnames', '')
if title or text or tagnames:
try:
draft = models.DraftExercise.objects.get(author=request.user)
except models.DraftExercise.DoesNotExist:
draft = models.DraftExercise()
draft.title = title
draft.text = text
draft.tagnames = tagnames
draft.author = request.user
draft.save()
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def save_draft_problem(request):
"""saves draft problems"""
#todo: allow drafts for anonymous users
if request.user.is_anonymous():
return
form = forms.DraftProblemForm(request.POST)
if form.is_valid():
thread_id = form.cleaned_data['thread_id']
try:
thread = models.Thread.objects.get(id=thread_id)
except models.Thread.DoesNotExist:
return
try:
draft = models.DraftProblem.objects.get(
thread=thread,
author=request.user
)
except models.DraftProblem.DoesNotExist:
draft = models.DraftProblem()
draft.author = request.user
draft.thread = thread
draft.text = form.cleaned_data.get('text', '')
draft.save()
@decorators.get_only
def get_users_info(request):
"""retuns list of user names and email addresses
of "fake" users - so that admins can post on their
behalf"""
if request.user.is_anonymous():
return HttpResponseForbidden()
query = request.GET['q']
limit = IntegerField().clean(request.GET['limit'])
users = models.User.objects
user_info_list = users.filter(username__istartswith=query)
if request.user.is_administrator_or_moderator():
user_info_list = user_info_list.values_list('username', 'email')
else:
user_info_list = user_info_list.values_list('username')
result_list = ['|'.join(info) for info in user_info_list[:limit]]
return HttpResponse('\n'.join(result_list), mimetype = 'text/plain')
@csrf.csrf_protect
def share_exercise_with_group(request):
form = forms.ShareExerciseForm(request.POST)
try:
if form.is_valid():
thread_id = form.cleaned_data['thread_id']
group_name = form.cleaned_data['recipient_name']
thread = models.Thread.objects.get(id=thread_id)
exercise_post = thread._exercise_post()
#get notif set before
sets1 = exercise_post.get_notify_sets(
mentioned_users=list(),
exclude_list=[request.user,]
)
#share the post
if group_name == askbot_settings.GLOBAL_GROUP_NAME:
thread.make_public(recursive=True)
else:
group = models.Group.objects.get(name=group_name)
thread.add_to_groups((group,), recursive=True)
#get notif sets after
sets2 = exercise_post.get_notify_sets(
mentioned_users=list(),
exclude_list=[request.user,]
)
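            #notify only the users who gained visibility of the post through this sharing (set difference of the after/before notification sets)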
notify_sets = {
'for_mentions': sets2['for_mentions'] - sets1['for_mentions'],
'for_email': sets2['for_email'] - sets1['for_email'],
'for_inbox': sets2['for_inbox'] - sets1['for_inbox']
}
exercise_post.issue_update_notifications(
updated_by=request.user,
notify_sets=notify_sets,
activity_type=const.TYPE_ACTIVITY_POST_SHARED,
timestamp=datetime.datetime.now()
)
return HttpResponseRedirect(thread.get_absolute_url())
except Exception:
error_message = _('Sorry, looks like sharing request was invalid')
request.user.message_set.create(message=error_message)
return HttpResponseRedirect(thread.get_absolute_url())
@csrf.csrf_protect
def share_exercise_with_user(request):
form = forms.ShareExerciseForm(request.POST)
try:
if form.is_valid():
thread_id = form.cleaned_data['thread_id']
username = form.cleaned_data['recipient_name']
thread = models.Thread.objects.get(id=thread_id)
user = models.User.objects.get(username=username)
group = user.get_personal_group()
thread.add_to_groups([group], recursive=True)
#notify the person
#todo: see if user could already see the post - b/f the sharing
notify_sets = {
'for_inbox': set([user]),
'for_mentions': set([user]),
'for_email': set([user])
}
thread._exercise_post().issue_update_notifications(
updated_by=request.user,
notify_sets=notify_sets,
activity_type=const.TYPE_ACTIVITY_POST_SHARED,
timestamp=datetime.datetime.now()
)
return HttpResponseRedirect(thread.get_absolute_url())
except Exception:
error_message = _('Sorry, looks like sharing request was invalid')
request.user.message_set.create(message=error_message)
return HttpResponseRedirect(thread.get_absolute_url())
@csrf.csrf_protect
def moderate_group_join_request(request):
"""moderator of the group can accept or reject a new user"""
request_id = IntegerField().clean(request.POST['request_id'])
action = request.POST['action']
assert(action in ('approve', 'deny'))
activity = get_object_or_404(models.Activity, pk=request_id)
group = activity.content_object
applicant = activity.user
if group.has_moderator(request.user):
group_membership = models.GroupMembership.objects.get(
user=applicant, group=group
)
if action == 'approve':
group_membership.level = models.GroupMembership.FULL
group_membership.save()
msg_data = {'user': applicant.username, 'group': group.name}
message = _('%(user)s, welcome to group %(group)s!') % msg_data
applicant.message_set.create(message=message)
else:
group_membership.delete()
activity.delete()
url = request.user.get_absolute_url() + '?sort=inbox§ion=join_requests'
return HttpResponseRedirect(url)
else:
raise Http404
@decorators.get_only
def get_editor(request):
"""returns bits of html for the tinymce editor in a dictionary with keys:
* html - the editor element
* scripts - an array of script tags
* success - True
"""
config = simplejson.loads(request.GET['config'])
form = forms.EditorForm(editor_attrs=config)
editor_html = render_text_into_skin(
'{{ form.media }} {{ form.editor }}',
{'form': form},
request
)
#parse out javascript and dom, and return them separately
#we need that, because js needs to be added in a special way
html_soup = BeautifulSoup(editor_html)
parsed_scripts = list()
for script in html_soup.find_all('script'):
parsed_scripts.append({
'contents': script.string,
'src': script.get('src', None)
})
data = {
'html': str(html_soup.textarea),
'scripts': parsed_scripts,
'success': True
}
return HttpResponse(simplejson.dumps(data), mimetype='application/json')
@csrf.csrf_exempt
@decorators.ajax_only
@decorators.post_only
def publish_problem(request):
"""will publish or unpublish problem, if
current thread is moderated
"""
denied_msg = _('Sorry, only thread moderators can use this function')
if request.user.is_authenticated():
if request.user.is_administrator_or_moderator() is False:
raise exceptions.PermissionDenied(denied_msg)
#todo: assert permission
problem_id = IntegerField().clean(request.POST['problem_id'])
problem = models.Post.objects.get(id=problem_id, post_type='problem')
if problem.thread.has_moderator(request.user) is False:
raise exceptions.PermissionDenied(denied_msg)
enquirer = problem.thread._exercise_post().author
enquirer_group = enquirer.get_personal_group()
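    #a problem counts as "published" when it is shared with the asker's personal group, so membership in that group decides whether to publish or unpublish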
if problem.has_group(enquirer_group):
message = _('The problem is now unpublished')
problem.remove_from_groups([enquirer_group])
else:
problem.add_to_groups([enquirer_group])
message = _('The problem is now published')
#todo: notify enquirer by email about the post
request.user.message_set.create(message=message)
return {'redirect_url': problem.get_absolute_url()}
|
maxwward/SCOPEBak
|
askbot/views/commands.py
|
Python
|
gpl-3.0
| 52,741
|
#!/usr/bin/env python
import os
import sys
import logging
import subprocess
from lib.environ import setup_environ
setup_environ()
# Don't allow `runserver` or `shell`
if 'runserver' in sys.argv:
logging.warn('You should serve your local instance with dev_appserver. See `serve.sh`')
subprocess.call('./serve.sh')
if 'shell' in sys.argv:
logging.warn('You should run the shell with ./shell.py, see for more info.')
subprocess.call('./shell.py')
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
papaloizouc/InvalidNameException
|
manage.py
|
Python
|
gpl-2.0
| 664
|
'''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,locale,string
import sasmol.sasmol as sasmol
import sassie.simulate.constraints.constraints as constraints
import sassie.interface.input_filter as input_filter
def check_input_values(variables, **kwargs):
runname = variables['runname'][0]
path = variables['path'][0]
pdbmol1 = variables['pdbmol1'][0]
pdbmol2 = variables['pdbmol2'][0]
ofile = variables['ofile'][0]
accpos = variables['accpos'][0]
pos = variables['pos'][0]
trans = variables['trans'][0]
dtrans = variables['dtrans'][0]
theta = variables['theta'][0]
dtheta = variables['dtheta'][0]
basis = variables['basis'][0]
cutoff = variables['cutoff'][0]
lowrg = variables['lowrg'][0]
highrg = variables['highrg'][0]
zflag = variables['zflag'][0]
zcutoff = variables['zcutoff'][0]
cflag = variables['cflag'][0]
confile = variables['confile'][0]
nexsegments1 = variables['nexsegments1'][0]
nsegments1 = variables['nsegments1'][0]
reslow1 = variables['reslow1'][0]
numcont1 = variables['numcont1'][0]
nexsegments2 = variables['nexsegments2'][0]
nsegments2 = variables['nsegments2'][0]
reslow2 = variables['reslow2'][0]
numcont2 = variables['numcont2'][0]
error = input_filter.check_name(runname)
if(error!=[]):
return error
error=[]
if 'no_file_check' not in kwargs:
ev,rv,wv=input_filter.check_permissions(path)
if(not ev or not rv or not wv):
error.append('permission error in input file path '+path+' [code = '+str(ev)+str(rv)+str(wv)+']')
if(ev==False):
error.append('path does not exist')
elif(rv==False):
error.append('read permission not allowed')
elif(wv==False):
error.append('write permission not allowed')
return error
pdbfile1=pdbmol1
pdbfile2=pdbmol2
print 'pdbfile1: ', pdbfile1
error = input_filter.check_file_exists(pdbfile1)
if(error!=[]):
return error
error = input_filter.check_file_exists(pdbfile2)
if(error!=[]):
return error
ev,value=input_filter.check_pdb_dcd(pdbfile1,'pdb')
#ev == 0 not tested since non-existent file will trigger check_file_exists test above
if(ev == 0):
error.append('input pdb file, '+pdbfile1[3:]+', does not exist')
return error
if(value == 0):
error.append( 'input pdb file, '+pdbfile1[3:]+', is not a valid pdb file')
return error
ev,value=input_filter.check_pdb_dcd(pdbfile2,'pdb')
#ev == 0 not tested since non-existent file will trigger check_file_exists test above
if(ev == 0):
error.append('input pdb file, '+pdbfile2[3:]+', does not exist')
return error
if(value == 0):
error.append( 'input pdb file, '+pdbfile2[3:]+', is not a valid pdb file')
return error
m1=sasmol.SasMol(0)
m1.read_pdb(pdbfile1)
m2=sasmol.SasMol(1)
m2.read_pdb(pdbfile2)
segname1 = m1.segname()
segname2 = m2.segname()
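    #excluded segment names are given as comma separated strings, e.g. "SEG1,SEG2"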
segment_names_1 = string.split(nsegments1,',')
segment_names_2 = string.split(nsegments2,',')
if(accpos != 0 and accpos != 1):
error.append( 'accept supplied position needs to be (0==no or 1==yes) : '+str(accpos))
return error
elif(len(pos)!=3):
error.append( 'three float values are required for initial position (x,y,z) : '+str(pos))
return error
elif(len(trans)!=3):
error.append( 'three int values are required for number of x,y,z moves : '+str(trans))
return error
elif(trans[0] < 1 or trans[1] < 1 or trans[2] < 1):
        error.append( 'you must specify at least ONE translational "move" for each axis : '+str(trans))
return error
elif(len(dtrans)!=3):
error.append( 'three float values are required for dx,dy,dz step sizes : '+str(dtrans))
return error
elif(len(theta)!=3):
error.append( 'three int values are required for theta angular moves : '+str(theta))
return error
elif(theta[0] < 1 or theta[1] < 1 or theta[2] < 1):
        error.append( 'you must specify at least ONE angular "move" for each axis : '+str(theta))
return error
elif(len(dtheta)!=3):
error.append( 'three float values are required for dtheta (x,y,z) step sizes : '+str(dtheta))
return error
elif(basis!='CA'):
error.append( 'only "CA" is accepted as a basis')
return error
elif(cutoff < 1.0):
error.append( 'use a larger cutoff value, cutoff = '+str(cutoff))
return error
elif(zflag != 0 and zflag != 1):
error.append( 'ERROR in Z coordinate filter selection: zflag == 0 for "no" and 1 for "yes", zflag = '+str(zflag))
return error
elif(cflag != 0 and cflag != 1):
error.append( 'ERROR in atomic constraints selection: cflag == 0 for "no" and 1 for "yes", cflag = '+str(cflag))
return error
elif(cflag == 1):
err = input_filter.check_file_exists(confile)
if(err != []):
lerr=['ERROR in constraint filename selection: ']
lerr.append(err)
error.append(lerr[0]+err[0])
return error
filter_flag = 1
m3=sasmol.SasMol(2)
err0 = m3.merge_two_molecules(m1,m2)
err = constraints.read_constraints(m3,confile,filter_flag)
if(err != []):
error.append(err[0])
return error
elif(lowrg > highrg):
error.append( 'low Rg cutoff is larger than high Rg cutoff, lowrg = '+str(lowrg)+' highrg = '+str(highrg))
return error
elif(lowrg < 0 or highrg < 0):
error.append( 'Rg cutoffs need to be >= zero, lowrg = '+str(lowrg)+' highrg = '+str(highrg))
return error
elif(nexsegments1 < 0):
error.append( 'number of excluded segments needs to be >= 0 (mol1) : '+str(nexsegments1))
return error
elif(nexsegments2 < 0):
error.append( 'number of excluded segments needs to be >= 0 (mol2) : '+str(nexsegments2))
return error
elif(nexsegments1 > 0 and len(segment_names_1)!=nexsegments1):
error.append( 'number of segment names does not match number of excluded segments (mol1) : '+str(nsegments1))
return error
elif(nexsegments2 > 0 and len(segment_names_2)!=nexsegments2):
error.append( 'number of segment names does not match number of excluded segments (mol2) : '+str(nsegments2))
return error
elif(nexsegments1 > 0 and len(reslow1) != nexsegments1):
error.append( 'the number of low residue values does not match the number of excluded segments (mol1), lowres1 = '+str(reslow1)+' nexsegments1 = '+str(nexsegments1))
return error
elif(nexsegments2 > 0 and len(reslow2) != nexsegments2):
error.append( 'the number of low residue values does not match the number of excluded segments (mol2), lowres2 = '+str(reslow2)+' nexsegments2 = '+str(nexsegments2))
return error
elif(nexsegments1 > 0 and len(numcont1) != nexsegments1):
error.append( 'the number of contiguous residues does not match the number of excluded segments (mol1), numcont1 = '+str(numcont1)+' nexsegments1 = '+str(nexsegments1))
return error
elif(nexsegments2 > 0 and len(numcont2) != nexsegments2):
error.append( 'the number of contiguous residues does not match the number of excluded segments (mol2), numcont2 = '+str(numcont2)+' nexsegments2 = '+str(nexsegments2))
return error
return error
|
madscatt/zazzie
|
src_2.7/sassie/interface/two_body_grid/two_body_grid_filter.py
|
Python
|
gpl-3.0
| 8,445
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Test for variants."""
from django.urls import reverse
from weblate.trans.models import Variant
from weblate.trans.tests.test_views import ViewTestCase
class VariantTest(ViewTestCase):
def create_component(self):
return self.create_android()
def add_variants(self, suffix: str = ""):
request = self.get_request()
translation = self.component.source_translation
translation.add_unit(request, f"bar{suffix}", "Default string", None)
translation.add_unit(request, "barMin", "Min string", None)
translation.add_unit(request, "barShort", "Short string", None)
def test_edit_component(self, suffix: str = ""):
self.add_variants()
self.assertEqual(Variant.objects.count(), 0)
self.component.variant_regex = "(Min|Short|Max)$"
self.component.save()
self.assertEqual(Variant.objects.count(), 1)
self.assertEqual(Variant.objects.get().unit_set.count(), 6)
self.component.variant_regex = ""
self.component.save()
self.assertEqual(Variant.objects.count(), 0)
def test_add_units(self, suffix: str = ""):
self.component.variant_regex = "(Min|Short|Max)$"
self.component.save()
self.assertEqual(Variant.objects.count(), 0)
self.add_variants(suffix)
self.assertEqual(Variant.objects.count(), 1)
self.assertEqual(Variant.objects.get().unit_set.count(), 6)
def test_edit_component_suffix(self):
self.test_edit_component("Max")
def test_add_units_suffix(self):
self.test_add_units("Max")
def test_variants_inner(self):
self.component.variant_regex = (
"//(SCRTEXT_S|SCRTEXT_M|SCRTEXT_L|REPTEXT|DDTEXT)"
)
self.component.save()
units = (
"DTEL///ABSD/DE_INTEND_POSTBACKGR//SCRTEXT_M 00001",
"DTEL///ABSD/DE_INTEND_POSTBACKGR//REPTEXT 00001",
"DTEL///ABSD/DE_INTEND_POSTBACKGR//SCRTEXT_L 00001",
"DTEL///ABSD/DE_INTEND_POSTBACKGR//SCRTEXT_S 00001",
"DTEL///ABSD/DE_INTEND_POSTBACKGR//DDTEXT 00001",
)
request = self.get_request()
translation = self.component.source_translation
for key in units:
translation.add_unit(request, key, "Test string", None)
self.assertEqual(Variant.objects.count(), 1)
self.assertEqual(Variant.objects.get().unit_set.count(), 10)
def test_variants_flag(self, code: str = "en"):
self.add_variants()
self.assertEqual(Variant.objects.count(), 0)
translation = self.component.translation_set.get(language_code=code)
unit = translation.unit_set.get(context="barMin")
unit.extra_flags = "variant:'Default string'"
unit.save()
self.assertEqual(Variant.objects.count(), 1)
self.assertEqual(Variant.objects.get().unit_set.count(), 4)
unit = translation.unit_set.get(context="barShort")
unit.extra_flags = "variant:'Default string'"
unit.save()
self.assertEqual(Variant.objects.count(), 1)
self.assertEqual(Variant.objects.get().unit_set.count(), 6)
unit = translation.unit_set.get(context="barMin")
unit.extra_flags = ""
unit.save()
self.assertEqual(Variant.objects.count(), 1)
self.assertEqual(Variant.objects.get().unit_set.count(), 4)
unit = translation.unit_set.get(context="barShort")
unit.extra_flags = ""
unit.save()
self.assertEqual(Variant.objects.count(), 0)
def test_variants_flag_translation(self):
self.test_variants_flag("cs")
def test_add_variant_unit(self):
self.make_manager()
base = "Thank you for using Weblate."
response = self.client.post(
reverse(
"new-unit",
kwargs={
"project": self.component.project.slug,
"component": self.component.slug,
"lang": "en",
},
),
{
"context": "variantial",
"source_0": "Source",
"variant": base,
},
follow=True,
)
self.assertContains(response, "New string has been added")
translation = self.component.translation_set.get(language_code="cs")
unit = translation.unit_set.get(context="variantial")
self.assertEqual(unit.source_unit.extra_flags, f'variant:"{base}"')
self.assertTrue(unit.defined_variants.exists())
|
phw/weblate
|
weblate/trans/tests/test_variants.py
|
Python
|
gpl-3.0
| 5,304
|
"""
Problem: in a 2D coordinate system where every value is a double, a triangle
can be represented by 3 points. Given the 3 points of such a triangle and
another point (x, y), determine whether (x, y) lies inside the triangle.
"""
class PointInTriangle:
@classmethod
def cross_product(cls, x1, y1, x2, y2):
return x1 * y2 - x2 * y1
@classmethod
def is_inside(cls, x1, y1, x2, y2, x3, y3, x, y):
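        # make the vertices run counterclockwise (swap P2 and P3 if the
        # triangle was given clockwise); the point is then inside the triangle
        # iff it lies on the left of (or on) every directed edge, i.e. all
        # three cross products below are non-negative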
if cls.cross_product(x3-x1, y3-y1, x2-x1, y2-y1) >= 0:
x2, x3 = x3, x2
y2, y3 = y3, y2
if cls.cross_product(x2-x1, y2-y1, x-x1, y-y1) < 0:
return False
if cls.cross_product(x3-x2, y3-y2, x-x2, y-y2) < 0:
return False
if cls.cross_product(x1-x3, y1-y3, x-x3, y-y3) < 0:
return False
return True
if __name__ == '__main__':
x1 = -5
y1 = 0
x2 = 0
y2 = 8
x3 = 5
y3 = 0
x = 0
y = 5
print(PointInTriangle.is_inside(x1, y1, x2, y2, x3, y3, x, y))
|
ResolveWang/algrithm_qa
|
other/q5.py
|
Python
|
mit
| 989
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# isocodes
#
# Copyright (C) 2011
# Einar Uvsløkk, <einar.uvslokk@linux.com>
#
# isocodes is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# isocodes is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# ISO-639
languages = {
"Afar" : "aa",
"Abkhazian" : "ab",
"Avestan" : "ae",
"Afrikaans" : "af",
"Akan" : "ak",
"Amharic" : "am",
"Aragonese" : "an",
"Arabic" : "ar",
"Assamese" : "as",
"Avaric" : "av",
"Aymara" : "ay",
"Azerbaijani" : "az",
"Bashkir" : "ba",
"Belarusian" : "be",
"Bulgarian" : "bg",
"Bihari" : "bh",
"Bislama" : "bi",
"Bambara" : "bm",
"Bengali" : "bn",
"Bangla" : "bn",
"Tibetan" : "bo",
"Breton" : "br",
"Bosnian" : "bs",
"Catalan" : "ca",
"Chechen" : "ce",
"Chamorro" : "ch",
"Corsican" : "co",
"Cree" : "cr",
"Czech" : "cs",
"Church Slavic" : "cu",
"Chuvash" : "cv",
"Welsh" : "cy",
"Danish" : "da",
"German" : "de",
"Divehi" : "dv",
"Maldivian" : "dv",
"Dzongkha" : "dz",
"Bhutani" : "dz",
"Éwé" : "ee",
"Greek" : "el",
"English" : "en",
"Esperanto" : "eo",
"Spanish" : "es",
"Estonian" : "et",
"Basque" : "eu",
"Persian" : "fa",
"Fulah" : "ff",
"Finnish" : "fi",
"Fijian" : "fj",
"Fiji" : "fj",
"Faroese" : "fo",
"French" : "fr",
"Western Frisian" : "fy",
"Irish" : "ga",
"Scottish Gaelic" : "gd",
"Galician" : "gl",
"Guarani" : "gn",
"Gujarati" : "gu",
"Manx" : "gv",
"Hausa" : "ha",
"Hebrew (formerly iw)" : "he",
"Hindi" : "hi",
"Hiri Motu" : "ho",
"Croatian" : "hr",
"Haitian" : "ht",
"Haitian Creole" : "ht",
"Hungarian" : "hu",
"Armenian" : "hy",
"Herero" : "hz",
"Interlingua" : "ia",
"Indonesian (formerly in)" : "id",
"Interlingue" : "ie",
"Occidental" : "ie",
"Igbo" : "ig",
"Sichuan Yi" : "ii",
"Nuosu" : "ii",
"Inupiak" : "ik",
"Inupiaq" : "ik",
"Ido" : "io",
"Icelandic" : "is",
"Italian" : "it",
"Inuktitut" : "iu",
"Japanese" : "ja",
"Javanese" : "jv",
"Georgian" : "ka",
"Kongo" : "kg",
"Kikuyu" : "ki",
"Gikuyu" : "ki",
"Kuanyama" : "kj",
"Kwanyama" : "kj",
"Kazakh" : "kk",
"Kalaallisut" : "kl",
"Greenlandic" : "kl",
"Central Khmer" : "km",
"Cambodian" : "km",
"Kannada" : "kn",
"Korean" : "ko",
"Kanuri" : "kr",
"Kashmiri" : "ks",
"Kurdish" : "ku",
"Komi" : "kv",
"Cornish" : "kw",
"Kirghiz" : "ky",
"Latin" : "la",
"Letzeburgesch" : "lb",
"Luxembourgish" : "lb",
"Ganda" : "lg",
"Limburgish" : "li",
"Limburger" : "li",
"Limburgan" : "li",
"Lingala" : "ln",
"Lao" : "lo",
"Laotian" : "lo",
"Lithuanian" : "lt",
"Luba-Katanga" : "lu",
"Latvian" : "lv",
"Lettish" : "lv",
"Malagasy" : "mg",
"Marshallese" : "mh",
"Maori" : "mi",
"Macedonian" : "mk",
"Malayalam" : "ml",
"Mongolian" : "mn",
"Moldavian" : "mo",
"Marathi" : "mr",
"Malay" : "ms",
"Maltese" : "mt",
"Burmese" : "my",
"Nauru" : "na",
"Norwegian Bokmål" : "nb",
"Ndebele, North" : "nd",
"Nepali" : "ne",
"Ndonga" : "ng",
"Dutch" : "nl",
"Norwegian Nynorsk" : "nn",
"Norwegian" : "no",
"Ndebele, South" : "nr",
"Navajo" : "nv",
"Navaho" : "nv",
"Chichewa" : "ny",
"Nyanja" : "ny",
"Occitan" : "oc",
"Provençal" : "oc",
"Ojibwa" : "oj",
"(Afan) Oromo" : "om",
"Oriya" : "or",
"Ossetian" : "os",
"Ossetic" : "os",
"Panjabi" : "pa",
"Punjabi" : "pa",
"Pali" : "pi",
"Polish" : "pl",
"Pashto" : "ps",
"Pushto" : "ps",
"Portuguese" : "pt",
"Quechua" : "qu",
"Romansh" : "rm",
"Rundi" : "rn",
"Kirundi" : "rn",
"Romanian" : "ro",
"Russian" : "ru",
"Kinyarwanda" : "rw",
"Sanskrit" : "sa",
"Sardinian" : "sc",
"Sindhi" : "sd",
"Northern Sami" : "se",
"Sango" : "sg",
"Sangro" : "sg",
"Sinhala" : "si",
"Sinhalese" : "si",
"Slovak" : "sk",
"Slovenian" : "sl",
"Samoan" : "sm",
"Shona" : "sn",
"Somali" : "so",
"Albanian" : "sq",
"Serbian" : "sr",
"Swati" : "ss",
"Siswati" : "ss",
"Sesotho" : "st",
"Sotho, Southern" : "st",
"Sundanese" : "su",
"Swedish" : "sv",
"Swahili" : "sw",
"Tamil" : "ta",
"Telugu" : "te",
"Tajik" : "tg",
"Thai" : "th",
"Tigrinya" : "ti",
"Turkmen" : "tk",
"Tagalog" : "tl",
"Tswana" : "tn",
"Setswana" : "tn",
"Tonga" : "to",
"Turkish" : "tr",
"Tsonga" : "ts",
"Tatar" : "tt",
"Twi" : "tw",
"Tahitian" : "ty",
"Uighur" : "ug",
"Ukrainian" : "uk",
"Urdu" : "ur",
"Uzbek" : "uz",
"Venda" : "ve",
"Vietnamese" : "vi",
"Volapük" : "vo",
"Volapuk" : "vo",
"Walloon" : "wa",
"Wolof" : "wo",
"Xhosa" : "xh",
"Yiddish (formerly ji)" : "yi",
"Yoruba" : "yo",
"Zhuang" : "za",
"Chinese" : "zh",
"Zulu" : "zu",
}
# ISO-3166
countries = {
"Andorra" : "AD",
"United Arab Emirates" : "AE",
"Afghanistan" : "AF",
"Antigua and Barbuda" : "AG",
"Anguilla" : "AI",
"Albania" : "AL",
"Armenia" : "AM",
"Netherlands Antilles" : "AN",
"Angola" : "AO",
"Antarctica" : "AQ",
"Argentina" : "AR",
"Samoa (American)" : "AS",
"Austria" : "AT",
"Australia" : "AU",
"Aruba" : "AW",
"Aaland Islands" : "AX",
"Azerbaijan" : "AZ",
"Bosnia and Herzegovina" : "BA",
"Barbados" : "BB",
"Bangladesh" : "BD",
"Belgium" : "BE",
"Burkina Faso" : "BF",
"Bulgaria" : "BG",
"Bahrain" : "BH",
"Burundi" : "BI",
"Benin" : "BJ",
"Bermuda" : "BM",
"Brunei" : "BN",
"Bolivia" : "BO",
"Brazil" : "BR",
"Bahamas" : "BS",
"Bhutan" : "BT",
"Bouvet Island" : "BV",
"Botswana" : "BW",
"Belarus" : "BY",
"Belize" : "BZ",
"Canada" : "CA",
"Cocos (Keeling) Islands" : "CC",
"Congo (Dem. Rep.)" : "CD",
"Central African Republic" : "CF",
"Congo (Rep.)" : "CG",
"Switzerland" : "CH",
"Côte d'Ivoire" : "CI",
"Cook Islands" : "CK",
"Chile" : "CL",
"Cameroon" : "CM",
"China" : "CN",
"Colombia" : "CO",
"Costa Rica" : "CR",
"Cuba" : "CU",
"Cape Verde" : "CV",
"Christmas Island" : "CX",
"Cyprus" : "CY",
"Czech Republic" : "CZ",
"Germany" : "DE",
"Djibouti" : "DJ",
"Denmark" : "DK",
"Dominica" : "DM",
"Dominican Republic" : "DO",
"Algeria" : "DZ",
"Ecuador" : "EC",
"Estonia" : "EE",
"Egypt" : "EG",
"Western Sahara" : "EH",
"Eritrea" : "ER",
"Spain" : "ES",
"Ethiopia" : "ET",
"Finland" : "FI",
"Fiji" : "FJ",
"Falkland Islands" : "FK",
"Micronesia" : "FM",
"Faeroe Islands" : "FO",
"France" : "FR",
"Gabon" : "GA",
"Britain (United Kingdom)" : "GB",
"Grenada" : "GD",
"Georgia" : "GE",
"French Guiana" : "GF",
"Guernsey" : "GG",
"Ghana" : "GH",
"Gibraltar" : "GI",
"Greenland" : "GL",
"Gambia" : "GM",
"Guinea" : "GN",
"Guadeloupe" : "GP",
"Equatorial Guinea" : "GQ",
"Greece" : "GR",
"South Georgia and the South Sandwich Islands" : "GS",
"Guatemala" : "GT",
"Guam" : "GU",
"Guinea-Bissau" : "GW",
"Guyana" : "GY",
"Hong Kong" : "HK",
"Heard Island and McDonald Islands" : "HM",
"Honduras" : "HN",
"Croatia" : "HR",
"Haiti" : "HT",
"Hungary" : "HU",
"Indonesia" : "ID",
"Ireland" : "IE",
"Israel" : "IL",
"Isle of Man" : "IM",
"India" : "IN",
"British Indian Ocean Territory" : "IO",
"Iraq" : "IQ",
"Iran" : "IR",
"Iceland" : "IS",
"Italy" : "IT",
"Jersey" : "JE",
"Jamaica" : "JM",
"Jordan" : "JO",
"Japan" : "JP",
"Kenya" : "KE",
"Kyrgyzstan" : "KG",
"Cambodia" : "KH",
"Kiribati" : "KI",
"Comoros" : "KM",
"St Kitts and Nevis" : "KN",
"Korea (North)" : "KP",
"Korea (South)" : "KR",
"Kuwait" : "KW",
"Cayman Islands" : "KY",
"Kazakhstan" : "KZ",
"Laos" : "LA",
"Lebanon" : "LB",
"St Lucia" : "LC",
"Liechtenstein" : "LI",
"Sri Lanka" : "LK",
"Liberia" : "LR",
"Lesotho" : "LS",
"Lithuania" : "LT",
"Luxembourg" : "LU",
"Latvia" : "LV",
"Libya" : "LY",
"Morocco" : "MA",
"Monaco" : "MC",
"Moldova" : "MD",
"Montenegro" : "ME",
"Madagascar" : "MG",
"Marshall Islands" : "MH",
"Macedonia" : "MK",
"Mali" : "ML",
"Myanmar (Burma)" : "MM",
"Mongolia" : "MN",
"Macao" : "MO",
"Northern Mariana Islands" : "MP",
"Martinique" : "MQ",
"Mauritania" : "MR",
"Montserrat" : "MS",
"Malta" : "MT",
"Mauritius" : "MU",
"Maldives" : "MV",
"Malawi" : "MW",
"Mexico" : "MX",
"Malaysia" : "MY",
"Mozambique" : "MZ",
"Namibia" : "NA",
"New Caledonia" : "NC",
"Niger" : "NE",
"Norfolk Island" : "NF",
"Nigeria" : "NG",
"Nicaragua" : "NI",
"Netherlands" : "NL",
"Norway" : "NO",
"Nepal" : "NP",
"Nauru" : "NR",
"Niue" : "NU",
"New Zealand" : "NZ",
"Oman" : "OM",
"Panama" : "PA",
"Peru" : "PE",
"French Polynesia" : "PF",
"Papua New Guinea" : "PG",
"Philippines" : "PH",
"Pakistan" : "PK",
"Poland" : "PL",
"St Pierre and Miquelon" : "PM",
"Pitcairn" : "PN",
"Puerto Rico" : "PR",
"Palestine" : "PS",
"Portugal" : "PT",
"Palau" : "PW",
"Paraguay" : "PY",
"Qatar" : "QA",
"Reunion" : "RE",
"Romania" : "RO",
"Serbia" : "RS",
"Russia" : "RU",
"Rwanda" : "RW",
"Saudi Arabia" : "SA",
"Solomon Islands" : "SB",
"Seychelles" : "SC",
"Sudan" : "SD",
"Sweden" : "SE",
"Singapore" : "SG",
"St Helena" : "SH",
"Slovenia" : "SI",
"Svalbard and Jan Mayen" : "SJ",
"Slovakia" : "SK",
"Sierra Leone" : "SL",
"San Marino" : "SM",
"Senegal" : "SN",
"Somalia" : "SO",
"Suriname" : "SR",
"Sao Tome and Principe" : "ST",
"El Salvador" : "SV",
"Syria" : "SY",
"Swaziland" : "SZ",
"Turks and Caicos Islands" : "TC",
"Chad" : "TD",
"French Southern and Antarctic Lands" : "TF",
"Togo" : "TG",
"Thailand" : "TH",
"Tajikistan" : "TJ",
"Tokelau" : "TK",
"Timor-Leste" : "TL",
"Turkmenistan" : "TM",
"Tunisia" : "TN",
"Tonga" : "TO",
"Turkey" : "TR",
"Trinidad and Tobago" : "TT",
"Tuvalu" : "TV",
"Taiwan" : "TW",
"Tanzania" : "TZ",
"Ukraine" : "UA",
"Uganda" : "UG",
"US minor outlying islands" : "UM",
"United States" : "US",
"Uruguay" : "UY",
"Uzbekistan" : "UZ",
"Vatican City" : "VA",
"St Vincent and the Grenadines" : "VC",
"Venezuela" : "VE",
"Virgin Islands (UK)" : "VG",
"Virgin Islands (US)" : "VI",
"Vietnam" : "VN",
"Vanuatu" : "VU",
"Wallis and Futuna" : "WF",
"Samoa (Western)" : "WS",
"Yemen" : "YE",
"Mayotte" : "YT",
"South Africa" : "ZA",
"Zambia" : "ZM",
"Zimbabwe" : "ZW",
}
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
vegarwe/luma
|
tools/util/isocodes.py
|
Python
|
gpl-2.0
| 11,783
|
# Copyright 2014-2017 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# Copyright 2018-2019 Sergio Teruel <sergio.teruel@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.fields import first
class StockPicking(models.Model):
_inherit = 'stock.picking'
returned_ids = fields.Many2many(
comodel_name="stock.picking", compute="_compute_returned_ids",
string="Returned pickings")
source_picking_id = fields.Many2one(
comodel_name="stock.picking",
compute="_compute_source_picking_id",
string="Source picking")
@api.multi
def _compute_returned_ids(self):
for picking in self:
picking.returned_ids = picking.mapped(
'move_lines.returned_move_ids.picking_id')
@api.depends('move_lines.origin_returned_move_id')
def _compute_source_picking_id(self):
"""Get source piking from this picking. Only one origin is possible.
"""
for picking in self:
picking.source_picking_id = first(picking.mapped(
'move_lines.origin_returned_move_id.picking_id'))
def action_show_source_picking(self):
""" Open source picking form action """
return self.source_picking_id.get_formview_action()
|
Vauxoo/stock-logistics-workflow
|
stock_picking_show_return/models/stock_picking.py
|
Python
|
agpl-3.0
| 1,320
|
import tensorflow as tf
from niftynet.application.segmentation_application import \
SegmentationApplication
from niftynet.engine.application_factory import OptimiserFactory
from niftynet.engine.application_variables import CONSOLE
from niftynet.engine.application_variables import TF_SUMMARIES
from niftynet.layer.loss_segmentation import LossFunction
SUPPORTED_INPUT = set(['image', 'label', 'weight'])
class DecayLearningRateApplication(SegmentationApplication):
REQUIRED_CONFIG_SECTION = "SEGMENTATION"
def __init__(self, net_param, action_param, is_training):
SegmentationApplication.__init__(
self, net_param, action_param, is_training)
tf.logging.info('starting decay learning segmentation application')
self.learning_rate = None
self.current_lr = action_param.lr
if self.action_param.validation_every_n > 0:
raise NotImplementedError("validation process is not implemented "
"in this demo.")
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
data_dict = self.get_sampler()[0][0].pop_batch_op()
image = tf.cast(data_dict['image'], tf.float32)
net_out = self.net(image, self.is_training)
if self.is_training:
with tf.name_scope('Optimiser'):
self.learning_rate = tf.placeholder(tf.float32, shape=[])
optimiser_class = OptimiserFactory.create(
name=self.action_param.optimiser)
self.optimiser = optimiser_class.get_instance(
learning_rate=self.learning_rate)
loss_func = LossFunction(
n_class=self.segmentation_param.num_classes,
loss_type=self.action_param.loss_type)
data_loss = loss_func(
prediction=net_out,
ground_truth=data_dict.get('label', None),
weight_map=data_dict.get('weight', None))
loss = data_loss
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if self.net_param.decay > 0.0 and reg_losses:
reg_loss = tf.reduce_mean(
[tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
loss = data_loss + reg_loss
grads = self.optimiser.compute_gradients(loss)
# collecting gradients variables
gradients_collector.add_to_collection([grads])
# collecting output variables
outputs_collector.add_to_collection(
var=data_loss, name='dice_loss',
average_over_devices=False, collection=CONSOLE)
outputs_collector.add_to_collection(
var=self.learning_rate, name='lr',
average_over_devices=False, collection=CONSOLE)
outputs_collector.add_to_collection(
var=data_loss, name='dice_loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
else:
# converting logits into final output for
# classification probabilities or argmax classification labels
SegmentationApplication.connect_data_and_network(
self, outputs_collector, gradients_collector)
def set_iteration_update(self, iteration_message):
"""
This function will be called by the application engine at each
iteration.
"""
current_iter = iteration_message.current_iter
if iteration_message.is_training:
iteration_message.data_feed_dict[self.is_validation] = False
elif iteration_message.is_validation:
iteration_message.data_feed_dict[self.is_validation] = True
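        # feed the learning rate into its placeholder on every iteration; in this
        # comparison application current_lr is never modified, so it stays at the
        # initial action_param.lr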
iteration_message.data_feed_dict[self.learning_rate] = self.current_lr
|
NifTK/NiftyNet
|
demos/Learning_Rate_Decay/Demo_applications/no_decay_lr_comparison_application.py
|
Python
|
apache-2.0
| 3,933
|
import sys
import os
if sys.platform == "win32":
import _winreg
import yaku.utils
import yaku.task
if sys.platform == "win32":
from yaku.tools.mscommon.common \
import \
read_keys, open_key, close_key, get_output
from yaku.tools.msvc \
import \
_exec_command_factory
_ROOT = {"amd64": r"Software\Wow6432Node\Intel\Suites",
"ia32": r"Software\Intel\Compilers"}
_FC_ROOT = {"amd64": r"Software\Wow6432Node\Intel\Compilers",
"ia32": r"Software\Intel\Compilers"}
_ABI2BATABI = {"amd64": "intel64", "ia32": "ia32"}
def find_versions_fc(abi):
base = _winreg.HKEY_LOCAL_MACHINE
key = os.path.join(_FC_ROOT[abi], "Fortran")
availables = {}
versions = read_keys(base, key)
if versions is None:
return availables
    for v in versions:
        verk = os.path.join(key, v)
        # use a separate handle so the registry path in `key` is not clobbered
        # on the first iteration (it is needed again for the next version)
        hkey = open_key(verk)
        try:
            maj = _winreg.QueryValueEx(hkey, "Major Version")[0]
            min = _winreg.QueryValueEx(hkey, "Minor Version")[0]
            bld = _winreg.QueryValueEx(hkey, "Revision")[0]
            availables[(maj, min, bld)] = verk
        finally:
            close_key(hkey)
return availables
def product_dir_fc(root):
k = open_key(root)
try:
return _winreg.QueryValueEx(k, "ProductDir")[0]
finally:
close_key(k)
def setup(ctx):
env = ctx.env
env.update(
{"F77": ["ifort"],
"F77_LINK": ["ifort"],
"F77_LINKFLAGS": [],
"F77FLAGS": [],
"F77_TGT_F": ["-o"],
"F77_SRC_F": ["-c"],
"F77_LINK_TGT_F": ["-o"],
"F77_LINK_SRC_F": [],
"F77_OBJECT_FMT": "%s.o",
"F77_PROGRAM_FMT": "%s"})
if sys.platform == "win32":
env.update(
{"F77FLAGS": ["/nologo"],
"F77_TGT_F": ["/object:"],
"F77_SRC_F": ["/c"],
"F77_LINKFLAGS": ["/nologo"],
"F77_LINK_TGT_F": ["/link", "/out:"],
"F77_OBJECT_FMT": "%s.obj",
"F77_PROGRAM_FMT": "%s.exe"})
abi = "amd64"
availables = find_versions_fc(abi)
if len(availables) < 1:
raise ValueError("No ifort version found for abi %s" % abi)
versions = sorted(availables.keys())[::-1]
pdir = product_dir_fc(availables[versions[0]])
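        # run ifortvars.bat for the selected ABI and read back the environment it
        # exports (PATH, LIB, INCLUDE) in order to locate the compiler and its libraries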
batfile = os.path.join(pdir, "bin", "ifortvars.bat")
d = get_output(ctx, batfile, _ABI2BATABI[abi])
for k, v in d.items():
if k in ["LIB"]:
ctx.env.extend("LIBDIR", v, create=True)
elif k in ["INCLUDE"]:
ctx.env.extend("CPPPATH", v, create=True)
for p in d["PATH"]:
exe = os.path.join(p, "ifort.exe")
if os.path.exists(exe):
env["F77"] = [exe]
env["F77_LINK"] = [exe]
break
if sys.platform == "win32":
for task_class in ["f77", "fprogram"]:
klass = yaku.task.task_factory(task_class)
saved = klass.exec_command
klass.exec_command = _exec_command_factory(saved)
def detect(ctx):
if yaku.utils.find_program("ifort") is None:
return False
else:
return True
|
abadger/Bento
|
bento/private/_yaku/yaku/tools/ifort.py
|
Python
|
bsd-3-clause
| 3,189
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to enter the data for a rebase session.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from .Ui_HgRebaseDialog import Ui_HgRebaseDialog
class HgRebaseDialog(QDialog, Ui_HgRebaseDialog):
"""
Class implementing a dialog to enter the data for a rebase session.
"""
def __init__(self, tagsList, branchesList, bookmarksList=None,
parent=None):
"""
Constructor
@param tagsList list of tags (list of strings)
@param branchesList list of branches (list of strings)
@param bookmarksList list of bookmarks (list of strings)
@param parent reference to the parent widget (QWidget)
"""
super(HgRebaseDialog, self).__init__(parent)
self.setupUi(self)
self.tag1Combo.addItems(sorted(tagsList))
self.tag2Combo.addItems(sorted(tagsList))
self.branch1Combo.addItems(["default"] + sorted(branchesList))
self.branch2Combo.addItems(["default"] + sorted(branchesList))
if bookmarksList is not None:
self.bookmark1Combo.addItems(sorted(bookmarksList))
self.bookmark2Combo.addItems(sorted(bookmarksList))
else:
self.bookmark1Button.setHidden(True)
self.bookmark1Combo.setHidden(True)
self.bookmark2Button.setHidden(True)
self.bookmark2Combo.setHidden(True)
msh = self.minimumSizeHint()
self.resize(max(self.width(), msh.width()), msh.height())
def __updateOK(self):
"""
Private slot to update the OK button.
"""
enabled = True
if not self.parentButton.isChecked():
if self.id1Button.isChecked():
enabled = enabled and self.id1Edit.text() != ""
elif self.tag1Button.isChecked():
enabled = enabled and self.tag1Combo.currentText() != ""
elif self.branch1Button.isChecked():
enabled = enabled and self.branch1Combo.currentText() != ""
elif self.bookmark1Button.isChecked():
enabled = enabled and self.bookmark1Combo.currentText() != ""
if self.id2Button.isChecked():
enabled = enabled and self.id2Edit.text() != ""
elif self.tag2Button.isChecked():
enabled = enabled and self.tag2Combo.currentText() != ""
elif self.branch2Button.isChecked():
enabled = enabled and self.branch2Combo.currentText() != ""
elif self.bookmark2Button.isChecked():
enabled = enabled and self.bookmark2Combo.currentText() != ""
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled)
@pyqtSlot(bool)
def on_id1Button_toggled(self, checked):
"""
Private slot to handle changes of the ID1 select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_id2Button_toggled(self, checked):
"""
Private slot to handle changes of the ID2 select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_tag1Button_toggled(self, checked):
"""
Private slot to handle changes of the Tag1 select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_tag2Button_toggled(self, checked):
"""
Private slot to handle changes of the Tag2 select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_branch1Button_toggled(self, checked):
"""
Private slot to handle changes of the Branch1 select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_branch2Button_toggled(self, checked):
"""
Private slot to handle changes of the Branch2 select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_bookmark1Button_toggled(self, checked):
"""
Private slot to handle changes of the Bookmark1 select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(bool)
def on_bookmark2Button_toggled(self, checked):
"""
Private slot to handle changes of the Bookmark2 select button.
@param checked state of the button (boolean)
"""
self.__updateOK()
@pyqtSlot(str)
def on_id1Edit_textChanged(self, txt):
"""
Private slot to handle changes of the ID1 edit.
@param txt text of the edit (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_id2Edit_textChanged(self, txt):
"""
Private slot to handle changes of the ID2 edit.
@param txt text of the edit (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_tag1Combo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Tag1 combo.
@param txt text of the combo (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_tag2Combo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Tag2 combo.
@param txt text of the combo (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_branch1Combo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Branch1 combo.
@param txt text of the combo (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_branch2Combo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Branch2 combo.
@param txt text of the combo (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_bookmark1Combo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Bookmark1 combo.
@param txt text of the combo (string)
"""
self.__updateOK()
@pyqtSlot(str)
def on_bookmark2Combo_editTextChanged(self, txt):
"""
Private slot to handle changes of the Bookmark2 combo.
@param txt text of the combo (string)
"""
self.__updateOK()
def __getRevision(self, no):
"""
Private method to generate the revision.
@param no revision number to generate (1 or 2)
@return revision (string)
"""
if no == 1:
numberButton = self.number1Button
numberSpinBox = self.number1SpinBox
idButton = self.id1Button
idEdit = self.id1Edit
tagButton = self.tag1Button
tagCombo = self.tag1Combo
branchButton = self.branch1Button
branchCombo = self.branch1Combo
bookmarkButton = self.bookmark1Button
bookmarkCombo = self.bookmark1Combo
tipButton = None
else:
numberButton = self.number2Button
numberSpinBox = self.number2SpinBox
idButton = self.id2Button
idEdit = self.id2Edit
tagButton = self.tag2Button
tagCombo = self.tag2Combo
branchButton = self.branch2Button
branchCombo = self.branch2Combo
bookmarkButton = self.bookmark2Button
bookmarkCombo = self.bookmark2Combo
tipButton = self.tip2Button
if numberButton.isChecked():
return "rev({0})".format(numberSpinBox.value())
elif idButton.isChecked():
return "id({0})".format(idEdit.text())
elif tagButton.isChecked():
return tagCombo.currentText()
elif branchButton.isChecked():
return branchCombo.currentText()
elif bookmarkButton.isChecked():
return bookmarkCombo.currentText()
elif tipButton and tipButton.isChecked():
return ""
def getData(self):
"""
Public method to retrieve the data for the rebase session.
@return tuple with a source indicator of "S" or "B", the source
revision, the destination revision, a flag indicating to collapse,
a flag indicating to keep the original changesets, a flag
indicating to keep the original branch name and a flag indicating
to detach the source (string, string, string, boolean, boolean,
boolean, boolean)
"""
if self.sourceButton.isChecked():
indicator = "S"
elif self.baseButton.isChecked():
indicator = "B"
else:
indicator = ""
if indicator:
rev1 = self.__getRevision(1)
else:
rev1 = ""
return (
indicator,
rev1,
self.__getRevision(2),
self.collapseCheckBox.isChecked(),
self.keepChangesetsCheckBox.isChecked(),
self.keepBranchCheckBox.isChecked(),
self.detachCheckBox.isChecked()
)
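# A minimal usage sketch (illustrative only): this dialog is normally created by the Mercurial
# rebase extension with real tag/branch/bookmark lists; the placeholder values below are
# assumptions made purely for demonstration.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    dlg = HgRebaseDialog(["tip", "v1.0"], ["feature-x"], bookmarksList=["work"])
    if dlg.exec_() == QDialog.Accepted:
        # getData() returns (indicator, source revision, destination revision, collapse flag,
        # keep changesets flag, keep branch flag, detach flag) as documented above.
        print(dlg.getData())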
|
davy39/eric
|
Plugins/VcsPlugins/vcsMercurial/RebaseExtension/HgRebaseDialog.py
|
Python
|
gpl-3.0
| 9,628
|
# Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# Code borrowed from statsmodels
#
# License: BSD (3-clause)
import numpy as np
def _ecdf(x):
"""No frills empirical cdf used in fdrcorrection."""
nobs = len(x)
return np.arange(1, nobs + 1) / float(nobs)
def fdr_correction(pvals, alpha=0.05, method='indep'):
"""P-value correction with False Discovery Rate (FDR).
Correction for multiple comparison using FDR.
This covers Benjamini/Hochberg for independent or positively correlated and
Benjamini/Yekutieli for general or negatively correlated tests.
Parameters
----------
pvals : array_like
set of p-values of the individual tests.
alpha : float
error rate
method : 'indep' | 'negcorr'
If 'indep' it implements Benjamini/Hochberg for independent or if
'negcorr' it corresponds to Benjamini/Yekutieli.
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pval_corrected : array
pvalues adjusted for multiple hypothesis testing to limit FDR
Notes
-----
Reference:
Genovese CR, Lazar NA, Nichols T.
Thresholding of statistical maps in functional neuroimaging using the false
discovery rate. Neuroimage. 2002 Apr;15(4):870-8.
"""
pvals = np.asarray(pvals)
shape_init = pvals.shape
pvals = pvals.ravel()
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
sortrevind = pvals_sortind.argsort()
if method in ['i', 'indep', 'p', 'poscorr']:
ecdffactor = _ecdf(pvals_sorted)
elif method in ['n', 'negcorr']:
cm = np.sum(1. / np.arange(1, len(pvals_sorted) + 1))
ecdffactor = _ecdf(pvals_sorted) / cm
else:
raise ValueError("Method should be 'indep' and 'negcorr'")
reject = pvals_sorted < (ecdffactor * alpha)
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
else:
rejectmax = 0
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
pvals_corrected[pvals_corrected > 1.0] = 1.0
pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)
reject = reject[sortrevind].reshape(shape_init)
return reject, pvals_corrected
def bonferroni_correction(pval, alpha=0.05):
"""P-value correction with Bonferroni method.
Parameters
----------
pval : array_like
set of p-values of the individual tests.
alpha : float
error rate
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pval_corrected : array
        pvalues adjusted for multiple hypothesis testing to control the family-wise error rate
"""
pval = np.asarray(pval)
pval_corrected = pval * float(pval.size)
reject = pval_corrected < alpha
return reject, pval_corrected
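# A short, self-contained usage sketch of the two corrections defined above; the p-values are
# simulated here purely for illustration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    pvals = np.concatenate([rng.uniform(0, 0.001, 5), rng.uniform(0, 1, 45)])
    reject_fdr, pvals_fdr = fdr_correction(pvals, alpha=0.05, method='indep')
    reject_bonf, pvals_bonf = bonferroni_correction(pvals, alpha=0.05)
    print('FDR rejected %d tests, Bonferroni rejected %d tests'
          % (reject_fdr.sum(), reject_bonf.sum()))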
|
adykstra/mne-python
|
mne/stats/multi_comp.py
|
Python
|
bsd-3-clause
| 3,033
|
from django.http import HttpResponse
from django.core.cache import cache
from EventSubscriptions.models import EventWithSubscriptions
import requests
'''
This view provides the final result requested by the exercise. It first looks for the result in the
cache; if it is not there, it calls the class "EventWithSubscriptions" to perform the query. If the
response is successful, the result is stored in the cache. Cache timeout is 4.2 minutes (252 seconds).
'''
def event_subscriptions(req):
if req.method == 'GET': # we process GET requests only
if req.path.split('/')[2] == '': # it is mandatory to insert characters for the event_id
return HttpResponse("The path must be: \"/events-with-subscriptions/EVENT_ID\"")
event_id = req.path.split('/')[2]
        key = 'event-with-subscriptions-' + event_id  # one cache entry per event so results for different events do not collide
timeout = 252 # cache timeout (4.2 minutes)
result = cache.get(key) # trying to get the result from the cache, otherwise => query to the website
if result is None:
obj = EventWithSubscriptions(event_id)
result = obj.perform_and_combine_responses()
# if the method gives us a response code or an Exception, we do not store the result in the cache
if type(result) is int:
return HttpResponse("Server returned with the following error code: " + str(result) +
" . Please, try again later.")
if type(result) is requests.Timeout:
return HttpResponse("Connection timeout! Please, try again later")
if type(result) is requests.RequestException:
return HttpResponse("Problems with the network/server, please try again later.")
cache.set(key, result, timeout) # if the response is successful, setting the result in cache
return HttpResponse(result, content_type='application/json')
else:
return HttpResponse("Unrecognized method")
|
luispdm/Calendar42Proxy
|
EventSubscriptions/views.py
|
Python
|
gpl-3.0
| 1,983
|
#!/usr/bin/env python
#
# Author: Oliver J. Backhouse <olbackhouse@gmail.com>
# George H. Booth <george.booth@kcl.ac.uk>
#
'''
An example of Density-fitted AGF2 with MPI (the MPI support is very transparent,
so this example is almost identical to 02-dfagf2.py).
MPI support is provided by mpi4py module. The implementation is also hybrid
parallelized, and therefore may benefit from a combination of OMP threads
and MPI processes. OMP threads will automatically be used if OMP_NUM_THREADS
is appropriately set.
Default AGF2 corresponds to the AGF2(1,0) method outlined in the papers:
- O. J. Backhouse, M. Nusspickel and G. H. Booth, J. Chem. Theory Comput., 16, 1090 (2020).
- O. J. Backhouse and G. H. Booth, J. Chem. Theory Comput., 16, 6294 (2020).
'''
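# Example invocation (assumed launcher and process/thread counts; adapt to your MPI setup):
#     OMP_NUM_THREADS=4 mpirun -np 2 python 09-mpi_dfragf2.py
# Only rank 0 is given a nonzero verbosity below, so output is not duplicated across processes.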
from pyscf import gto, scf, agf2
mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz', verbose=3 if agf2.mpi_helper.rank == 0 else 0)
mf = scf.RHF(mol).density_fit(auxbasis='cc-pv5z-ri')
mf.conv_tol = 1e-12
mf.run()
# Run an AGF2 calculation
gf2 = agf2.AGF2(mf)
gf2.conv_tol = 1e-7
gf2.run(verbose=4 if agf2.mpi_helper.rank == 0 else 0)
# Print the first 3 ionization potentials
gf2.ipagf2(nroots=3)
# Print the first 3 electron affinities
gf2.eaagf2(nroots=3)
|
sunqm/pyscf
|
examples/agf2/09-mpi_dfragf2.py
|
Python
|
apache-2.0
| 1,251
|
"""
This module implements WSGI related helpers adapted from ``werkzeug.wsgi``
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from opbeat.utils import six
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
def get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in six.iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def get_environ(environ):
"""
Returns our whitelisted environment variables.
"""
for key in ('REMOTE_ADDR', 'SERVER_NAME', 'SERVER_PORT'):
if key in environ:
yield key, environ[key]
# `get_host` comes from `werkzeug.wsgi`
def get_host(environ):
"""Return the real host for the given WSGI environment. This takes care
of the `X-Forwarded-Host` header.
:param environ: the WSGI environment to get the host of.
"""
scheme = environ.get('wsgi.url_scheme')
if 'HTTP_X_FORWARDED_HOST' in environ:
result = environ['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in environ:
result = environ['HTTP_HOST']
else:
result = environ['SERVER_NAME']
if (scheme, str(environ['SERVER_PORT'])) not \
in (('https', '443'), ('http', '80')):
result += ':' + environ['SERVER_PORT']
if result.endswith(':80') and scheme == 'http':
result = result[:-3]
elif result.endswith(':443') and scheme == 'https':
result = result[:-4]
return result
# `get_current_url` comes from `werkzeug.wsgi`
def get_current_url(environ, root_only=False, strip_querystring=False,
host_only=False):
"""A handy helper function that recreates the full URL for the current
request or parts of it. Here an example:
>>> from werkzeug import create_environ
>>> env = create_environ("/?param=foo", "http://localhost/script")
>>> get_current_url(env)
'http://localhost/script/?param=foo'
>>> get_current_url(env, root_only=True)
'http://localhost/script/'
>>> get_current_url(env, host_only=True)
'http://localhost/'
>>> get_current_url(env, strip_querystring=True)
'http://localhost/script/'
:param environ: the WSGI environment to get the current URL from.
:param root_only: set `True` if you only want the root URL.
:param strip_querystring: set to `True` if you don't want the querystring.
:param host_only: set to `True` if the host URL should be returned.
"""
tmp = [environ['wsgi.url_scheme'], '://', get_host(environ)]
cat = tmp.append
if host_only:
return ''.join(tmp) + '/'
cat(quote(environ.get('SCRIPT_NAME', '').rstrip('/')))
if root_only:
cat('/')
else:
cat(quote('/' + environ.get('PATH_INFO', '').lstrip('/')))
if not strip_querystring:
qs = environ.get('QUERY_STRING')
if qs:
cat('?' + qs)
return ''.join(tmp)
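# A minimal usage sketch with a hand-built WSGI environ; all values are illustrative only.
if __name__ == '__main__':
    environ = {
        'wsgi.url_scheme': 'http',
        'HTTP_HOST': 'example.com',
        'SERVER_NAME': 'example.com',
        'SERVER_PORT': '80',
        'SCRIPT_NAME': '',
        'PATH_INFO': '/articles/42',
        'QUERY_STRING': 'page=2',
        'HTTP_USER_AGENT': 'demo-agent',
        'CONTENT_TYPE': 'text/plain',
        'REMOTE_ADDR': '127.0.0.1',
    }
    print(dict(get_headers(environ)))   # proper HTTP headers only
    print(dict(get_environ(environ)))   # whitelisted environ keys only
    print(get_host(environ))            # 'example.com'
    print(get_current_url(environ))     # 'http://example.com/articles/42?page=2'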
|
daikeren/opbeat_python
|
opbeat/utils/wsgi.py
|
Python
|
bsd-3-clause
| 3,335
|
import sys
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
import time
import pandas as pd
import sqlite3
TR_REQ_TIME_INTERVAL = 0.2
class Kiwoom(QAxWidget):
def __init__(self):
super().__init__()
self._create_kiwoom_instance()
self._set_signal_slots()
def _create_kiwoom_instance(self):
self.setControl("KHOPENAPI.KHOpenAPICtrl.1")
def _set_signal_slots(self):
self.OnEventConnect.connect(self._event_connect)
self.OnReceiveTrData.connect(self._receive_tr_data)
def comm_connect(self):
self.dynamicCall("CommConnect()")
self.login_event_loop = QEventLoop()
self.login_event_loop.exec_()
def _event_connect(self, err_code):
if err_code == 0:
print("connected")
else:
print("disconnected")
self.login_event_loop.exit()
def get_code_list_by_market(self, market):
code_list = self.dynamicCall("GetCodeListByMarket(QString)", market)
code_list = code_list.split(';')
return code_list[:-1]
def get_master_code_name(self, code):
code_name = self.dynamicCall("GetMasterCodeName(QString)", code)
return code_name
def get_connect_state(self):
ret = self.dynamicCall("GetConnectState()")
return ret
def set_input_value(self, id, value):
self.dynamicCall("SetInputValue(QString, QString)", id, value)
def comm_rq_data(self, rqname, trcode, next, screen_no):
self.dynamicCall("CommRqData(QString, QString, int, QString)", rqname, trcode, next, screen_no)
self.tr_event_loop = QEventLoop()
self.tr_event_loop.exec_()
def _comm_get_data(self, code, real_type, field_name, index, item_name):
ret = self.dynamicCall("CommGetData(QString, QString, QString, int, QString)", code,
real_type, field_name, index, item_name)
return ret.strip()
def _get_repeat_cnt(self, trcode, rqname):
ret = self.dynamicCall("GetRepeatCnt(QString, QString)", trcode, rqname)
return ret
def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):
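        # next (sPrevNext) == '2' means more data remains on the server and can be fetched by
        # repeating the same request with next=2 (continuous lookup), as done in __main__ below.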
if next == '2':
self.remained_data = True
else:
self.remained_data = False
if rqname == "opt10081_req":
self._opt10081(rqname, trcode)
try:
self.tr_event_loop.exit()
except AttributeError:
pass
def _opt10081(self, rqname, trcode):
data_cnt = self._get_repeat_cnt(trcode, rqname)
for i in range(data_cnt):
date = self._comm_get_data(trcode, "", rqname, i, "일자")
open = self._comm_get_data(trcode, "", rqname, i, "시가")
high = self._comm_get_data(trcode, "", rqname, i, "고가")
low = self._comm_get_data(trcode, "", rqname, i, "저가")
close = self._comm_get_data(trcode, "", rqname, i, "현재가")
volume = self._comm_get_data(trcode, "", rqname, i, "거래량")
self.ohlcv['date'].append(date)
self.ohlcv['open'].append(int(open))
self.ohlcv['high'].append(int(high))
self.ohlcv['low'].append(int(low))
self.ohlcv['close'].append(int(close))
self.ohlcv['volume'].append(int(volume))
if __name__ == "__main__":
app = QApplication(sys.argv)
kiwoom = Kiwoom()
kiwoom.comm_connect()
kiwoom.ohlcv = {'date': [], 'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}
# opt10081 TR 요청
kiwoom.set_input_value("종목코드", "039490")
kiwoom.set_input_value("기준일자", "20170224")
kiwoom.set_input_value("수정주가구분", 1)
kiwoom.comm_rq_data("opt10081_req", "opt10081", 0, "0101")
while kiwoom.remained_data == True:
time.sleep(TR_REQ_TIME_INTERVAL)
kiwoom.set_input_value("종목코드", "039490")
kiwoom.set_input_value("기준일자", "20170224")
kiwoom.set_input_value("수정주가구분", 1)
kiwoom.comm_rq_data("opt10081_req", "opt10081", 2, "0101")
df = pd.DataFrame(kiwoom.ohlcv, columns=['open', 'high', 'low', 'close', 'volume'], index=kiwoom.ohlcv['date'])
con = sqlite3.connect("c:/Users/Jason/stock.db")
df.to_sql('039490', con, if_exists='replace')
|
pystockhub/book
|
ch18/day01/Kiwoom.py
|
Python
|
mit
| 4,398
|
# Copyright (c) 2013-2015 Rusty Wagner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from BinaryData import *
from Struct import *
from HexEditor import *
from View import *
class PEFile(BinaryAccessor):
class SectionInfo:
def __init__(self):
self.virtual_size = None
self.virtual_address = None
self.size_of_raw_data = None
self.pointer_to_raw_data = None
self.characteristics = None
def __init__(self, data):
self.data = data
self.valid = False
self.callbacks = []
self.symbols_by_name = {}
self.symbols_by_addr = {}
if not self.is_pe():
return
try:
self.tree = Struct(self.data)
self.mz = self.tree.struct("MZ header", "mz")
self.mz.uint16("magic")
self.mz.uint16("lastsize")
self.mz.uint16("nblocks")
self.mz.uint16("nreloc")
self.mz.uint16("hdrsize")
self.mz.uint16("minalloc")
self.mz.uint16("maxalloc")
self.mz.uint16("ss")
self.mz.uint16("sp")
self.mz.uint16("checksum")
self.mz.uint16("ip")
self.mz.uint16("cs")
self.mz.uint16("relocpos")
self.mz.uint16("noverlay")
self.mz.bytes(8, "reserved1")
self.mz.uint16("oem_id")
self.mz.uint16("oem_info")
self.mz.bytes(20, "reserved2")
self.mz.uint32("pe_offset")
self.header = self.tree.struct("PE header", "header")
self.header.seek(self.mz.pe_offset)
self.header.uint32("magic")
self.header.uint16("machine")
self.header.uint16("section_count")
self.header.uint32("timestamp")
self.header.uint32("coff_symbol_table")
self.header.uint32("coff_symbol_count")
self.header.uint16("optional_header_size")
self.header.uint16("characteristics")
self.header.struct("Optional header", "opt")
self.header.opt.uint16("magic")
self.header.opt.uint8("major_linker_version")
self.header.opt.uint8("minor_linker_version")
self.header.opt.uint32("size_of_code")
self.header.opt.uint32("size_of_init_data")
self.header.opt.uint32("size_of_uninit_data")
self.header.opt.uint32("address_of_entry")
self.header.opt.uint32("base_of_code")
if self.header.opt.magic == 0x10b: # 32-bit
self.bits = 32
self.header.opt.uint32("base_of_data")
self.header.opt.uint32("image_base")
self.header.opt.uint32("section_align")
self.header.opt.uint32("file_align")
self.header.opt.uint16("major_os_version")
self.header.opt.uint16("minor_os_version")
self.header.opt.uint16("major_image_version")
self.header.opt.uint16("minor_image_version")
self.header.opt.uint16("major_subsystem_version")
self.header.opt.uint16("minor_subsystem_version")
self.header.opt.uint32("win32_version")
self.header.opt.uint32("size_of_image")
self.header.opt.uint32("size_of_headers")
self.header.opt.uint32("checksum")
self.header.opt.uint16("subsystem")
self.header.opt.uint16("dll_characteristics")
self.header.opt.uint32("size_of_stack_reserve")
self.header.opt.uint32("size_of_stack_commit")
self.header.opt.uint32("size_of_heap_reserve")
self.header.opt.uint32("size_of_heap_commit")
self.header.opt.uint32("loader_flags")
self.header.opt.uint32("data_dir_count")
elif self.header.opt.magic == 0x20b: # 64-bit
self.bits = 64
self.header.opt.uint64("image_base")
self.header.opt.uint32("section_align")
self.header.opt.uint32("file_align")
self.header.opt.uint16("major_os_version")
self.header.opt.uint16("minor_os_version")
self.header.opt.uint16("major_image_version")
self.header.opt.uint16("minor_image_version")
self.header.opt.uint16("major_subsystem_version")
self.header.opt.uint16("minor_subsystem_version")
self.header.opt.uint32("win32_version")
self.header.opt.uint32("size_of_image")
self.header.opt.uint32("size_of_headers")
self.header.opt.uint32("checksum")
self.header.opt.uint16("subsystem")
self.header.opt.uint16("dll_characteristics")
self.header.opt.uint64("size_of_stack_reserve")
self.header.opt.uint64("size_of_stack_commit")
self.header.opt.uint64("size_of_heap_reserve")
self.header.opt.uint64("size_of_heap_commit")
self.header.opt.uint32("loader_flags")
self.header.opt.uint32("data_dir_count")
else:
self.valid = False
return
self.image_base = self.header.opt.image_base
self.data_dirs = self.header.array(self.header.opt.data_dir_count, "data_dirs")
for i in xrange(0, self.header.opt.data_dir_count):
self.data_dirs[i].uint32("virtual_address")
self.data_dirs[i].uint32("size")
self.sections = []
header_section_obj = PEFile.SectionInfo()
header_section_obj.virtual_size = self.header.opt.size_of_headers
header_section_obj.virtual_address = 0
header_section_obj.size_of_raw_data = self.header.opt.size_of_headers
header_section_obj.pointer_to_raw_data = 0
header_section_obj.characteristics = 0
self.sections.append(header_section_obj)
self.tree.array(self.header.section_count, "sections")
for i in xrange(0, self.header.section_count):
section = self.tree.sections[i]
section.seek(self.mz.pe_offset + self.header.optional_header_size + 24 + (i * 40))
section.bytes(8, "name")
section.uint32("virtual_size")
section.uint32("virtual_address")
section.uint32("size_of_raw_data")
section.uint32("pointer_to_raw_data")
section.uint32("pointer_to_relocs")
section.uint32("pointer_to_line_numbers")
section.uint16("reloc_count")
section.uint16("line_number_count")
section.uint32("characteristics")
section_obj = PEFile.SectionInfo()
section_obj.virtual_size = section.virtual_size
section_obj.virtual_address = section.virtual_address & ~(self.header.opt.section_align - 1)
section_obj.size_of_raw_data = section.size_of_raw_data
section_obj.pointer_to_raw_data = section.pointer_to_raw_data & ~(self.header.opt.file_align - 1)
section_obj.characteristics = section.characteristics
self.sections.append(section_obj)
self.symbols_by_name["_start"] = self.entry()
self.symbols_by_addr[self.entry()] = "_start"
if self.header.opt.data_dir_count >= 2:
self.imports = self.tree.array(0, "imports")
for i in xrange(0, self.data_dirs[1].size / 20):
if self.read(self.image_base + self.data_dirs[1].virtual_address + (i * 20), 4) == "\0\0\0\0":
break
if self.read(self.image_base + self.data_dirs[1].virtual_address + (i * 20) + 16, 4) == "\0\0\0\0":
break
self.imports.append()
dll = self.imports[i]
dll.seek(self.virtual_address_to_file_offset(self.image_base + self.data_dirs[1].virtual_address) + (i * 20))
dll.uint32("lookup")
dll.uint32("timestamp")
dll.uint32("forward_chain")
dll.uint32("name")
dll.uint32("iat")
for dll in self.imports:
name = self.read_string(self.image_base + dll.name).split('.')
if len(name) > 1:
name = '.'.join(name[0:-1])
else:
name = name[0]
entry_ofs = self.image_base + dll.lookup
iat_ofs = self.image_base + dll.iat
while True:
if self.bits == 32:
entry = self.read_uint32(entry_ofs)
is_ordinal = (entry & 0x80000000) != 0
entry &= 0x7fffffff
else:
entry = self.read_uint64(entry_ofs)
is_ordinal = (entry & 0x8000000000000000) != 0
entry &= 0x7fffffffffffffff
if (not is_ordinal) and (entry == 0):
break
if is_ordinal:
func = name + "!Ordinal%d" % (entry & 0xffff)
else:
func = name + "!" + self.read_string(self.image_base + entry + 2)
self.symbols_by_name[func] = iat_ofs
self.symbols_by_addr[iat_ofs] = func
entry_ofs += self.bits / 8
iat_ofs += self.bits / 8
if (self.header.opt.data_dir_count >= 1) and (self.data_dirs[0].size >= 40):
self.exports = self.tree.struct("Export directory", "exports")
self.exports.seek(self.virtual_address_to_file_offset(self.image_base + self.data_dirs[0].virtual_address))
self.exports.uint32("characteristics")
self.exports.uint32("timestamp")
self.exports.uint16("major_version")
self.exports.uint16("minor_version")
self.exports.uint32("dll_name")
self.exports.uint32("base")
self.exports.uint32("function_count")
self.exports.uint32("name_count")
self.exports.uint32("address_of_functions")
self.exports.uint32("address_of_names")
self.exports.uint32("address_of_name_ordinals")
self.exports.array(self.exports.function_count, "functions")
for i in xrange(0, self.exports.function_count):
self.exports.functions[i].seek(self.virtual_address_to_file_offset(self.image_base + self.exports.address_of_functions) + (i * 4))
self.exports.functions[i].uint32("address")
self.exports.array(self.exports.name_count, "names")
for i in xrange(0, self.exports.name_count):
self.exports.names[i].seek(self.virtual_address_to_file_offset(self.image_base + self.exports.address_of_names) + (i * 4))
self.exports.names[i].uint32("address_of_name")
self.exports.array(self.exports.name_count, "name_ordinals")
for i in xrange(0, self.exports.name_count):
self.exports.name_ordinals[i].seek(self.virtual_address_to_file_offset(self.image_base + self.exports.address_of_name_ordinals) + (i * 2))
self.exports.name_ordinals[i].uint16("ordinal")
for i in xrange(0, self.exports.name_count):
function_index = self.exports.name_ordinals[i].ordinal - self.exports.base
address = self.image_base + self.exports.functions[function_index].address
name = self.read_string(self.image_base + self.exports.names[i].address_of_name)
self.symbols_by_addr[address] = name
self.symbols_by_name[name] = address
self.tree.complete()
self.valid = True
except:
self.valid = False
if self.valid:
self.data.add_callback(self)
def read_string(self, addr):
result = ""
while True:
ch = self.read(addr, 1)
addr += 1
if (len(ch) == 0) or (ch == '\0'):
break
result += ch
return result
	def virtual_address_to_file_offset(self, addr):
		cur = None
		for i in self.sections:
if ((addr >= (self.image_base + i.virtual_address)) and (addr < (self.image_base + i.virtual_address + i.virtual_size))) and (i.virtual_size != 0):
cur = i
if cur == None:
return None
ofs = addr - (self.image_base + cur.virtual_address)
return cur.pointer_to_raw_data + ofs
def read(self, ofs, len):
result = ""
while len > 0:
cur = None
for i in self.sections:
if ((ofs >= (self.image_base + i.virtual_address)) and (ofs < (self.image_base + i.virtual_address + i.virtual_size))) and (i.virtual_size != 0):
cur = i
if cur == None:
break
prog_ofs = ofs - (self.image_base + cur.virtual_address)
mem_len = cur.virtual_size - prog_ofs
file_len = cur.size_of_raw_data - prog_ofs
if mem_len > len:
mem_len = len
if file_len > len:
file_len = len
if file_len <= 0:
result += "\x00" * mem_len
len -= mem_len
ofs += mem_len
continue
result += self.data.read(cur.pointer_to_raw_data + prog_ofs, file_len)
len -= file_len
ofs += file_len
return result
def next_valid_addr(self, ofs):
result = -1
for i in self.sections:
if ((self.image_base + i.virtual_address) >= ofs) and (i.virtual_size != 0) and ((result == -1) or ((self.image_base + i.virtual_address) < result)):
result = self.image_base + i.virtual_address
return result
def get_modification(self, ofs, len):
result = []
while len > 0:
cur = None
for i in self.sections:
if ((ofs >= (self.image_base + i.virtual_address)) and (ofs < (self.image_base + i.virtual_address + i.virtual_size))) and (i.virtual_size != 0):
cur = i
if cur == None:
break
prog_ofs = ofs - (self.image_base + cur.virtual_address)
mem_len = cur.virtual_size - prog_ofs
file_len = cur.size_of_raw_data - prog_ofs
if mem_len > len:
mem_len = len
if file_len > len:
file_len = len
if file_len <= 0:
result += [DATA_ORIGINAL] * mem_len
len -= mem_len
ofs += mem_len
continue
result += self.data.get_modification(cur.pointer_to_raw_data + prog_ofs, file_len)
len -= file_len
ofs += file_len
return result
def write(self, ofs, data):
result = 0
while len(data) > 0:
cur = None
for i in self.sections:
if ((ofs >= (self.image_base + i.virtual_address)) and (ofs < (self.image_base + i.virtual_address + i.virtual_size))) and (i.virtual_size != 0):
cur = i
if cur == None:
break
prog_ofs = ofs - (self.image_base + cur.virtual_address)
mem_len = cur.virtual_size - prog_ofs
file_len = cur.size_of_raw_data - prog_ofs
			if mem_len > len(data):
				mem_len = len(data)
			if file_len > len(data):
				file_len = len(data)
if file_len <= 0:
break
result += self.data.write(cur.pointer_to_raw_data + prog_ofs, data[0:file_len])
data = data[file_len:]
ofs += file_len
return result
def insert(self, ofs, data):
return 0
def remove(self, ofs, size):
return 0
def notify_data_write(self, data, ofs, contents):
# Find sections that hold data backed by updated regions of the file
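		# For example, a 16-byte write starting 4 bytes before pointer_to_raw_data overlaps the
		# section by 12 bytes: from_start is clamped to 0, data_ofs becomes 4, length becomes 12,
		# and callbacks are notified for those 12 bytes starting at image_base + virtual_address.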
for i in self.sections:
if ((ofs + len(contents)) > i.pointer_to_raw_data) and (ofs < (i.pointer_to_raw_data + i.size_of_raw_data)) and (i.virtual_size != 0):
# This section has been updated, compute which region has been changed
from_start = ofs - i.pointer_to_raw_data
data_ofs = 0
length = len(contents)
if from_start < 0:
length += from_start
data_ofs -= from_start
from_start = 0
if (from_start + length) > i.size_of_raw_data:
length = i.size_of_raw_data - from_start
# Notify callbacks
if length > 0:
for cb in self.callbacks:
if hasattr(cb, "notify_data_write"):
cb.notify_data_write(self, self.image_base + i.virtual_address + from_start,
contents[data_ofs:(data_ofs + length)])
def save(self, filename):
self.data.save(filename)
def start(self):
return self.image_base
def entry(self):
return self.image_base + self.header.opt.address_of_entry
def __len__(self):
max = None
for i in self.sections:
if ((max == None) or ((self.image_base + i.virtual_address + i.virtual_size) > max)) and (i.virtual_size != 0):
max = self.image_base + i.virtual_address + i.virtual_size
return max - self.start()
def is_pe(self):
if self.data.read(0, 2) != "MZ":
return False
ofs = self.data.read(0x3c, 4)
if len(ofs) != 4:
return False
ofs = struct.unpack("<I", ofs)[0]
if self.data.read(ofs, 4) != "PE\0\0":
return False
magic = self.data.read(ofs + 24, 2)
if len(magic) != 2:
return False
magic = struct.unpack("<H", magic)[0]
return (magic == 0x10b) or (magic == 0x20b)
def architecture(self):
if self.header.machine == 0x14c:
return "x86"
if self.header.machine == 0x8664:
return "x86_64"
if self.header.machine == 0x166:
return "mips"
if self.header.machine == 0x266:
return "mips16"
if self.header.machine == 0x366:
return "mips"
if self.header.machine == 0x466:
return "mips16"
if self.header.machine == 0x1f0:
return "ppc"
if self.header.machine == 0x1f1:
return "ppc"
if self.header.machine == 0x1c0:
return "arm"
if self.header.machine == 0x1c2:
return "thumb"
if self.header.machine == 0x1c4:
return "thumb"
if self.header.machine == 0xaa64:
return "arm64"
if self.header.machine == 0x200:
return "ia64"
return None
def decorate_plt_name(self, name):
return name + "@IAT"
def create_symbol(self, addr, name):
self.symbols_by_name[name] = addr
self.symbols_by_addr[addr] = name
def delete_symbol(self, addr, name):
if name in self.symbols_by_name:
del(self.symbols_by_name[name])
if addr in self.symbols_by_addr:
del(self.symbols_by_addr[addr])
def add_callback(self, cb):
self.callbacks.append(cb)
def remove_callback(self, cb):
self.callbacks.remove(cb)
def is_modified(self):
return self.data.is_modified()
def find(self, regex, addr):
while (addr < self.end()) and (addr != -1):
data = self.read(addr, 0xfffffffff)
match = regex.search(data)
if match != None:
return match.start() + addr
addr += len(data)
addr = self.next_valid_addr(addr)
return -1
def has_undo_actions(self):
return self.data.has_undo_actions()
def commit_undo(self, before_loc, after_loc):
self.data.commit_undo(before_loc, after_loc)
def undo(self):
self.data.undo()
def redo(self):
self.data.redo()
class PEViewer(HexEditor):
def __init__(self, data, filename, view, parent):
view.exe = PEFile(data)
super(PEViewer, self).__init__(view.exe, filename, view, parent)
view.register_navigate("exe", self, self.navigate)
def getPriority(data, ext):
if data.read(0, 2) != "MZ":
return -1
ofs = data.read(0x3c, 4)
if len(ofs) != 4:
return -1
ofs = struct.unpack("<I", ofs)[0]
if data.read(ofs, 4) != "PE\0\0":
return -1
magic = data.read(ofs + 24, 2)
if len(magic) != 2:
return -1
magic = struct.unpack("<H", magic)[0]
if (magic == 0x10b) or (magic == 0x20b):
return 25
return -1
getPriority = staticmethod(getPriority)
def getViewName():
return "PE viewer"
getViewName = staticmethod(getViewName)
def getShortViewName():
return "PE"
getShortViewName = staticmethod(getShortViewName)
def handlesNavigationType(name):
return name == "exe"
handlesNavigationType = staticmethod(handlesNavigationType)
ViewTypes += [PEViewer]
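# A standalone sketch of the MZ/PE magic checks used by PEFile.is_pe and PEViewer.getPriority
# above, operating directly on raw bytes; the file name here is an assumption for illustration.
if __name__ == "__main__":
	import struct
	raw = open("example.exe", "rb").read()
	if raw[0:2] == "MZ":
		pe_ofs = struct.unpack("<I", raw[0x3c:0x40])[0]
		if raw[pe_ofs:pe_ofs + 4] == "PE\0\0":
			magic = struct.unpack("<H", raw[pe_ofs + 24:pe_ofs + 26])[0]
			if magic in (0x10b, 0x20b):
				print("PE file, %d-bit" % (32 if magic == 0x10b else 64))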
|
kanpol/bina
|
PEFile.py
|
Python
|
gpl-2.0
| 18,019
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import bitcoin
import keystore
from keystore import bip44_derivation
from wallet import Wallet, Imported_Wallet, Standard_Wallet, Multisig_Wallet, wallet_types
from i18n import _
from plugins import run_hook
class BaseWizard(object):
def __init__(self, config, storage):
super(BaseWizard, self).__init__()
self.config = config
self.storage = storage
self.wallet = None
self.stack = []
self.plugin = None
self.keystores = []
self.is_kivy = config.get('gui') == 'kivy'
self.seed_type = None
def run(self, *args):
action = args[0]
args = args[1:]
self.stack.append((action, args))
if not action:
return
if type(action) is tuple:
self.plugin, action = action
if self.plugin and hasattr(self.plugin, action):
f = getattr(self.plugin, action)
f(self, *args)
elif hasattr(self, action):
f = getattr(self, action)
f(*args)
else:
raise BaseException("unknown action", action)
def can_go_back(self):
return len(self.stack)>1
def go_back(self):
if not self.can_go_back():
return
self.stack.pop()
action, args = self.stack.pop()
self.run(action, *args)
def new(self):
name = os.path.basename(self.storage.path)
title = _("Create") + ' ' + name.decode('utf8')
message = '\n'.join([
_("What kind of wallet do you want to create?")
])
wallet_kinds = [
('standard', _("Standard wallet")),
('2fa', _("Wallet with two-factor authentication")),
('multisig', _("Multi-signature wallet")),
('imported', _("Watch Vertcoin addresses")),
]
choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)
def load_2fa(self):
self.storage.put('wallet_type', '2fa')
self.storage.put('use_trustedcoin', True)
self.plugin = self.plugins.load_plugin('trustedcoin')
def on_wallet_type(self, choice):
self.wallet_type = choice
if choice == 'standard':
action = 'choose_keystore'
elif choice == 'multisig':
action = 'choose_multisig'
elif choice == '2fa':
self.load_2fa()
action = self.storage.get_action()
elif choice == 'imported':
action = 'import_addresses'
self.run(action)
def choose_multisig(self):
def on_multisig(m, n):
self.multisig_type = "%dof%d"%(m, n)
self.storage.put('wallet_type', self.multisig_type)
self.n = n
self.run('choose_keystore')
self.multisig_dialog(run_next=on_multisig)
def choose_keystore(self):
assert self.wallet_type in ['standard', 'multisig']
i = len(self.keystores)
title = _('Add cosigner') + ' (%d of %d)'%(i+1, self.n) if self.wallet_type=='multisig' else _('Keystore')
if self.wallet_type =='standard' or i==0:
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('create_seed', _('Create a new seed')),
('restore_from_seed', _('I already have a seed')),
('restore_from_key', _('Use public or private keys')),
]
#if not self.is_kivy:
# choices.append(('choose_hw_device', _('Use a hardware device')))
else:
message = _('Add a cosigner to your multi-sig wallet')
choices = [
('restore_from_key', _('Enter cosigner key')),
('restore_from_seed', _('Enter cosigner seed')),
]
#if not self.is_kivy:
# choices.append(('choose_hw_device', _('Cosign with hardware device')))
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def import_addresses(self):
v = keystore.is_address_list
title = _("Import Vertcoin Addresses")
message = _("Enter a list of Vertcoin addresses. This will create a watching-only wallet.")
self.add_xpub_dialog(title=title, message=message, run_next=self.on_import_addresses, is_valid=v)
def on_import_addresses(self, text):
assert keystore.is_address_list(text)
self.wallet = Imported_Wallet(self.storage)
for x in text.split():
self.wallet.import_address(x)
self.terminate()
def restore_from_key(self):
if self.wallet_type == 'standard':
v = keystore.is_any_key
title = _("Create keystore from keys")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv), or a list of Vertcoin private keys.")
])
self.add_xpub_dialog(title=title, message=message, run_next=self.on_restore_from_key, is_valid=v)
else:
v = keystore.is_bip32_key
i = len(self.keystores) + 1
self.add_cosigner_dialog(index=i, run_next=self.on_restore_from_key, is_valid=v)
def on_restore_from_key(self, text):
k = keystore.from_keys(text)
self.on_keystore(k)
def choose_hw_device(self):
title = _('Hardware Keystore')
# check available plugins
support = self.plugins.get_hardware_support()
if not support:
msg = '\n'.join([
_('No hardware wallet support found on your system.'),
_('Please install the relevant libraries (eg python-trezor for Trezor).'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_hw_device())
return
# scan devices
devices = []
devmgr = self.plugins.device_manager
for name, description, plugin in support:
try:
# FIXME: side-effect: unpaired_device_info sets client.handler
u = devmgr.unpaired_device_infos(None, plugin)
except:
devmgr.print_error("error", name)
continue
devices += map(lambda x: (name, x), u)
if not devices:
msg = ''.join([
_('No hardware device detected.') + '\n',
_('To trigger a rescan, press \'Next\'.') + '\n\n',
_('If your device is not detected on Windows, go to "Settings", "Devices", "Connected devices", and do "Remove device". Then, plug your device again.') + ' ',
_('On Linux, you might have to add a new permission to your udev rules.'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_hw_device())
return
# select device
self.devices = devices
choices = []
for name, info in devices:
state = _("initialized") if info.initialized else _("wiped")
label = info.label or _("An unnamed %s")%name
descr = "%s [%s, %s]" % (label, name, state)
choices.append(((name, info), descr))
msg = _('Select a device') + ':'
self.choice_dialog(title=title, message=msg, choices=choices, run_next=self.on_device)
def on_device(self, name, device_info):
self.plugin = self.plugins.get_plugin(name)
try:
self.plugin.setup_device(device_info, self)
except BaseException as e:
self.show_error(str(e))
self.choose_hw_device()
return
if self.wallet_type=='multisig':
# There is no general standard for HD multisig.
# This is partially compatible with BIP45; assumes index=0
self.on_hw_derivation(name, device_info, "m/45'/0")
else:
f = lambda x: self.run('on_hw_derivation', name, device_info, str(x))
self.derivation_dialog(f)
def derivation_dialog(self, f):
default = bip44_derivation(0)
message = '\n'.join([
_('Enter your wallet derivation here.'),
_('If you are not sure what this is, leave this field unchanged.')
])
self.line_dialog(run_next=f, title=_('Derivation'), message=message, default=default, test=bitcoin.is_bip32_derivation)
def on_hw_derivation(self, name, device_info, derivation):
from keystore import hardware_keystore
xpub = self.plugin.get_xpub(device_info.device.id_, derivation, self)
if xpub is None:
self.show_error('Cannot read xpub from device')
return
d = {
'type': 'hardware',
'hw_type': name,
'derivation': derivation,
'xpub': xpub,
'label': device_info.label,
}
k = hardware_keystore(d)
self.on_keystore(k)
def passphrase_dialog(self, run_next):
title = _('Seed extension')
message = '\n'.join([
_('You may extend your seed with custom words.'),
_('Your seed extension must be saved together with your seed.'),
])
warning = '\n'.join([
_('Note that this is NOT your encryption password.'),
_('If you do not know what this is, leave this field empty.'),
])
self.line_dialog(title=title, message=message, warning=warning, default='', test=lambda x:True, run_next=run_next)
def restore_from_seed(self):
self.opt_bip39 = True
self.opt_ext = True
test = bitcoin.is_seed if self.wallet_type == 'standard' else bitcoin.is_new_seed
self.restore_seed_dialog(run_next=self.on_restore_seed, test=test)
def on_restore_seed(self, seed, is_bip39, is_ext):
self.seed_type = 'bip39' if is_bip39 else bitcoin.seed_type(seed)
if self.seed_type == 'bip39':
f = lambda passphrase: self.on_restore_bip39(seed, passphrase)
self.passphrase_dialog(run_next=f) if is_ext else f('')
elif self.seed_type in ['standard', 'segwit']:
f = lambda passphrase: self.run('create_keystore', seed, passphrase)
self.passphrase_dialog(run_next=f) if is_ext else f('')
elif self.seed_type == 'old':
self.run('create_keystore', seed, '')
elif self.seed_type == '2fa':
if self.is_kivy:
self.show_error('2FA seeds are not supported in this version')
self.run('restore_from_seed')
else:
self.load_2fa()
self.run('on_restore_seed', seed, is_ext)
else:
            raise BaseException('Unknown seed type', self.seed_type)
def on_restore_bip39(self, seed, passphrase):
f = lambda x: self.run('on_bip44', seed, passphrase, str(x))
self.derivation_dialog(f)
def create_keystore(self, seed, passphrase):
k = keystore.from_seed(seed, passphrase)
self.on_keystore(k)
def on_bip44(self, seed, passphrase, derivation):
k = keystore.BIP32_KeyStore({})
bip32_seed = keystore.bip39_to_seed(seed, passphrase)
k.add_xprv_from_seed(bip32_seed, 0, derivation)
self.on_keystore(k)
def on_keystore(self, k):
if self.wallet_type == 'standard':
self.keystores.append(k)
self.run('create_wallet')
elif self.wallet_type == 'multisig':
if k.xpub in map(lambda x: x.xpub, self.keystores):
self.show_error(_('Error: duplicate master public key'))
self.run('choose_keystore')
return
self.keystores.append(k)
if len(self.keystores) == 1:
xpub = k.get_master_public_key()
self.stack = []
self.run('show_xpub_and_add_cosigners', xpub)
elif len(self.keystores) < self.n:
self.run('choose_keystore')
else:
self.run('create_wallet')
def create_wallet(self):
if any(k.may_have_password() for k in self.keystores):
self.request_password(run_next=self.on_password)
else:
self.on_password(None, False)
def on_password(self, password, encrypt):
self.storage.set_password(password, encrypt)
for k in self.keystores:
if k.may_have_password():
k.update_password(None, password)
if self.wallet_type == 'standard':
self.storage.put('seed_type', self.seed_type)
self.storage.put('keystore', k.dump())
self.wallet = Standard_Wallet(self.storage)
self.run('create_addresses')
elif self.wallet_type == 'multisig':
for i, k in enumerate(self.keystores):
self.storage.put('x%d/'%(i+1), k.dump())
self.storage.write()
self.wallet = Multisig_Wallet(self.storage)
self.run('create_addresses')
def show_xpub_and_add_cosigners(self, xpub):
self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))
def add_cosigners(self, password, i):
self.add_cosigner_dialog(run_next=lambda x: self.on_cosigner(x, password, i), index=i, is_valid=keystore.is_xpub)
def on_cosigner(self, text, password, i):
k = keystore.from_keys(text, password)
self.on_keystore(k)
def create_seed(self):
import mnemonic
self.seed_type = 'segwit' if bitcoin.TESTNET and self.config.get('segwit') else 'standard'
seed = mnemonic.Mnemonic('en').make_seed(self.seed_type)
self.opt_bip39 = False
f = lambda x: self.request_passphrase(seed, x)
self.show_seed_dialog(run_next=f, seed_text=seed)
def request_passphrase(self, seed, opt_passphrase):
if opt_passphrase:
f = lambda x: self.confirm_seed(seed, x)
self.passphrase_dialog(run_next=f)
else:
self.run('confirm_seed', seed, '')
def confirm_seed(self, seed, passphrase):
f = lambda x: self.confirm_passphrase(seed, passphrase)
self.confirm_seed_dialog(run_next=f, test=lambda x: x==seed)
def confirm_passphrase(self, seed, passphrase):
f = lambda x: self.run('create_keystore', seed, x)
if passphrase:
title = _('Confirm Seed Extension')
message = '\n'.join([
_('Your seed extension must be saved together with your seed.'),
_('Please type it here.'),
])
self.line_dialog(run_next=f, title=title, message=message, default='', test=lambda x: x==passphrase)
else:
f('')
def create_addresses(self):
def task():
self.wallet.synchronize()
self.wallet.storage.write()
self.terminate()
msg = _("Electrum is generating your addresses, please wait.")
self.waiting_dialog(task, msg)
|
pknight007/electrum-vtc
|
lib/base_wizard.py
|
Python
|
mit
| 16,334
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add Snapshot model
Create Date: 2016-07-28 14:09:21.338385
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "2a5a39600741"
down_revision = "4afe69ce3c38"
def upgrade():
"""Add snapshots table"""
op.create_table(
"snapshots",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("parent_id", sa.Integer(), nullable=False),
sa.Column("parent_type", sa.String(length=250), nullable=False),
sa.Column("child_id", sa.Integer(), nullable=False),
sa.Column("child_type", sa.String(length=250), nullable=False),
sa.Column("revision_id", sa.Integer(), nullable=False),
sa.Column("context_id", sa.Integer(), nullable=True),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("modified_by_id", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["revision_id"], ["revisions.id"])
)
op.create_index("ix_snapshots_parent", "snapshots",
["parent_type", "parent_id"],
unique=False)
op.create_index("ix_snapshots_child", "snapshots",
["child_type", "child_id"],
unique=False)
op.create_index("fk_snapshots_contexts", "snapshots", ["context_id"],
unique=False)
op.create_index("ix_snapshots_updated_at", "snapshots", ["updated_at"],
unique=False)
op.create_unique_constraint(
None, "snapshots",
["parent_type", "parent_id", "child_type", "child_id"])
def downgrade():
"""Drop snapshots table and audit's FF for snapshots"""
op.drop_table("snapshots")
|
j0gurt/ggrc-core
|
src/ggrc/migrations/versions/20160728140921_2a5a39600741_add_snapshot_model.py
|
Python
|
apache-2.0
| 1,958
|
import os
import sys
def parse_args():
import optparse
parser = optparse.OptionParser()
parser.add_option('--where', default=None)
opts, args = parser.parse_args()
return opts, args
def run_tests(base_dir=None, apps=None, verbosity=1, interactive=False):
base_dir = base_dir or os.path.dirname(__file__)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
sys.path.insert(0, os.path.join(base_dir, 'tests'))
import django
if django.VERSION >= (1, 7):
django.setup()
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=verbosity,
interactive=interactive, failfast=False)
if apps:
app_tests = [x.strip() for x in apps if x]
else:
app_tests = [
'generic_tagging',
]
failures = test_runner.run_tests(app_tests)
sys.exit(bool(failures))
if __name__ == '__main__':
opts, args = parse_args()
run_tests(opts.where, args)
|
giginet/django-generic-tagging
|
runtests.py
|
Python
|
mit
| 1,063
|
#!/usr/bin/env python
# Plotfreq plots spectral data from the buffer and allows interactive selection
# of frequency bands for further processing
#
# This software is part of the EEGsynth project, see <https://github.com/eegsynth/eegsynth>.
#
# Copyright (C) 2017-2020 EEGsynth project
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyqtgraph.Qt import QtGui, QtCore
import configparser
import redis
import argparse
import numpy as np
import os
import pyqtgraph as pg
import sys
import time
import signal
from scipy.fftpack import fft, fftfreq
from scipy.signal import detrend
if hasattr(sys, 'frozen'):
path = os.path.split(sys.executable)[0]
file = os.path.split(sys.executable)[-1]
name = os.path.splitext(file)[0]
elif __name__=='__main__' and sys.argv[0] != '':
path = os.path.split(sys.argv[0])[0]
file = os.path.split(sys.argv[0])[-1]
name = os.path.splitext(file)[0]
elif __name__=='__main__':
path = os.path.abspath('')
file = os.path.split(path)[-1] + '.py'
name = os.path.splitext(file)[0]
else:
path = os.path.split(__file__)[0]
file = os.path.split(__file__)[-1]
name = os.path.splitext(file)[0]
# eegsynth/lib contains shared modules
sys.path.insert(0, os.path.join(path, '../../lib'))
import EEGsynth
import FieldTrip
def _setup():
'''Initialize the module
This adds a set of global variables
'''
global parser, args, config, r, response, patch, monitor, ft_host, ft_port, ft_input
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--inifile", default=os.path.join(path, name + '.ini'), help="name of the configuration file")
args = parser.parse_args()
config = configparser.ConfigParser(inline_comment_prefixes=('#', ';'))
config.read(args.inifile)
try:
r = redis.StrictRedis(host=config.get('redis', 'hostname'), port=config.getint('redis', 'port'), db=0, charset='utf-8', decode_responses=True)
response = r.client_list()
except redis.ConnectionError:
raise RuntimeError("cannot connect to Redis server")
# combine the patching from the configuration file and Redis
patch = EEGsynth.patch(config, r)
# this can be used to show parameters that have changed
monitor = EEGsynth.monitor(name=name, debug=patch.getint('general', 'debug'))
try:
ft_host = patch.getstring('fieldtrip', 'hostname')
ft_port = patch.getint('fieldtrip', 'port')
monitor.info('Trying to connect to buffer on %s:%i ...' % (ft_host, ft_port))
ft_input = FieldTrip.Client()
ft_input.connect(ft_host, ft_port)
monitor.info("Connected to input FieldTrip buffer")
except:
raise RuntimeError("cannot connect to input FieldTrip buffer")
def _start():
'''Start the module
This uses the global variables from setup and adds a set of global variables
'''
global parser, args, config, r, response, patch, monitor, ft_host, ft_port, ft_input, name
global timeout, hdr_input, start, channels, window, clipsize, stepsize, historysize, lrate, scale_red, scale_blue, offset_red, offset_blue, winx, winy, winwidth, winheight, prefix, numhistory, freqaxis, history, showred, showblue, filtorder, filter, freqrange, notch, app, win, text_redleft_curr, text_redright_curr, text_blueleft_curr, text_blueright_curr, text_redleft_hist, text_redright_hist, text_blueleft_hist, text_blueright_hist, freqplot_curr, freqplot_hist, spect_curr, spect_hist, redleft_curr, redright_curr, blueleft_curr, blueright_curr, redleft_hist, redright_hist, blueleft_hist, blueright_hist, fft_curr, fft_hist, specmax_curr, specmin_curr, specmax_hist, specmin_hist, plotnr, channr, timer, begsample, endsample, taper
# this is the timeout for the FieldTrip buffer
timeout = patch.getfloat('fieldtrip', 'timeout', default=30)
hdr_input = None
start = time.time()
while hdr_input is None:
monitor.info("Waiting for data to arrive...")
if (time.time() - start) > timeout:
raise RuntimeError("timeout while waiting for data")
time.sleep(0.1)
hdr_input = ft_input.getHeader()
monitor.info("Data arrived")
monitor.debug(hdr_input)
monitor.debug(hdr_input.labels)
# read variables from ini/redis
channels = patch.getint('arguments', 'channels', multiple=True)
window = patch.getfloat('arguments', 'window', default=5.0) # in seconds
clipsize = patch.getfloat('arguments', 'clipsize', default=0.0) # in seconds
stepsize = patch.getfloat('arguments', 'stepsize', default=0.1) # in seconds
historysize = patch.getfloat('arguments', 'historysize', default=10) # in seconds
lrate = patch.getfloat('arguments', 'learning_rate', default=0.2)
scale_red = patch.getfloat('scale', 'red')
scale_blue = patch.getfloat('scale', 'blue')
offset_red = patch.getfloat('offset', 'red')
offset_blue = patch.getfloat('offset', 'blue')
winx = patch.getfloat('display', 'xpos')
winy = patch.getfloat('display', 'ypos')
winwidth = patch.getfloat('display', 'width')
winheight = patch.getfloat('display', 'height')
prefix = patch.getstring('output', 'prefix')
window = int(round(window * hdr_input.fSample)) # in samples
clipsize = int(round(clipsize * hdr_input.fSample)) # in samples
numhistory = int(historysize / stepsize) # number of observations in the history
freqaxis = fftfreq((window-2*clipsize), 1. / hdr_input.fSample)
history = np.zeros((len(channels), freqaxis.shape[0], numhistory))
# this is used to taper the data prior to Fourier transforming
taper = np.hanning(window)
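    # Assumed downstream use (the processing loop is outside this excerpt): each new window of
    # data is detrended, multiplied by this Hann taper to reduce spectral leakage, and Fourier
    # transformed; the magnitude of the FFT over the clipped window gives the plotted spectrum.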
# ideally it should be possible to change these on the fly
showred = patch.getint('input', 'showred', default=1)
showblue = patch.getint('input', 'showblue', default=1)
# lowpass, highpass and bandpass are optional, but mutually exclusive
filtorder = 9
if patch.hasitem('arguments', 'bandpass'):
filter = patch.getfloat('arguments', 'bandpass', multiple=True)
elif patch.hasitem('arguments', 'lowpass'):
filter = patch.getfloat('arguments', 'lowpass')
filter = [np.nan, filter]
elif patch.hasitem('arguments', 'highpass'):
filter = patch.getfloat('arguments', 'highpass')
filter = [filter, np.nan]
else:
filter = [np.nan, np.nan]
# notch filtering is optional
notch = patch.getfloat('arguments', 'notch', default=np.nan)
# wait until there is enough data
begsample = -1
while begsample < 0:
time.sleep(0.1)
hdr_input = ft_input.getHeader()
        if hdr_input is not None:
begsample = hdr_input.nSamples - window
endsample = hdr_input.nSamples - 1
# initialize graphical window
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title="EEGsynth plotspectral")
win.setWindowTitle('EEGsynth plotspectral')
win.setGeometry(winx, winy, winwidth, winheight)
# initialize graphical elements
text_redleft_curr = pg.TextItem("", anchor=( 1, 0), color='r')
text_redright_curr = pg.TextItem("", anchor=( 0, 0), color='r')
text_blueleft_curr = pg.TextItem("", anchor=( 1, -1), color='b')
text_blueright_curr = pg.TextItem("", anchor=( 0, -1), color='b')
text_redleft_hist = pg.TextItem("", anchor=( 1, 0), color='r')
text_redright_hist = pg.TextItem("", anchor=( 0, 0), color='r')
text_blueleft_hist = pg.TextItem("", anchor=( 1, -1), color='b')
text_blueright_hist = pg.TextItem("", anchor=( 0, -1), color='b')
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
# Initialize variables
freqplot_curr = []
freqplot_hist = []
spect_curr = []
spect_hist = []
redleft_curr = []
redright_curr = []
blueleft_curr = []
blueright_curr = []
redleft_hist = []
redright_hist = []
blueleft_hist = []
blueright_hist = []
fft_curr = []
fft_hist = []
specmax_curr = []
specmin_curr = []
specmax_hist = []
specmin_hist = []
# Create panels for each channel
for plotnr, channr in enumerate(channels):
plot = win.addPlot(title="%s%s" % ('Spectrum channel ', channr))
        # to speed up the initial axis scaling, set the range to something different than [0, 0]
plot.setXRange(0,1)
plot.setYRange(0,1)
freqplot_curr.append(plot)
freqplot_curr[plotnr].setLabel('left', text='Power')
freqplot_curr[plotnr].setLabel('bottom', text='Frequency (Hz)')
spect_curr.append(freqplot_curr[plotnr].plot(pen='w'))
redleft_curr.append(freqplot_curr[plotnr].plot(pen='r'))
redright_curr.append(freqplot_curr[plotnr].plot(pen='r'))
blueleft_curr.append(freqplot_curr[plotnr].plot(pen='b'))
blueright_curr.append(freqplot_curr[plotnr].plot(pen='b'))
plot = win.addPlot(title="%s%s%s%s%s" % ('Averaged spectrum channel ', channr, ' (', historysize, 's)'))
        # to speed up the initial axis scaling, set the range to something different than [0, 0]
plot.setXRange(0,1)
plot.setYRange(0,1)
freqplot_hist.append(plot)
freqplot_hist[plotnr].setLabel('left', text='Power')
freqplot_hist[plotnr].setLabel('bottom', text='Frequency (Hz)')
spect_hist.append(freqplot_hist[plotnr].plot(pen='w'))
redleft_hist.append(freqplot_hist[plotnr].plot(pen='r'))
redright_hist.append(freqplot_hist[plotnr].plot(pen='r'))
blueleft_hist.append(freqplot_hist[plotnr].plot(pen='b'))
blueright_hist.append(freqplot_hist[plotnr].plot(pen='b'))
win.nextRow()
# initialize as lists
specmin_curr.append(None)
specmax_curr.append(None)
specmin_hist.append(None)
specmax_hist.append(None)
fft_curr.append(None)
fft_hist.append(None)
# print frequency at lines
freqplot_curr[0].addItem(text_redleft_curr)
freqplot_curr[0].addItem(text_redright_curr)
freqplot_curr[0].addItem(text_blueleft_curr)
freqplot_curr[0].addItem(text_blueright_curr)
freqplot_hist[0].addItem(text_redleft_hist)
freqplot_hist[0].addItem(text_redright_hist)
freqplot_hist[0].addItem(text_blueleft_hist)
freqplot_hist[0].addItem(text_blueright_hist)
signal.signal(signal.SIGINT, _stop)
# Set timer for update
timer = QtCore.QTimer()
timer.timeout.connect(_loop_once)
    timer.setInterval(10)                   # timeout in milliseconds; superseded by the interval passed to timer.start below
timer.start(int(round(stepsize * 1000))) # in milliseconds
def _loop_once():
'''Update the main figure once
This uses the global variables from setup and start, and adds a set of global variables
'''
global parser, args, config, r, response, patch, monitor, ft_host, ft_port, ft_input
global timeout, hdr_input, start, channels, window, clipsize, stepsize, historysize, lrate, scale_red, scale_blue, offset_red, offset_blue, winx, winy, winwidth, winheight, prefix, numhistory, freqaxis, history, showred, showblue, filtorder, filter, notch, app, win, text_redleft_curr, text_redright_curr, text_blueleft_curr, text_blueright_curr, text_redleft_hist, text_redright_hist, text_blueleft_hist, text_blueright_hist, freqplot_curr, freqplot_hist, spect_curr, spect_hist, redleft_curr, redright_curr, blueleft_curr, blueright_curr, redleft_hist, redright_hist, blueleft_hist, blueright_hist, fft_curr, fft_hist, specmax_curr, specmin_curr, specmax_hist, specmin_hist, plotnr, channr, timer, begsample, endsample, taper
global dat, arguments_freqrange, freqrange, redfreq, redwidth, bluefreq, bluewidth
monitor.loop()
hdr_input = ft_input.getHeader()
    if (hdr_input.nSamples - 1) < endsample:
monitor.info("buffer reset detected")
begsample = -1
while begsample < 0:
hdr_input = ft_input.getHeader()
begsample = hdr_input.nSamples - window
endsample = hdr_input.nSamples - 1
# get the last available data
begsample = (hdr_input.nSamples - window) # the clipsize will be removed from both sides after filtering
endsample = (hdr_input.nSamples - 1)
monitor.info("reading from sample %d to %d" % (begsample, endsample))
dat = ft_input.getData([begsample, endsample]).astype(np.double)
# demean the data to prevent spectral leakage
if patch.getint('arguments', 'demean', default=1):
dat = detrend(dat, axis=0, type='constant')
# detrend the data to prevent spectral leakage
# this is rather slow, hence the default is not to detrend
if patch.getint('arguments', 'detrend', default=0):
dat = detrend(dat, axis=0, type='linear')
# apply the user-defined filtering
if not np.isnan(filter[0]) and not np.isnan(filter[1]):
dat = EEGsynth.butter_bandpass_filter(dat.T, filter[0], filter[1], int(hdr_input.fSample), filtorder).T
elif not np.isnan(filter[1]):
dat = EEGsynth.butter_lowpass_filter(dat.T, filter[1], int(hdr_input.fSample), filtorder).T
elif not np.isnan(filter[0]):
dat = EEGsynth.butter_highpass_filter(dat.T, filter[0], int(hdr_input.fSample), filtorder).T
if not np.isnan(notch):
dat = EEGsynth.notch_filter(dat.T, notch, hdr_input.fSample).T
# remove the filter padding
if clipsize > 0:
dat = dat[clipsize:-clipsize,:]
# taper the data
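    # the Hann taper attenuates the window edges, which reduces spectral leakage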
dat = dat * taper[:, np.newaxis]
# shift the FFT history by one step
history = np.roll(history, 1, axis=2)
for plotnr, channr in enumerate(channels):
# estimate the absolute FFT amplitude at the current moment
fft_curr[plotnr] = abs(fft(dat[:, channr-1]))
# update the FFT history with the current estimate
history[plotnr, :, numhistory - 1] = fft_curr[plotnr]
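        # averaging over the history buffer yields a smoothed spectral estimate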
fft_hist = np.mean(history, axis=2)
# user-selected frequency band
arguments_freqrange = patch.getfloat('arguments', 'freqrange', multiple=True)
freqrange = np.greater(freqaxis, arguments_freqrange[0]) & np.less_equal(freqaxis, arguments_freqrange[1])
# adapt the vertical scale to the running mean of the min/max
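        # this is an exponential moving average: new = (1 - lrate) * old + lrate * current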
        if specmax_curr[plotnr] is None:
specmax_curr[plotnr] = max(fft_curr[plotnr][freqrange])
specmin_curr[plotnr] = min(fft_curr[plotnr][freqrange])
specmax_hist[plotnr] = max(fft_hist[plotnr][freqrange])
specmin_hist[plotnr] = min(fft_hist[plotnr][freqrange])
else:
specmax_curr[plotnr] = (1 - lrate) * float(specmax_curr[plotnr]) + lrate * max(fft_curr[plotnr][freqrange])
specmin_curr[plotnr] = (1 - lrate) * float(specmin_curr[plotnr]) + lrate * min(fft_curr[plotnr][freqrange])
specmax_hist[plotnr] = (1 - lrate) * float(specmax_hist[plotnr]) + lrate * max(fft_hist[plotnr][freqrange])
specmin_hist[plotnr] = (1 - lrate) * float(specmin_hist[plotnr]) + lrate * min(fft_hist[plotnr][freqrange])
# update the axes
freqplot_curr[plotnr].setXRange(arguments_freqrange[0], arguments_freqrange[1])
freqplot_hist[plotnr].setXRange(arguments_freqrange[0], arguments_freqrange[1])
freqplot_curr[plotnr].setYRange(specmin_curr[plotnr], specmax_curr[plotnr])
freqplot_hist[plotnr].setYRange(specmin_hist[plotnr], specmax_hist[plotnr])
# update the spectra
spect_curr[plotnr].setData(freqaxis[freqrange], fft_curr[plotnr][freqrange])
spect_hist[plotnr].setData(freqaxis[freqrange], fft_hist[plotnr][freqrange])
# update the vertical plotted lines
if showred:
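            # redfreq and redwidth are typically normalized control values (e.g. coming from
            # Redis); rescaling and multiplying by the upper frequency bound maps them to Hz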
redfreq = patch.getfloat('input', 'redfreq', default=10. / arguments_freqrange[1])
redfreq = EEGsynth.rescale(redfreq, slope=scale_red, offset=offset_red) * arguments_freqrange[1]
redwidth = patch.getfloat('input', 'redwidth', default=1. / arguments_freqrange[1])
redwidth = EEGsynth.rescale(redwidth, slope=scale_red, offset=offset_red) * arguments_freqrange[1]
redleft_curr[plotnr].setData(x=[redfreq - redwidth, redfreq - redwidth], y=[specmin_curr[plotnr], specmax_curr[plotnr]])
redright_curr[plotnr].setData(x=[redfreq + redwidth, redfreq + redwidth], y=[specmin_curr[plotnr], specmax_curr[plotnr]])
redleft_hist[plotnr].setData(x=[redfreq - redwidth, redfreq - redwidth], y=[specmin_hist[plotnr], specmax_hist[plotnr]])
redright_hist[plotnr].setData(x=[redfreq + redwidth, redfreq + redwidth], y=[specmin_hist[plotnr], specmax_hist[plotnr]])
# update labels at the vertical lines
text_redleft_curr.setText('%0.1f' % (redfreq - redwidth))
text_redleft_curr.setPos(redfreq - redwidth, specmax_curr[0])
text_redright_curr.setText('%0.1f' % (redfreq + redwidth))
text_redright_curr.setPos(redfreq + redwidth, specmax_curr[0])
text_redleft_hist.setText('%0.1f' % (redfreq - redwidth))
text_redleft_hist.setPos(redfreq - redwidth, specmax_hist[0])
text_redright_hist.setText('%0.1f' % (redfreq + redwidth))
text_redright_hist.setPos(redfreq + redwidth, specmax_hist[0])
# write the positions of the lines to Redis
key = "%s.%s.%s" % (prefix, 'redband', 'low')
patch.setvalue(key, redfreq - redwidth)
key = "%s.%s.%s" % (prefix, 'redband', 'high')
patch.setvalue(key, redfreq + redwidth)
if showblue:
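            # the blue band uses the same normalization scheme as the red band above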
bluefreq = patch.getfloat('input', 'bluefreq', default=20. / arguments_freqrange[1])
bluefreq = EEGsynth.rescale(bluefreq, slope=scale_blue, offset=offset_blue) * arguments_freqrange[1]
bluewidth = patch.getfloat('input', 'bluewidth', default=4. / arguments_freqrange[1])
bluewidth = EEGsynth.rescale(bluewidth, slope=scale_blue, offset=offset_blue) * arguments_freqrange[1]
blueleft_curr[plotnr].setData(x=[bluefreq - bluewidth, bluefreq - bluewidth], y=[specmin_curr[plotnr], specmax_curr[plotnr]])
blueright_curr[plotnr].setData(x=[bluefreq + bluewidth, bluefreq + bluewidth], y=[specmin_curr[plotnr], specmax_curr[plotnr]])
blueleft_hist[plotnr].setData(x=[bluefreq - bluewidth, bluefreq - bluewidth], y=[specmin_hist[plotnr], specmax_hist[plotnr]])
blueright_hist[plotnr].setData(x=[bluefreq + bluewidth, bluefreq + bluewidth], y=[specmin_hist[plotnr], specmax_hist[plotnr]])
# update labels at the vertical lines
text_blueleft_curr.setText('%0.1f' % (bluefreq - bluewidth))
text_blueleft_curr.setPos(bluefreq - bluewidth, specmax_curr[0])
text_blueright_curr.setText('%0.1f' % (bluefreq + bluewidth))
text_blueright_curr.setPos(bluefreq + bluewidth, specmax_curr[0])
text_blueleft_hist.setText('%0.1f' % (bluefreq - bluewidth))
text_blueleft_hist.setPos(bluefreq - bluewidth, specmax_hist[0])
text_blueright_hist.setText('%0.1f' % (bluefreq + bluewidth))
text_blueright_hist.setPos(bluefreq + bluewidth, specmax_hist[0])
# write the positions of the lines to Redis
key = "%s.%s.%s" % (prefix, 'blueband', 'low')
patch.setvalue(key, bluefreq - bluewidth)
key = "%s.%s.%s" % (prefix, 'blueband', 'high')
patch.setvalue(key, bluefreq + bluewidth)
def _loop_forever():
'''Run the main loop forever
'''
QtGui.QApplication.instance().exec_()
def _stop(*args):
'''Stop and clean up on SystemExit, KeyboardInterrupt
'''
QtGui.QApplication.quit()
if __name__ == '__main__':
_setup()
_start()
try:
_loop_forever()
except (SystemExit, KeyboardInterrupt, RuntimeError):
_stop()
|
robertoostenveld/eegsynth-matlab
|
module/plotspectral/plotspectral.py
|
Python
|
gpl-2.0
| 20,540
|