code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""Converts between units of distance measuring"""
import sys
import readchar
def read_char():
    """Read one keypress and map it to a conversion mode name.

    Returns "meters_to_feet" for the "1" key and "feet_to_meters" for
    "2"; exits the program on Ctrl-C (whose repr is "\\x03").  Any other
    key yields None, which the caller treats as "no conversion chosen".
    """
    # repr() turns control characters into printable escapes ("'\\x03'");
    # stripping the quotes leaves a comparable plain token.
    pressed = repr(readchar.readkey()).replace("'", "")
    mode_by_key = {"1": "meters_to_feet", "2": "feet_to_meters"}
    if pressed in mode_by_key:
        return mode_by_key[pressed]
    if pressed == "\\x03":
        sys.exit(1)
def METERS_TO_FEET(meters):
    """Convert a length in meters to feet."""
    return meters * 3.28084


def FEET_TO_METERS(feet):
    """Convert a length in feet to meters."""
    return feet / 3.28084


def _main():
    """Interactive loop: prompt for a conversion mode and a number, print the result."""
    # raw_input() only exists on Python 2; fall back to input() so the
    # script also runs on Python 3 (the rest of the file already uses the
    # print() function form).
    try:
        read_line = raw_input
    except NameError:
        read_line = input

    while True:
        print("Press 1 to convert meters to feet, press 2 to convert feet to meters")
        mode = read_char()
        number = read_line("Please enter the number: ")
        # float() (rather than int()) also accepts fractional distances;
        # whole-number input converts exactly as before.
        if mode == "meters_to_feet":
            print(METERS_TO_FEET(float(number)))
        elif mode == "feet_to_meters":
            print(FEET_TO_METERS(float(number)))


if __name__ == "__main__":
    _main()
| CruyeEblon/Programming_Classes | distance_converter.py | Python | mit | 801 |
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ipaddr import IPv4Address
from maas_common import (status_ok, status_err, metric, get_keystone_client,
get_auth_ref, metric_bool, print_output)
from requests import Session
from requests import exceptions as exc
def check(auth_ref, args):
    """Probe the local glance registry endpoint and emit MaaS metrics.

    Always reports a boolean availability metric; when the registry
    answered, also reports its response time in milliseconds.
    """
    # get_keystone_client re-authenticates if the cached token went bad.
    keystone = get_keystone_client(auth_ref)
    registry_url = 'http://{ip}:9191'.format(ip=args.ip)

    session = Session()
    session.headers.update({
        'Content-type': 'application/json',
        'x-auth-token': keystone.auth_token,
    })

    try:
        # /images returns a list of public, non-deleted images
        response = session.get('%s/images' % registry_url,
                               verify=False, timeout=10)
        registry_up = response.ok
    except (exc.ConnectionError, exc.HTTPError, exc.Timeout):
        # Expected failure modes just mean the registry is down.
        registry_up = False
    except Exception as e:
        # Anything else is a check error, not a registry-down signal.
        status_err(str(e))

    status_ok()
    metric_bool('glance_registry_local_status', registry_up)

    # Response time is only meaningful when the registry actually answered.
    if registry_up:
        elapsed_ms = response.elapsed.total_seconds() * 1000
        metric('glance_registry_local_response_time', 'double',
               '%.3f' % elapsed_ms, 'ms')
def main(args):
    """Authenticate once, then run the registry check with the parsed args."""
    check(get_auth_ref(), args)
if __name__ == "__main__":
    # NOTE(review): print_output presumably wraps stdout/stderr into the
    # format MaaS expects -- confirm against maas_common.
    with print_output():
        parser = argparse.ArgumentParser(description='Check glance registry')
        parser.add_argument('ip',
                            type=IPv4Address,
                            help='glance registry IP address')
        args = parser.parse_args()
        main(args)
| jpmontez/rpc-openstack | maas/glance_registry_local_check.py | Python | apache-2.0 | 2,294 |
from .permission import *
from .serializers import ArticleSerializer
from .models import Article
from rest_framework.filters import SearchFilter
from rest_framework.viewsets import ModelViewSet
from rest_framework.response import Response
from django.db.models import Q
# Create your views here.
class dataWrapper(object):
    """Minimal object exposing a ``.data`` attribute.

    Used by ``ArticleView.addAuthorField`` to stand in for a DRF request
    whose payload has been augmented server-side.
    """
    def __init__(self, data):
        # data: the (already augmented) request payload dict.
        self.data = data
class ArticleView(ModelViewSet):
    """CRUD endpoints for articles.

    Access is gated by ``ArticleLonginPermission``; list results are
    restricted so that non-staff users only see published articles plus
    their own.  Search is available over title, author name, body and
    status.
    """
    queryset = Article.objects.all()
    permission_classes = (ArticleLonginPermission,)
    serializer_class = ArticleSerializer
    filter_backends = (SearchFilter,)
    search_fields = ('title', 'author__username', 'body', 'status')

    def addAuthorField(self, request):
        # Build a request-like object whose payload carries the
        # authenticated user's name as the article author.
        return dataWrapper(dict(request.data, **{'author':request.user.username}))

    def create(self, request, *args, **kwargs):
        # Stamp the author server-side so clients cannot spoof it.
        return super(ArticleView, self).create(self.addAuthorField(request), *args, **kwargs)

    def list(self, request, *args, **kwargs):
        """List articles visible to the requesting user."""
        queryset = self.filter_queryset(self.get_queryset())
        if request.user.is_staff:
            # Staff can browse everything, drafts included.
            _queryset = queryset
        else:
            # Everyone else: own articles plus anything published.
            _queryset = queryset.filter(Q(author=request.user) | Q(status='published'))
        page = self.paginate_queryset(_queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(_queryset, many=True)
        return Response(serializer.data)
| shady831213/myBlog | myBlog/articles/views.py | Python | mit | 1,502 |
# MySQL Connector/Python - MySQL driver written in Python.
import django
if django.VERSION < (1, 8):
from django.db.backends import BaseDatabaseValidation
else:
from django.db.backends.base.validation import BaseDatabaseValidation
if django.VERSION < (1, 7):
from django.db import models
else:
from django.core import checks
from django.db import connection
class DatabaseValidation(BaseDatabaseValidation):
    """MySQL-specific model validation.

    Django renamed ``validate_field()`` to ``check_field()`` in 1.7, so
    the appropriate implementation is selected at class-definition time.
    """
    if django.VERSION < (1, 7):
        def validate_field(self, errors, opts, f):
            """
            MySQL has the following field length restriction:
            No character (varchar) fields can have a length exceeding 255
            characters if they have a unique index on them.
            """
            varchar_fields = (models.CharField,
                              models.CommaSeparatedIntegerField,
                              models.SlugField)
            if isinstance(f, varchar_fields) and f.max_length > 255 and f.unique:
                msg = ('"%(name)s": %(cls)s cannot have a "max_length" greater '
                       'than 255 when using "unique=True".')
                errors.add(opts, msg % {'name': f.name,
                                        'cls': f.__class__.__name__})
    else:
        def check_field(self, field, **kwargs):
            """
            MySQL has the following field length restriction:
            No character (varchar) fields can have a length exceeding 255
            characters if they have a unique index on them.
            """
            # Django 1.7
            errors = super(DatabaseValidation, self).check_field(field,
                                                                 **kwargs)

            # Ignore any related fields.
            if getattr(field, 'rel', None) is None:
                field_type = field.db_type(connection)

                # max_length is None means "unbounded", which also exceeds
                # the 255-character unique-index limit.
                if (field_type.startswith('varchar')  # Look for CharFields...
                        and field.unique  # ... that are unique
                        and (field.max_length is None or
                             int(field.max_length) > 255)):
                    errors.append(
                        checks.Error(
                            ('MySQL does not allow unique CharFields to have a '
                             'max_length > 255.'),
                            hint=None,
                            obj=field,
                            id='mysql.E001',
                        )
                    )
            return errors
| rupace10/mysql-connector-python | lib/mysql/connector/django/validation.py | Python | gpl-2.0 | 2,529 |
'''
IDE: Eclipse (PyDev)
Python version: 2.7
Operating system: Windows 8.1
@author: Emil Carlsson
@copyright: 2015 Emil Carlsson
@license: This program is distributed under the terms of the GNU General Public License
'''
import unittest
from Model import Card
class TestCard(unittest.TestCase):
    """Unit tests for Model.Card.

    The original tests wrapped ``self.fail()`` calls in bare ``except:``
    blocks; the AssertionError raised by ``fail()`` was itself swallowed
    by the ``except: pass``, so the "must reject negative values" checks
    could never actually fail.  They are rewritten with ``assertRaises``
    so regressions are reported.
    """

    # Shared placeholder image URL used by every card fixture.
    IMAGE = ("https://sv.wikipedia.org/wiki/Portal:Huvudsida#/media/"
             "File:Panama_Canal_Gatun_Locks.jpg")

    def _make_card(self, ap=1, dp=1, hp=1, name="Card one",
                   description="Card one Description"):
        """Build a Card with small default stats."""
        return Card.Card(name, description, self.IMAGE, ap, dp, hp)

    def testCardSetAP(self):
        card = self._make_card()
        card.AP = 5
        self.assertEqual(card.AP, 5, "card.AP not set correctly")
        # Negative attack points must be rejected.
        with self.assertRaises(Exception):
            card.AP = -4

    def testCardSetHP(self):
        card = self._make_card()
        card.HP = 5
        self.assertEqual(card.HP, 5, "card.HP not set correctly")
        card.HP = card.HP - 3
        self.assertEqual(card.HP, 2, "card.HP not decreased correctly")
        # Directly assigning negative hit points must be rejected.
        with self.assertRaises(Exception):
            card.HP = -5

    def testCardSetName(self):
        card = self._make_card()
        card.name = "Card name"
        self.assertEqual(card.name, "Card name", "Card.name not set correctly")

    def testCardSetDP(self):
        card = self._make_card()
        card.DP = 5
        self.assertEqual(card.DP, 5, "Card.DP not set correctly")
        # Negative defense points must be rejected.
        with self.assertRaises(Exception):
            card.DP = -4

    def testCardBattle(self):
        attacker = self._make_card(ap=5, dp=6, hp=4)
        defender = self._make_card(ap=4, dp=4, hp=7, name="Card two",
                                   description="Card two description")
        # Attack where the attacker's AP beats the defender's DP.
        winner = defender.defend(attacker)
        self.assertEqual(winner, attacker, "Wrong card winner ap > dp")
        self.assertTrue(defender.HP < 7, "Hp not decreased correct ap > dp")

        remaining_hp = defender.HP
        # Counter-attack where the defender's DP beats the attacker's AP:
        # the weaker card still loses HP.
        winner = attacker.defend(defender)
        self.assertEqual(winner, attacker, "Wrong card winner dp > ap")
        self.assertTrue(remaining_hp > defender.HP,
                        "Hp not decreased correct dp > ap")

    def testKillCard(self):
        attacker = self._make_card(ap=5, dp=6, hp=4)
        victim = self._make_card(ap=4, dp=2, hp=1, name="Card two",
                                 description="Card two description")
        winner = victim.defend(attacker)
        self.assertEqual(winner, attacker, "Wrong card won")
        self.assertLess(victim.HP, 1, "Card two did not die")
        self.assertFalse(victim.IsAlive, "Card two is marked as alive")
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run the whole TestCard suite when this file is executed directly.
    unittest.main()
from builtins import str
import uuid
from ckan import model
from ckan.lib import dictization
from ckan.plugins import toolkit
from sqlalchemy import Column, types
from sqlalchemy.ext.declarative import declarative_base
import logging
log = logging.getLogger(__name__)
Base = declarative_base()
def make_uuid():
    """Return a fresh random UUID4 rendered as text, for use as a primary key."""
    return "{0}".format(uuid.uuid4())
class ApplyPermission(Base):
    """SQLAlchemy model for an application to use another organization's
    X-Road services.

    Stores the applying and target organizations, contact details, the
    requesting subsystem and the JSON list of services applied for.
    """
    __tablename__ = 'apply_permission'

    # Text UUID primary key generated by make_uuid().
    id = Column(types.UnicodeText, primary_key=True, default=make_uuid)
    # Organization applying for access.
    organization_id = Column(types.UnicodeText, nullable=False)
    # Organization whose services are being requested.
    target_organization_id = Column(types.UnicodeText, nullable=False)
    business_code = Column(types.UnicodeText, nullable=False)
    contact_name = Column(types.UnicodeText, nullable=False)
    contact_email = Column(types.UnicodeText, nullable=False)
    # JSON list of IP addresses the requester will call from.
    ip_address_list = Column(types.JSON, nullable=False)
    subsystem_id = Column(types.UnicodeText, nullable=False)
    subsystem_code = Column(types.UnicodeText, nullable=False)
    # JSON list of service (resource) ids applied for.
    service_code_list = Column(types.JSON, nullable=False)
    usage_description = Column(types.UnicodeText)
    request_date = Column(types.Date)

    @classmethod
    def create(cls, organization_id, target_organization_id, business_code, contact_name, contact_email,
               ip_address_list, subsystem_code, subsystem_id, service_code_list, usage_description, request_date):
        """Persist a new application row and return its generated id."""
        apply_permission = ApplyPermission(organization_id=organization_id,
                                           target_organization_id=target_organization_id,
                                           business_code=business_code,
                                           contact_name=contact_name,
                                           contact_email=contact_email,
                                           ip_address_list=ip_address_list,
                                           subsystem_code=subsystem_code,
                                           subsystem_id=subsystem_id,
                                           service_code_list=service_code_list,
                                           usage_description=usage_description,
                                           request_date=request_date)
        model.Session.add(apply_permission)
        model.repo.commit()
        return apply_permission.id

    @classmethod
    def get(cls, application_id):
        """Return the application with the given id, or None if absent."""
        return model.Session.query(cls).filter(cls.id == application_id).first()

    def as_dict(self):
        """Serialize the row plus related CKAN objects into one dict.

        Resolves the requesting subsystem, the target subsystem, its
        owning member organization, the requested services and both
        organizations through CKAN actions (auth checks are bypassed
        with ignore_auth).
        """
        context = {'model': model}
        application_dict = dictization.table_dictize(self, context)
        application_dict['requester_subsystem'] = toolkit.get_action('package_show')(
            {'ignore_auth': True}, {'id': application_dict['subsystem_id']})
        application_dict['subsystem'] = toolkit.get_action('package_show')(
            {'ignore_auth': True}, {'id': application_dict['subsystem_code']})
        application_dict['member'] = toolkit.get_action('organization_show')(
            {'ignore_auth': True}, {'id': application_dict['subsystem']['owner_org']})
        application_dict['services'] = [toolkit.get_action('resource_show')(
            {'ignore_auth': True}, {'id': service}) for service in application_dict['service_code_list']]
        application_dict['organization'] = toolkit.get_action('organization_show')(
            {'ignore_auth': True}, {'id': application_dict['organization_id']})
        application_dict['target_organization'] = toolkit.get_action('organization_show')(
            {'ignore_auth': True}, {'id': application_dict['target_organization_id']})
        return application_dict
def init_table(engine):
    """Create all tables registered on Base's metadata (idempotent)."""
    Base.metadata.create_all(engine)
    log.info("Table for applying permissions is set-up")
| vrk-kpa/api-catalog | ckanext/ckanext-apply_permissions_for_service/ckanext/apply_permissions_for_service/model.py | Python | mit | 3,739 |
from .grammar_checker import *
| jmchrl/opb | grammalecte/__init__.py | Python | gpl-3.0 | 32 |
import org_demo_file_2

# Side effect at import time: exercises the external function once.
org_demo_file_2.external_function("call2")

# Demo module-level constant.
foobar = 10


def foo():
    """Forward a demo call into the sibling module org_demo_file_2."""
    org_demo_file_2.external_function("call2")
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import getpass
import os
import time
import invoke
import requests
JENKINS_URL = "https://jenkins.cryptography.io/job/cryptography-wheel-builder"
def wait_for_build_completed(session):
    """Block until the most recent Jenkins build finishes successfully.

    Polls the lastBuild JSON endpoint until Jenkins reports the build is
    no longer running, then asserts it ended with SUCCESS.
    """
    # Wait 20 seconds before actually checking if the build is complete,
    # to ensure that it had time to really start.
    time.sleep(20)
    last_build_url = "{0}/lastBuild/api/json/".format(JENKINS_URL)
    json_headers = {"Accept": "application/json"}
    while True:
        response = session.get(last_build_url, headers=json_headers)
        response.raise_for_status()
        build = response.json()
        if not build["building"]:
            assert build["result"] == "SUCCESS"
            return
        time.sleep(0.1)
def download_artifacts(session):
    """Download every artifact of the last (successful) Jenkins build.

    Files are saved into the local ``dist/`` directory next to this
    script; the list of written file paths is returned for upload.
    """
    response = session.get(
        "{0}/lastBuild/api/json/".format(JENKINS_URL),
        headers={
            "Accept": "application/json"
        }
    )
    response.raise_for_status()
    # The caller must have waited for completion already.
    assert not response.json()["building"]
    assert response.json()["result"] == "SUCCESS"

    paths = []
    # A matrix build has one "run" per configuration; each run carries
    # its own artifact list.
    for run in response.json()["runs"]:
        response = session.get(
            run["url"] + "api/json/",
            headers={
                "Accept": "application/json",
            }
        )
        response.raise_for_status()
        for artifact in response.json()["artifacts"]:
            response = session.get(
                "{0}artifact/{1}".format(run["url"], artifact["relativePath"])
            )
            out_path = os.path.join(
                os.path.dirname(__file__),
                "dist",
                artifact["fileName"],
            )
            with open(out_path, "wb") as f:
                f.write(response.content)
            paths.append(out_path)
    return paths
@invoke.task
def release(version):
    """
    Tag, build and upload a cryptography release, then trigger the
    Jenkins wheel builders and upload the resulting wheels.

    ``version`` should be a string like '0.4' or '1.0'.
    """
    invoke.run("git tag -s {0} -m '{0} release'".format(version))
    invoke.run("git push --tags")

    # Build and upload the signed sdists for cryptography and its vectors.
    invoke.run("python setup.py sdist")
    invoke.run("cd vectors/ && python setup.py sdist bdist_wheel")

    invoke.run(
        "twine upload -s dist/cryptography-{0}* "
        "vectors/dist/cryptography_vectors-{0}*".format(version)
    )

    session = requests.Session()

    # This tells the CDN to delete the cached response for the URL. We do this
    # so that the Jenkins builders will see the new sdist immediately when they
    # go to build the wheels.
    response = session.request(
        "PURGE", "https://pypi.python.org/simple/cryptography/"
    )
    response.raise_for_status()

    username = getpass.getpass("Input the GitHub/Jenkins username: ")
    token = getpass.getpass("Input the Jenkins token: ")
    # Kick off the wheel-builder job, wait for it, then upload the wheels.
    response = session.post(
        "{0}/build".format(JENKINS_URL),
        auth=requests.auth.HTTPBasicAuth(
            username, token
        ),
        params={
            "cause": "Building wheels for {0}".format(version)
        }
    )
    response.raise_for_status()
    wait_for_build_completed(session)
    paths = download_artifacts(session)
    invoke.run("twine upload {0}".format(" ".join(paths)))
| bwhmather/cryptography | tasks.py | Python | bsd-3-clause | 3,366 |
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
# A clickable rectangle with a random face colour; picker=True makes any
# click inside it fire a pick_event.
rect = plt.Rectangle((np.pi, -0.5), 1, 1, fc=np.random.random(3), picker=True)
ax.add_patch(rect)

# A sine curve; picker=8.0 means clicks within 8 points of the line pick it.
x = np.linspace(0, np.pi*2, 100)
y = np.sin(x)
line, = plt.plot(x, y, picker=8.0)


def on_pick(event):
    """Pick handler: cycle the curve's line width (1-5) when the line is
    picked; give the rectangle a new random colour when it is picked."""
    artist = event.artist
    if isinstance(artist, plt.Line2D):
        lw = artist.get_linewidth()
        artist.set_linewidth(lw % 5 + 1)
    else:
        artist.set_fc(np.random.random(3))
    # Redraw so the change is visible immediately.
    fig.canvas.draw()


fig.canvas.mpl_connect('pick_event', on_pick);
plt.show()
from __future__ import absolute_import
import logging
from kudzu.context import CONTEXT_VARS, RequestContext
class RequestContextFilter(object):
    """Logging filter which injects information about a current request.

    `RequestContextFilter` accepts all log records and extends them by
    contextual information about the current request. Its constructor takes
    names of attributes which should be added to log records.

    This filter should be added to logging handlers not to loggers because
    filters are not executed for records logged by child loggers.

    `RequestContextFilter` depends on `RequestContextMiddleware`
    to make `RequestContext` globally available.

    Functions `kudzify_handler` and `kudzify_logger` simplify configuration
    of loggers with this instances of this class.
    """

    def __init__(self, keys):
        # keys: names of RequestContext log variables to copy onto records.
        self.keys = tuple(keys)

    def filter(self, record):
        """Always accept the record, after stamping it with context vars."""
        context = RequestContext.get()
        log_vars = context.log_vars if context else {}
        for key in self.keys:
            # Outside a request (no context), placeholders render as '-'.
            value = log_vars.get(key, '-')
            setattr(record, key, value)
        return True
BASIC_FORMAT = "[%(addr)s|%(rid)s] %(levelname)s:%(name)s:%(message)s"
def kudzify_handler(handler, format=BASIC_FORMAT):
    """Extends format string of a handler by request context placeholders.

    Takes a logging handler instance and a format string with `CONTEXT_VARS`
    placeholders. It configures a `RequestContextFilter` to extract the
    variables actually referenced in the format from a `RequestContext`,
    attaches the filter to the given handler, and replaces the handler's
    formatter.
    """
    # Only extract context vars whose placeholder appears in the format,
    # so records are not polluted with unused attributes.
    keys = [key for key in CONTEXT_VARS if '%%(%s)' % key in format]
    handler.addFilter(RequestContextFilter(keys))
    # Use the documented setFormatter() API instead of assigning the
    # .formatter attribute directly, so handler subclasses that override
    # setFormatter keep working.
    handler.setFormatter(logging.Formatter(format))
def kudzify_logger(logger=None, format=BASIC_FORMAT):
    """Extends format string of a logger by request context placeholders.

    Accepts a `logging.Logger` instance, a logger name, or None (root
    logger) and applies `kudzify_handler` to every handler already
    attached — so this must be called after handlers are configured.
    """
    if isinstance(logger, logging.Logger):
        target = logger
    else:
        target = logging.getLogger(logger)
    for handler in target.handlers:
        kudzify_handler(handler, format=format)
| mila/kudzu | kudzu/logging.py | Python | bsd-3-clause | 2,324 |
#!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import logging
import serial
import time
import os
from neblinaCommunication import NeblinaCommunication
from pyslip import slip
###################################################################################
class NeblinaUART(NeblinaCommunication):
    """UART transport for a Neblina device, framing packets with SLIP."""

    def __init__(self, address):
        # address: serial port name, e.g. "COM3" or "/dev/ttyACM0".
        NeblinaCommunication.__init__(self, address)

        self.comslip = slip.slip()
        # Serial handle; None until connect() succeeds.
        self.sc = None

    def connect(self):
        # Try to open the serial COM port, retrying once per second while
        # the device reports busy.
        logging.debug("Opening COM port : {0}".format(self.address))
        self.sc = None
        while self.sc is None:
            try:
                # NOTE(review): 500000 baud appears to be the rate the
                # firmware expects -- confirm against device docs.
                self.sc = serial.Serial(port=self.address, baudrate=500000)
            except serial.serialutil.SerialException as se:
                if 'Device or resource busy:' in se.__str__():
                    logging.info('Opening COM port is taking a little while, please stand by...')
                else:
                    logging.error('se: {0}'.format(se))
                time.sleep(1)
        # Drop any stale bytes left in the input buffer.
        self.sc.flushInput()

    def disconnect(self):
        # Close the port; only meaningful after a successful connect().
        logging.debug("Closing COM port : {0}".format(self.address))
        self.sc.close()

    def isConnected(self):
        # pyserial exposes is_open on newer versions and isOpen() on older
        # ones; the original authors keyed the choice on the platform.
        if os.name == "posix":
            return self.sc and self.sc.is_open
        else:
            return self.sc and self.sc.isOpen()

    def receivePacket(self):
        # Block until a full SLIP frame arrives; Ctrl-C yields None.
        packet = None
        try:
            packet = self.comslip.receivePacketFromStream(self.sc)
        except KeyboardInterrupt:
            pass
        return packet

    def sendPacket(self, packet):
        # SLIP-encode and write the packet to the serial stream.
        self.comslip.sendPacketToStream(self.sc, packet)
| Motsai/neblina-python | neblinaUART.py | Python | mit | 2,947 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from airflow.hooks.druid_hook import DruidHook
from airflow.models import BaseOperator
class DruidOperator(BaseOperator):
    """
    Allows to submit a task directly to druid

    :param json_index_file: The filepath to the druid index specification
    :type json_index_file: str
    :param druid_ingest_conn_id: The connection id of the Druid overlord which accepts index jobs
    :type druid_ingest_conn_id: str
    :param max_ingestion_time: Maximum seconds to wait for the ingestion
        job before giving up; None waits indefinitely.
    :type max_ingestion_time: int
    """
    template_fields = ('index_spec_str',)
    template_ext = ('.json',)

    def __init__(
            self,
            json_index_file,
            druid_ingest_conn_id='druid_ingest_default',
            max_ingestion_time=None,
            *args, **kwargs):
        super(DruidOperator, self).__init__(*args, **kwargs)
        self.conn_id = druid_ingest_conn_id
        self.max_ingestion_time = max_ingestion_time

        with open(json_index_file) as data_file:
            index_spec = json.load(data_file)
        # Re-serialize deterministically (sorted keys, fixed separators)
        # so templated specs render stably across runs.
        self.index_spec_str = json.dumps(
            index_spec,
            sort_keys=True,
            indent=4,
            separators=(',', ': ')
        )

    def execute(self, context):
        """Submit the index spec to the Druid overlord and wait for it."""
        hook = DruidHook(
            druid_ingest_conn_id=self.conn_id,
            max_ingestion_time=self.max_ingestion_time
        )
        # Fixed typo in the log message ("Sumitting" -> "Submitting").
        self.log.info("Submitting %s", self.index_spec_str)
        hook.submit_indexing_job(self.index_spec_str)
| KL-WLCR/incubator-airflow | airflow/contrib/operators/druid_operator.py | Python | apache-2.0 | 1,947 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildbot.revlinks import RevlinkMatch, GithubRevlink, SourceforgeGitRevlink, GitwebMatch
class TestGithubRevlink(unittest.TestCase):
    """GithubRevlink must build the same commit URL for every clone-URL
    scheme GitHub supports (https, git, scp-style ssh, ssh URI)."""
    revision = 'b6874701b54e0043a78882b020afc86033133f91'
    url = 'https://github.com/buildbot/buildbot/commit/b6874701b54e0043a78882b020afc86033133f91'

    def testHTTPS(self):
        self.assertEqual(GithubRevlink(self.revision, 'https://github.com/buildbot/buildbot.git'),
                         self.url)

    def testGIT(self):
        self.assertEqual(GithubRevlink(self.revision, 'git://github.com/buildbot/buildbot.git'),
                         self.url)

    def testSSH(self):
        # scp-style ssh remote (user@host:path).
        self.assertEqual(GithubRevlink(self.revision, 'git@github.com:buildbot/buildbot.git'),
                         self.url)

    def testSSHuri(self):
        self.assertEqual(GithubRevlink(self.revision, 'ssh://git@github.com/buildbot/buildbot.git'),
                         self.url)
class TestSourceforgeGitRevlink(unittest.TestCase):
    """SourceforgeGitRevlink must build the gitweb commit URL for every
    clone-URL scheme SourceForge supports (git, scp-style ssh, ssh URI)."""
    revision = 'b99c89a2842d386accea8072ae5bb6e24aa7cf29'
    url = 'http://gemrb.git.sourceforge.net/git/gitweb.cgi?p=gemrb/gemrb;a=commit;h=b99c89a2842d386accea8072ae5bb6e24aa7cf29'

    def testGIT(self):
        self.assertEqual(SourceforgeGitRevlink(self.revision, 'git://gemrb.git.sourceforge.net/gitroot/gemrb/gemrb'),
                         self.url)

    def testSSH(self):
        # scp-style ssh remote (user@host:path).
        self.assertEqual(SourceforgeGitRevlink(self.revision, 'somebody@gemrb.git.sourceforge.net:gitroot/gemrb/gemrb'),
                         self.url)

    def testSSHuri(self):
        self.assertEqual(SourceforgeGitRevlink(self.revision, 'ssh://somebody@gemrb.git.sourceforge.net/gitroot/gemrb/gemrb'),
                         self.url)
class TestRevlinkMatch(unittest.TestCase):
    """Tests for the generic regex-based RevlinkMatch helper.

    The deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual``.
    """

    def testNotmuch(self):
        revision = 'f717d2ece1836c863f9cc02abd1ff2539307cd1d'
        matcher = RevlinkMatch(['git://notmuchmail.org/git/(.*)'],
                               r'http://git.notmuchmail.org/git/\1/commit/%s')
        self.assertEqual(matcher(revision, 'git://notmuchmail.org/git/notmuch'),
                         'http://git.notmuchmail.org/git/notmuch/commit/f717d2ece1836c863f9cc02abd1ff2539307cd1d')

    def testSingleString(self):
        # A plain string pattern (not a list) must also be accepted.
        revision = 'rev'
        matcher = RevlinkMatch('test', 'out%s')
        self.assertEqual(matcher(revision, 'test'), 'outrev')

    def testSingleUnicode(self):
        revision = 'rev'
        matcher = RevlinkMatch(u'test', 'out%s')
        self.assertEqual(matcher(revision, 'test'), 'outrev')

    def testTwoCaptureGroups(self):
        # Capture groups from the repo URL are usable in the template.
        revision = 'rev'
        matcher = RevlinkMatch('([A-Z]*)Z([0-9]*)', r'\2-\1-%s')
        self.assertEqual(matcher(revision, 'ABCZ43'), '43-ABC-rev')
class TestGitwebMatch(unittest.TestCase):
    """GitwebMatch must build a gitweb commit URL from the captured repo
    name.  The deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced with ``assertEqual``.
    """

    def testOrgmode(self):
        revision = '490d6ace10e0cfe74bab21c59e4b7bd6aa3c59b8'
        matcher = GitwebMatch('git://orgmode.org/(?P<repo>.*)', 'http://orgmode.org/w/')
        self.assertEqual(matcher(revision, 'git://orgmode.org/org-mode.git'),
                         'http://orgmode.org/w/?p=org-mode.git;a=commit;h=490d6ace10e0cfe74bab21c59e4b7bd6aa3c59b8')
| denny820909/builder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_revlinks.py | Python | mit | 3,783 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.compiled
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for compiled languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from string import Template
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, combined, inherit, do_insertions
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Literal, Generic
from pygments.scanner import Scanner
# backwards compatibility
from pygments.lexers.functional import OcamlLexer
from pygments.lexers.jvm import JavaLexer, ScalaLexer
__all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'ECLexer',
'NesCLexer', 'DylanLexer', 'ObjectiveCLexer', 'ObjectiveCppLexer',
'FortranLexer', 'GLShaderLexer', 'PrologLexer', 'CythonLexer',
'ValaLexer', 'OocLexer', 'GoLexer', 'FelixLexer', 'AdaLexer',
'Modula2Lexer', 'BlitzMaxLexer', 'BlitzBasicLexer', 'NimrodLexer',
'FantomLexer', 'RustLexer', 'CudaLexer', 'MonkeyLexer', 'SwigLexer',
'DylanLidLexer', 'DylanConsoleLexer', 'CobolLexer',
'CobolFreeformatLexer', 'LogosLexer', 'ClayLexer']
class CFamilyLexer(RegexLexer):
    """
    For C family source code. This is used as a base class to avoid repetitious
    definitions.
    """

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r':\s*/[*].*?[*]/\s*'

    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            # labels; the (?!:) lookahead avoids matching the C++ "::" operator
            (r'^(\s*)([a-zA-Z_][a-zA-Z0-9_]*:(?!:))',
             bygroups(Text, Name.Label)),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            # a '*/' outside a comment is always an error
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
            (r'(auto|break|case|const|continue|default|do|else|enum|extern|'
             r'for|goto|if|register|restricted|return|sizeof|static|struct|'
             r'switch|typedef|union|volatile|while)\b', Keyword),
            (r'(bool|int|long|float|short|double|char|unsigned|signed|void|'
             r'[a-z_][a-z0-9_]*_t)\b',
             Keyword.Type),
            (r'(_{0,2}inline|naked|restrict|thread|typename)\b', Keyword.Reserved),
            # Vector intrinsics
            (r'(__(m128i|m128d|m128|m64))\b', Keyword.Reserved),
            # Microsoft-isms
            (r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
             r'declspec|finally|int64|try|leave|wchar_t|w64|unaligned|'
             r'raise|noop|identifier|forceinline|assume)\b', Keyword.Reserved),
            (r'(true|false|NULL)\b', Name.Builtin),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))'    # return arguments
             r'([a-zA-Z_][a-zA-Z0-9_]*)'             # method name
             r'(\s*\([^;]*?\))'                      # signature
             r'(' + _ws + r')?({)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))'    # return arguments
             r'([a-zA-Z_][a-zA-Z0-9_]*)'             # method name
             r'(\s*\([^;]*?\))'                      # signature
             r'(' + _ws + r')?(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            # fall through: everything else is a plain statement
            ('', Text, 'statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            ('{', Punctuation, '#push'),
            ('}', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),  # macro continuation line
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    # Typedef names from the C standard library, highlighted as
    # Keyword.Type when stdlibhighlighting is on.
    stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
                    'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
                    'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
                    'wctrans_t', 'wint_t', 'wctype_t']
    # C99 fixed-width and related types, highlighted when c99highlighting
    # is on.
    c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
                 'int_least16_t', 'int_least32_t', 'int_least64_t',
                 'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
                 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
                 'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
                 'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t',
                 'uintmax_t']

    def __init__(self, **options):
        # Both highlighting options default to on.
        self.stdlibhighlighting = get_bool_opt(options,
                                               'stdlibhighlighting', True)
        self.c99highlighting = get_bool_opt(options,
                                            'c99highlighting', True)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Post-process the base lexer's token stream: promote known
        # typedef names from plain Name to Keyword.Type.
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.stdlibhighlighting and value in self.stdlib_types:
                    token = Keyword.Type
                elif self.c99highlighting and value in self.c99_types:
                    token = Keyword.Type
            yield index, token, value
class CLexer(CFamilyLexer):
    """
    For C source code with preprocessor directives.
    """
    name = 'C'
    aliases = ['c']
    filenames = ['*.c', '*.h', '*.idc']
    mimetypes = ['text/x-chdr', 'text/x-csrc']
    # slightly above the default so C wins ties against more generic lexers
    priority = 0.1
    def analyse_text(text):
        # C has no cheap textual fingerprint; return a constant low score
        # and let more specific C-family lexers (C++, SWIG, ...) outbid it.
        return 0.1
class CppLexer(CFamilyLexer):
    """
    For C++ source code with preprocessor directives.
    """
    name = 'C++'
    aliases = ['cpp', 'c++']
    filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
                 '*.cc', '*.hh', '*.cxx', '*.hxx',
                 '*.C', '*.H', '*.cp', '*.CPP']
    mimetypes = ['text/x-c++hdr', 'text/x-c++src']
    # kept equal to CLexer's priority so neither outbids the other by default
    priority = 0.1
    tokens = {
        'statements': [
            # C++-only keywords, layered on top of the inherited C rules
            (r'(asm|catch|const_cast|delete|dynamic_cast|explicit|'
             r'export|friend|mutable|namespace|new|operator|'
             r'private|protected|public|reinterpret_cast|'
             r'restrict|static_cast|template|this|throw|throws|'
             r'typeid|typename|using|virtual)\b', Keyword),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            inherit,
        ],
        'root': [
            inherit,
            # C++ Microsoft-isms
            (r'__(virtual_inheritance|uuidof|super|single_inheritance|'
             r'multiple_inheritance|interface|event)\b', Keyword.Reserved),
            # Offload C++ extensions, http://offload.codeplay.com/
            (r'(__offload|__blockingoffload|__outer)\b', Keyword.Pseudo),
        ],
        'classname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
    }
    def analyse_text(text):
        # no cheap distinguishing feature either; constant low confidence
        return 0.1
class SwigLexer(CppLexer):
    """
    For `SWIG <http://www.swig.org/>`_ source code.
    *New in Pygments 1.7.*
    """
    name = 'SWIG'
    aliases = ['Swig', 'swig']
    filenames = ['*.swg', '*.i']
    mimetypes = ['text/swig']
    priority = 0.04 # Lower than C/C++ and Objective C/C++
    tokens = {
        'statements': [
            (r'(%[a-z_][a-z0-9_]*)', Name.Function), # SWIG directives
            ('\$\**\&?[a-zA-Z0-9_]+', Name), # Special variables
            (r'##*[a-zA-Z_][a-zA-Z0-9_]*', Comment.Preproc), # Stringification / additional preprocessor directives
            inherit,
        ],
    }
    # This is a far from complete set of SWIG directives
    swig_directives = (
        # Most common directives
        '%apply', '%define', '%director', '%enddef', '%exception', '%extend',
        '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
        '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
        '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
        # Less common directives
        '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
        '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
        '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
        '%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
        '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
        '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
        '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
        '%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
        '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
        '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
        '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
        '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn', '%warnfilter')
    def analyse_text(text):
        """Score the likelihood that *text* is SWIG input.
        Returns 0.1 (same as C/C++) when no %-directives are present,
        0.98 when a known SWIG directive starts a line, and 0.91 when
        only unrecognized %-directives are found.
        """
        rv = 0.1 # Same as C/C++
        # Search for SWIG directives, which are conventionally at the beginning of
        # a line. The probability of them being within a line is low, so let another
        # lexer win in this case.
        matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
        for m in matches:
            if m in SwigLexer.swig_directives:
                rv = 0.98
                break
            else:
                # BUG FIX: this ``else`` belongs to the ``if`` above, not to
                # the ``for`` loop. As a for-else it also ran when *matches*
                # was empty, giving every directive-free (i.e. plain C/C++)
                # file a 0.91 score that beat CLexer/CppLexer.
                rv = 0.91 # Fraction higher than MatlabLexer
        return rv
class ECLexer(CLexer):
    """
    For eC source code with preprocessor directives.
    *New in Pygments 1.5.*
    """
    name = 'eC'
    aliases = ['ec']
    filenames = ['*.ec', '*.eh']
    mimetypes = ['text/x-echdr', 'text/x-ecsrc']
    tokens = {
        'statements': [
            # eC-specific keywords, layered on top of the inherited C rules
            (r'(virtual|class|private|public|property|import|delete|new|new0|'
             r'renew|renew0|define|get|set|remote|dllexport|dllimport|stdcall|'
             r'subclass|__on_register_module|namespace|using|typed_object|'
             r'any_object|incref|register|watch|stopwatching|firewatchers|'
             r'watchable|class_designer|class_fixed|class_no_expansion|isset|'
             r'class_default_property|property_category|class_data|'
             r'class_property|virtual|thisclass|'
             r'dbtable|dbindex|database_open|dbfield)\b', Keyword),
            # eC builtin value types
            (r'(uint|uint16|uint32|uint64|bool|byte|unichar|int64)\b',
             Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(null|value|this)\b', Name.Builtin),
            inherit,
        ],
        'classname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
    }
class NesCLexer(CLexer):
    """
    For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
    directives.
    *New in Pygments 1.7.*
    """
    name = 'nesC'
    aliases = ['nesc']
    filenames = ['*.nc']
    mimetypes = ['text/x-nescsrc']
    tokens = {
        'statements': [
            # nesC component/interface/concurrency keywords
            (r'(abstract|as|async|atomic|call|command|component|components|'
             r'configuration|event|extends|generic|implementation|includes|'
             r'interface|module|new|norace|post|provides|signal|task|uses)\b',
             Keyword),
            # nesC "network" (external) integer types
            (r'(nx_struct|nx_union|nx_int8_t|nx_int16_t|nx_int32_t|nx_int64_t|'
             r'nx_uint8_t|nx_uint16_t|nx_uint32_t|nx_uint64_t)\b',
             Keyword.Type),
            inherit,
        ],
    }
class ClayLexer(RegexLexer):
    """
    For `Clay <http://claylabs.com/clay/>`_ source.
    *New in Pygments 1.7.*
    """
    name = 'Clay'
    filenames = ['*.clay']
    aliases = ['clay']
    mimetypes = ['text/x-clay']
    tokens = {
        'root': [
            (r'\s', Text),
            # NOTE(review): ``Comment.Singleline`` is not a standard token
            # alias (the standard one is ``Comment.Single``), so styles will
            # not colorize these comments -- verify against upstream.
            (r'//.*?$', Comment.Singleline),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\b(public|private|import|as|record|variant|instance'
             r'|define|overload|default|external|alias'
             r'|rvalue|ref|forward|inline|noinline|forceinline'
             r'|enum|var|and|or|not|if|else|goto|return|while'
             r'|switch|case|break|continue|for|in|true|false|try|catch|throw'
             r'|finally|onerror|staticassert|eval|when|newtype'
             r'|__FILE__|__LINE__|__COLUMN__|__ARG__'
             r')\b', Keyword),
            (r'[~!%^&*+=|:<>/-]', Operator),
            (r'[#(){}\[\],;.]', Punctuation),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\b(true|false)\b', Name.Builtin),
            (r'(?i)[a-z_?][a-z_?0-9]*', Name),
            (r'"""', String, 'tdqs'),
            (r'"', String, 'dqs'),
        ],
        # shared rules for both string flavors
        'strings': [
            (r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape),
            (r'.', String),
        ],
        'nl': [
            (r'\n', String),
        ],
        # double-quoted string: single line
        'dqs': [
            (r'"', String, '#pop'),
            include('strings'),
        ],
        # triple-double-quoted string: may span lines, hence include('nl')
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl'),
        ],
    }
class DLexer(RegexLexer):
    """
    For D source.
    *New in Pygments 1.2.*
    """
    name = 'D'
    filenames = ['*.d', '*.di']
    aliases = ['d']
    mimetypes = ['text/x-dsrc']
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            #(r'\\\n', Text), # line continuations
            # Comments
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            # D's /+ ... +/ comments nest; handled in a dedicated state
            (r'/\+', Comment.Multiline, 'nested_comment'),
            # Keywords
            (r'(abstract|alias|align|asm|assert|auto|body|break|case|cast'
             r'|catch|class|const|continue|debug|default|delegate|delete'
             r'|deprecated|do|else|enum|export|extern|finally|final'
             r'|foreach_reverse|foreach|for|function|goto|if|import|inout'
             r'|interface|invariant|in|is|lazy|mixin|module|new|nothrow|out'
             r'|override|package|pragma|private|protected|public|pure|ref|return'
             r'|scope|static|struct|super|switch|synchronized|template|this'
             r'|throw|try|typedef|typeid|typeof|union|unittest|version|volatile'
             r'|while|with|__traits)\b', Keyword
            ),
            (r'(bool|byte|cdouble|cent|cfloat|char|creal|dchar|double|float'
             r'|idouble|ifloat|int|ireal|long|real|short|ubyte|ucent|uint|ulong'
             r'|ushort|void|wchar)\b', Keyword.Type
            ),
            (r'(false|true|null)\b', Keyword.Constant),
            (r'macro\b', Keyword.Reserved),
            (r'(string|wstring|dstring)\b', Name.Builtin),
            # FloatLiteral
            # -- HexFloat
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+', Number),
            # -- Octal
            (r'0[0-7_]+', Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+', Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
            # CharacterLiteral
            (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
             String.Char
            ),
            # StringLiteral
            # -- WysiwygString
            (r'r"[^"]*"[cwd]?', String),
            # -- AlternateWysiwygString
            (r'`[^`]*`[cwd]?', String),
            # -- DoubleQuotedString
            (r'"(\\\\|\\"|[^"])*"[cwd]?', String),
            # -- EscapeSequence
            (r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
             r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
             String
            ),
            # -- HexString
            (r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
            # -- DelimitedString (one state per delimiter kind, since each
            #    pair must balance independently)
            (r'q"\[', String, 'delimited_bracket'),
            (r'q"\(', String, 'delimited_parenthesis'),
            (r'q"<', String, 'delimited_angle'),
            (r'q"{', String, 'delimited_curly'),
            (r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
            (r'q"(.).*?\1"', String),
            # -- TokenString
            (r'q{', String, 'token_string'),
            # Tokens
            (r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
             r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
             r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation
            ),
            # Identifier
            (r'[a-zA-Z_]\w*', Name),
        ],
        # /+ ... +/ comments: '#push' on every inner /+ so nesting balances
        'nested_comment': [
            (r'[^+/]+', Comment.Multiline),
            (r'/\+', Comment.Multiline, '#push'),
            (r'\+/', Comment.Multiline, '#pop'),
            (r'[+/]', Comment.Multiline),
        ],
        # q{...} token strings contain ordinary D tokens, hence include('root')
        'token_string': [
            (r'{', Punctuation, 'token_string_nest'),
            (r'}', String, '#pop'),
            include('root'),
        ],
        'token_string_nest': [
            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
            include('root'),
        ],
        'delimited_bracket': [
            (r'[^\[\]]+', String),
            (r'\[', String, 'delimited_inside_bracket'),
            (r'\]"', String, '#pop'),
        ],
        'delimited_inside_bracket': [
            (r'[^\[\]]+', String),
            (r'\[', String, '#push'),
            (r'\]', String, '#pop'),
        ],
        'delimited_parenthesis': [
            (r'[^\(\)]+', String),
            (r'\(', String, 'delimited_inside_parenthesis'),
            (r'\)"', String, '#pop'),
        ],
        'delimited_inside_parenthesis': [
            (r'[^\(\)]+', String),
            (r'\(', String, '#push'),
            (r'\)', String, '#pop'),
        ],
        'delimited_angle': [
            (r'[^<>]+', String),
            (r'<', String, 'delimited_inside_angle'),
            (r'>"', String, '#pop'),
        ],
        'delimited_inside_angle': [
            (r'[^<>]+', String),
            (r'<', String, '#push'),
            (r'>', String, '#pop'),
        ],
        'delimited_curly': [
            (r'[^{}]+', String),
            (r'{', String, 'delimited_inside_curly'),
            (r'}"', String, '#pop'),
        ],
        'delimited_inside_curly': [
            (r'[^{}]+', String),
            (r'{', String, '#push'),
            (r'}', String, '#pop'),
        ],
    }
# Subclasses plain Lexer (not RegexLexer): tokenization is a hand-written
# scanner in get_tokens_unprocessed below.
class DelphiLexer(Lexer):
    """
    For `Delphi <http://www.borland.com/delphi/>`_ (Borland Object Pascal),
    Turbo Pascal and Free Pascal source code.
    Additional options accepted:
    `turbopascal`
        Highlight Turbo Pascal specific keywords (default: ``True``).
    `delphi`
        Highlight Borland Delphi specific keywords (default: ``True``).
    `freepascal`
        Highlight Free Pascal specific keywords (default: ``True``).
    `units`
        A list of units that should be considered builtin, supported are
        ``System``, ``SysUtils``, ``Classes`` and ``Math``.
        Default is to consider all of them builtin.
    """
    name = 'Delphi'
    aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
    filenames = ['*.pas']
    mimetypes = ['text/x-pascal']
TURBO_PASCAL_KEYWORDS = [
'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
'const', 'constructor', 'continue', 'destructor', 'div', 'do',
'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
]
DELPHI_KEYWORDS = [
'as', 'class', 'except', 'exports', 'finalization', 'finally',
'initialization', 'is', 'library', 'on', 'property', 'raise',
'threadvar', 'try'
]
FREE_PASCAL_KEYWORDS = [
'dispose', 'exit', 'false', 'new', 'true'
]
BLOCK_KEYWORDS = set([
'begin', 'class', 'const', 'constructor', 'destructor', 'end',
'finalization', 'function', 'implementation', 'initialization',
'label', 'library', 'operator', 'procedure', 'program', 'property',
'record', 'threadvar', 'type', 'unit', 'uses', 'var'
])
FUNCTION_MODIFIERS = set([
'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
'override', 'assembler'
])
# XXX: those aren't global. but currently we know no way for defining
# them just for the type context.
DIRECTIVES = set([
'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
'published', 'public'
])
BUILTIN_TYPES = set([
'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
'widechar', 'widestring', 'word', 'wordbool'
])
BUILTIN_UNITS = {
'System': [
'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
'append', 'arctan', 'assert', 'assigned', 'assignfile',
'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
'dispose', 'doubletocomp', 'endthread', 'enummodules',
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
'findresourcehinstance', 'flush', 'frac', 'freemem',
'get8087cw', 'getdir', 'getlasterror', 'getmem',
'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
'randomize', 'read', 'readln', 'reallocmem',
'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
'set8087cw', 'setlength', 'setlinebreakstyle',
'setmemorymanager', 'setstring', 'settextbuf',
'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
'utf8tounicode', 'val', 'vararrayredim', 'varclear',
'widecharlentostring', 'widecharlentostrvar',
'widechartostring', 'widechartostrvar',
'widestringtoucs4string', 'write', 'writeln'
],
'SysUtils': [
'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
'allocmem', 'ansicomparefilename', 'ansicomparestr',
'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
'ansistrscan', 'ansistrupper', 'ansiuppercase',
'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
'callterminateprocs', 'changefileext', 'charlength',
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
'comparetext', 'createdir', 'createguid', 'currentyear',
'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
'exceptionerrormessage', 'excludetrailingbackslash',
'excludetrailingpathdelimiter', 'expandfilename',
'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
'extractfiledrive', 'extractfileext', 'extractfilename',
'extractfilepath', 'extractrelativepath', 'extractshortpathname',
'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
'getenvironmentvariable', 'getfileversion', 'getformatsettings',
'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
'includetrailingbackslash', 'includetrailingpathdelimiter',
'incmonth', 'initializepackage', 'interlockeddecrement',
'interlockedexchange', 'interlockedexchangeadd',
'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
'outofmemoryerror', 'quotedstr', 'raiselastoserror',
'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
'strtotimedef', 'strupper', 'supports', 'syserrormessage',
'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
'wraptext'
],
'Classes': [
'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
'groupdescendantswith', 'hextobin', 'identtoint',
'initinheritedcomponent', 'inttoident', 'invalidpoint',
'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
'pointsequal', 'readcomponentres', 'readcomponentresex',
'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
'registerclasses', 'registercomponents', 'registerintegerconsts',
'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
'teststreamformat', 'unregisterclass', 'unregisterclasses',
'unregisterintegerconsts', 'unregistermoduleclasses',
'writecomponentresfile'
],
'Math': [
'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
'tan', 'tanh', 'totalvariance', 'variance'
]
}
ASM_REGISTERS = set([
'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
'xmm6', 'xmm7'
])
ASM_INSTRUCTIONS = set([
'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
'xlatb', 'xor'
])
def __init__(self, **options):
Lexer.__init__(self, **options)
self.keywords = set()
if get_bool_opt(options, 'turbopascal', True):
self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
if get_bool_opt(options, 'delphi', True):
self.keywords.update(self.DELPHI_KEYWORDS)
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
self.builtins = set()
for unit in get_list_opt(options, 'units', self.BUILTIN_UNITS.keys()):
self.builtins.update(self.BUILTIN_UNITS[unit])
    def get_tokens_unprocessed(self, text):
        """Hand-written scanner-based tokenizer.
        Pascal is tokenized with an explicit state stack (``initial``,
        ``string``, ``asm``) rather than RegexLexer tables, because the
        highlighting of names (function names, property modifiers,
        labels, builtins) depends on context collected while scanning.
        """
        scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
        stack = ['initial']
        in_function_block = False
        in_property_block = False
        was_dot = False
        next_token_is_function = False
        next_token_is_property = False
        collect_labels = False
        block_labels = set()
        # [parentheses depth, brackets depth] inside a declaration context
        brace_balance = [0, 0]
        while not scanner.eos:
            token = Error
            if stack[-1] == 'initial':
                if scanner.scan(r'\s+'):
                    token = Text
                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    # NOTE(review): compiler directives are written '{$...}',
                    # so the match starts with '{' and this test looks like
                    # it can never fire -- verify against upstream.
                    if scanner.match.startswith('$'):
                        token = Comment.Preproc
                    else:
                        token = Comment.Multiline
                elif scanner.scan(r'//.*?$'):
                    token = Comment.Single
                elif scanner.scan(r'[-+*\/=<>:;,.@\^]'):
                    token = Operator
                    # stop label highlighting on next ";"
                    if collect_labels and scanner.match == ';':
                        collect_labels = False
                elif scanner.scan(r'[\(\)\[\]]+'):
                    token = Punctuation
                    # abort function naming ``foo = Function(...)``
                    next_token_is_function = False
                    # if we are in a function block we count the open
                    # braces because otherwise it's impossible to
                    # determine the end of the modifier context
                    if in_function_block or in_property_block:
                        if scanner.match == '(':
                            brace_balance[0] += 1
                        elif scanner.match == ')':
                            brace_balance[0] -= 1
                        elif scanner.match == '[':
                            brace_balance[1] += 1
                        elif scanner.match == ']':
                            brace_balance[1] -= 1
                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
                    lowercase_name = scanner.match.lower()
                    if lowercase_name == 'result':
                        token = Name.Builtin.Pseudo
                    elif lowercase_name in self.keywords:
                        token = Keyword
                        # if we are in a special block and a
                        # block ending keyword occurs (and the parenthesis
                        # is balanced) we end the current block context
                        if (in_function_block or in_property_block) and \
                           lowercase_name in self.BLOCK_KEYWORDS and \
                           brace_balance[0] <= 0 and \
                           brace_balance[1] <= 0:
                            in_function_block = False
                            in_property_block = False
                            brace_balance = [0, 0]
                            block_labels = set()
                        if lowercase_name in ('label', 'goto'):
                            collect_labels = True
                        elif lowercase_name == 'asm':
                            stack.append('asm')
                        elif lowercase_name == 'property':
                            in_property_block = True
                            next_token_is_property = True
                        elif lowercase_name in ('procedure', 'operator',
                                                'function', 'constructor',
                                                'destructor'):
                            in_function_block = True
                            next_token_is_function = True
                    # we are in a function block and the current name
                    # is in the set of registered modifiers. highlight
                    # it as pseudo keyword
                    elif in_function_block and \
                         lowercase_name in self.FUNCTION_MODIFIERS:
                        token = Keyword.Pseudo
                    # if we are in a property highlight some more
                    # modifiers
                    elif in_property_block and \
                         lowercase_name in ('read', 'write'):
                        token = Keyword.Pseudo
                        next_token_is_function = True
                    # if the last iteration set next_token_is_function
                    # to true we now want this name highlighted as
                    # function. so do that and reset the state
                    elif next_token_is_function:
                        # Look if the next token is a dot. If yes it's
                        # not a function, but a class name and the
                        # part after the dot a function name
                        if scanner.test(r'\s*\.\s*'):
                            token = Name.Class
                        # it's not a dot, our job is done
                        else:
                            token = Name.Function
                            next_token_is_function = False
                    # same for properties
                    elif next_token_is_property:
                        token = Name.Property
                        next_token_is_property = False
                    # Highlight this token as label and add it
                    # to the list of known labels
                    elif collect_labels:
                        token = Name.Label
                        block_labels.add(scanner.match.lower())
                    # name is in list of known labels
                    elif lowercase_name in block_labels:
                        token = Name.Label
                    elif lowercase_name in self.BUILTIN_TYPES:
                        token = Keyword.Type
                    elif lowercase_name in self.DIRECTIVES:
                        token = Keyword.Pseudo
                    # builtins are just builtins if the token
                    # before isn't a dot
                    elif not was_dot and lowercase_name in self.builtins:
                        token = Name.Builtin
                    else:
                        token = Name
                elif scanner.scan(r"'"):
                    token = String
                    stack.append('string')
                elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
                    token = String.Char
                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
                    token = Number.Hex
                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
                    token = Number.Integer
                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
                    token = Number.Float
                else:
                    # if the stack depth is deeper than once, pop
                    if len(stack) > 1:
                        stack.pop()
                    scanner.get_char()
            elif stack[-1] == 'string':
                # '' is the escape for a literal quote inside a string
                if scanner.scan(r"''"):
                    token = String.Escape
                elif scanner.scan(r"'"):
                    token = String
                    stack.pop()
                elif scanner.scan(r"[^']*"):
                    token = String
                else:
                    scanner.get_char()
                    stack.pop()
            elif stack[-1] == 'asm':
                if scanner.scan(r'\s+'):
                    token = Text
                elif scanner.scan(r'end'):
                    token = Keyword
                    stack.pop()
                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    # NOTE(review): same '{$...}' caveat as in 'initial'
                    if scanner.match.startswith('$'):
                        token = Comment.Preproc
                    else:
                        token = Comment.Multiline
                elif scanner.scan(r'//.*?$'):
                    token = Comment.Single
                elif scanner.scan(r"'"):
                    token = String
                    stack.append('string')
                elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
                    token = Name.Label
                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
                    lowercase_name = scanner.match.lower()
                    if lowercase_name in self.ASM_INSTRUCTIONS:
                        token = Keyword
                    elif lowercase_name in self.ASM_REGISTERS:
                        token = Name.Builtin
                    else:
                        token = Name
                elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
                    token = Operator
                elif scanner.scan(r'[\(\)\[\]]+'):
                    token = Punctuation
                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
                    token = Number.Hex
                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
                    token = Number.Integer
                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
                    token = Number.Float
                else:
                    scanner.get_char()
                    stack.pop()
            # remember whether the last significant token was a dot, so that
            # builtins right after ``Unit.`` are not highlighted as builtins
            if scanner.match.strip():
                was_dot = scanner.match == '.'
            yield scanner.start_pos, token, scanner.match or ''
class DylanLexer(RegexLexer):
    """
    For the `Dylan <http://www.opendylan.org/>`_ language.

    Names are matched generically by the ``root`` state and then re-flagged
    in :meth:`get_tokens_unprocessed` against the word sets below (Dylan is
    case-insensitive, hence ``flags = re.IGNORECASE`` and the ``.lower()``
    comparisons).

    *New in Pygments 0.7.*
    """

    name = 'Dylan'
    aliases = ['dylan']
    filenames = ['*.dylan', '*.dyl', '*.intr']
    mimetypes = ['text/x-dylan']

    flags = re.IGNORECASE

    # Definition adjectives and declarators, highlighted as Name.Builtin.
    builtins = set([
        'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
        'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
        'each-subclass', 'exception', 'exclude', 'function', 'generic',
        'handler', 'inherited', 'inline', 'inline-only', 'instance',
        'interface', 'import', 'keyword', 'library', 'macro', 'method',
        'module', 'open', 'primary', 'required', 'sealed', 'sideways',
        'singleton', 'slot', 'thread', 'variable', 'virtual'])

    # Core statement/definition words.
    keywords = set([
        'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
        'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
        'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
        'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
        'while'])

    # Operator spellings that lex as names (see ``valid_name`` below) and
    # are re-flagged as Operator.  The original listed '|' twice; the
    # duplicate has been removed.
    operators = set([
        '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
        '>', '>=', '&'])

    # Standard Dylan library functions, highlighted as Name.Builtin.
    functions = set([
        'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
        'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
        'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
        'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
        'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
        'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
        'condition-format-arguments', 'condition-format-string', 'conjoin',
        'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
        'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
        'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
        'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
        'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
        'function-arguments', 'function-return-values',
        'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
        'generic-function-methods', 'head', 'head-setter', 'identity',
        'initialize', 'instance?', 'integral?', 'intersection',
        'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
        'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
        'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
        'min', 'modulo', 'negative', 'negative?', 'next-method',
        'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
        'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
        'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
        'remove-duplicates', 'remove-duplicates!', 'remove-key!',
        'remove-method', 'replace-elements!', 'replace-subsequence!',
        'restart-query', 'return-allowed?', 'return-description',
        'return-query', 'reverse', 'reverse!', 'round', 'round/',
        'row-major-index', 'second', 'second-setter', 'shallow-copy',
        'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
        'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
        'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
        'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
        'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
        'vector', 'zero?'])

    # Dylan identifiers may contain many operator characters; an optional
    # leading backslash escapes an operator name (e.g. ``\+``).
    valid_name = '\\\\?[a-zA-Z0-9' + re.escape('!&*<>|^$%@_-+~?/=') + ']+'

    def get_tokens_unprocessed(self, text):
        """Re-flag generic Name tokens against the word sets above."""
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                lowercase_value = value.lower()
                if lowercase_value in self.builtins:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.keywords:
                    yield index, Keyword, value
                    continue
                if lowercase_value in self.functions:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.operators:
                    yield index, Operator, value
                    continue
            yield index, token, value

    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Text),

            # single line comment
            (r'//.*?\n', Comment.Single),

            # lid header
            (r'([A-Za-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
                bygroups(Name.Attribute, Operator, Text, String)),

            ('', Text, 'code')  # no header match, switch to code
        ],
        'code': [
            # Whitespace
            (r'\s+', Text),

            # single line comment
            (r'//.*?\n', Comment.Single),

            # multi-line comment
            (r'/\*', Comment.Multiline, 'comment'),

            # strings and characters
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),

            # binary integer
            (r'#[bB][01]+', Number),

            # octal integer
            (r'#[oO][0-7]+', Number.Oct),

            # floating point
            (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),

            # decimal integer
            (r'[-+]?\d+', Number.Integer),

            # hex integer
            (r'#[xX][0-9a-fA-F]+', Number.Hex),

            # Macro parameters
            (r'(\?' + valid_name + ')(:)'
             r'(token|name|variable|expression|body|case-body|\*)',
                bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
                bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'\?' + valid_name, Name.Tag),

            # Punctuation
            (r'(=>|::|#\(|#\[|##|\?|\?\?|\?=|[(){}\[\],\.;])', Punctuation),

            # Most operators are picked up as names and then re-flagged.
            # This one isn't valid in a name though, so we pick it up now.
            (r':=', Operator),

            # Pick up #t / #f before we match other stuff with #.
            (r'#[tf]', Literal),

            # #"foo" style keywords
            (r'#"', String.Symbol, 'keyword'),

            # #rest, #key, #all-keys, etc.
            (r'#[a-zA-Z0-9-]+', Keyword),

            # required-init-keyword: style keywords.
            (valid_name + ':', Keyword),

            # class names
            (r'<' + valid_name + '>', Name.Class),

            # define variable forms.
            (r'\*' + valid_name + '\*', Name.Variable.Global),

            # define constant forms.
            (r'\$' + valid_name, Name.Constant),

            # everything else. We re-flag some of these in the method above.
            (valid_name, Name),
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'keyword': [
            (r'"', String.Symbol, '#pop'),
            (r'[^\\"]+', String.Symbol),  # all other characters
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ]
    }
class DylanLidLexer(RegexLexer):
    """
    For Dylan LID (Library Interchange Definition) files.

    LID files are flat ``Header: value`` lists (with indented continuation
    lines), so the grammar is a single state.

    *New in Pygments 1.6.*
    """

    name = 'DylanLID'
    aliases = ['dylan-lid', 'lid']
    filenames = ['*.lid', '*.hdp']
    mimetypes = ['text/x-dylan-lid']

    # Header names are matched case-insensitively.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Text),

            # single line comment
            (r'//.*?\n', Comment.Single),

            # lid header: "Name: value", where the value may continue on
            # following lines that start with whitespace
            (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
                bygroups(Name.Attribute, Operator, Text, String)),
        ]
    }
class DylanConsoleLexer(Lexer):
    """
    For Dylan interactive console output like:

    .. sourcecode:: dylan-console

        ? let a = 1;
        => 1
        ? a
        => 1

    This is based on a copy of the RubyConsoleLexer.

    *New in Pygments 1.6.*
    """
    name = 'Dylan session'
    aliases = ['dylan-console', 'dylan-repl']
    filenames = ['*.dylan-console']
    mimetypes = ['text/x-dylan-console']

    _line_re = re.compile('.*?\n')
    _prompt_re = re.compile('\?| ')

    def get_tokens_unprocessed(self, text):
        # Lines that start with a prompt are buffered up and lexed as one
        # chunk of Dylan source; the prompt tokens are re-inserted at the
        # right offsets via do_insertions().  Non-prompt lines are program
        # output and flush the buffer.
        code_lexer = DylanLexer(**self.options)
        buffered = ''
        prompt_tokens = []
        for line_match in self._line_re.finditer(text):
            line = line_match.group()
            prompt = self._prompt_re.match(line)
            if prompt is None:
                # Output line: emit any buffered code first.
                if buffered:
                    for item in do_insertions(
                            prompt_tokens,
                            code_lexer.get_tokens_unprocessed(buffered)):
                        yield item
                    buffered = ''
                    prompt_tokens = []
                yield line_match.start(), Generic.Output, line
            else:
                pos = prompt.end()
                prompt_tokens.append(
                    (len(buffered), [(0, Generic.Prompt, line[:pos])]))
                buffered += line[pos:]
        # Flush whatever code is still pending at end of input.
        if buffered:
            for item in do_insertions(
                    prompt_tokens,
                    code_lexer.get_tokens_unprocessed(buffered)):
                yield item
def objective(baselexer):
    """
    Generate a subclass of baselexer that accepts the Objective-C syntax
    extensions.

    Used to build both ObjectiveCLexer (from CLexer) and ObjectiveCppLexer
    (from CppLexer); the generated class relies on the ``inherit`` marker
    to splice these rules into the base lexer's states.
    """
    # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,
    # since that's quite common in ordinary C/C++ files.  It's OK to match
    # JavaDoc/Doxygen keywords that only apply to Objective-C, mind.
    #
    # The upshot of this is that we CANNOT match @class or @interface
    _oc_keywords = re.compile(r'@(?:end|implementation|protocol)')

    # Matches [ <ws>? identifier <ws> ( identifier <ws>? ] | identifier? : )
    # (note the identifier is *optional* when there is a ':'!)
    _oc_message = re.compile(r'\[\s*[a-zA-Z_][a-zA-Z0-9_]*\s+'
                             r'(?:[a-zA-Z_][a-zA-Z0-9_]*\s*\]|'
                             r'(?:[a-zA-Z_][a-zA-Z0-9_]*)?:)')

    class GeneratedObjectiveCVariant(baselexer):
        """
        Implements Objective-C syntax on top of an existing C family lexer.
        """
        tokens = {
            'statements': [
                (r'@"', String, 'string'),
                (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
                 String.Char),
                (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
                (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
                (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),
                (r'@0[0-7]+[Ll]?', Number.Oct),
                (r'@\d+[Ll]?', Number.Integer),
                (r'(in|@selector|@private|@protected|@public|@encode|'
                 r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
                 r'@synthesize|@dynamic|@optional)\b', Keyword),
                (r'(id|Class|IMP|SEL|BOOL|IBOutlet|IBAction|unichar)\b',
                 Keyword.Type),
                # NOTE(review): the trailing \n means @true/@false/@YES/@NO
                # only match at end of line; elsewhere they fall through to
                # the '@' rule below plus the bare YES|NO rule.  Looks like
                # it should be \b -- confirm before changing.
                (r'@(true|false|YES|NO)\n', Name.Builtin),
                (r'(YES|NO|nil)\b', Name.Builtin),
                (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
                 ('#pop', 'oc_classname')),
                (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
                 ('#pop', 'oc_forward_classname')),
                # @ can also prefix other expressions like @{...} or @(...)
                (r'@', Punctuation),
                inherit,
            ],
            'oc_classname' : [
                # interface definition that inherits
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?',
                 bygroups(Name.Class, Text, Name.Class), '#pop'),
                # interface definition for a category
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\([a-zA-Z$_][a-zA-Z0-9$_]*\))',
                 bygroups(Name.Class, Text, Name.Label), '#pop'),
                # simple interface / implementation
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop')
            ],
            'oc_forward_classname' : [
                # comma-separated forward declarations recurse into this state
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*,\s*)',
                 bygroups(Name.Class, Text), 'oc_forward_classname'),
                ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*;?)',
                 bygroups(Name.Class, Text), '#pop')
            ],
            'root': [
                # methods
                (r'^([-+])(\s*)'                # method marker
                 r'(\(.*?\))?(\s*)'             # return type
                 r'([a-zA-Z$_][a-zA-Z0-9$_]*:?)',  # begin of method name
                 bygroups(Keyword, Text, using(this),
                          Text, Name.Function),
                 'method'),
                inherit,
            ],
            'method': [
                include('whitespace'),
                # TODO unsure if ellipses are allowed elsewhere, see
                # discussion in Issue 789
                (r',', Punctuation),
                (r'\.\.\.', Punctuation),
                (r'(\(.*?\))([a-zA-Z$_][a-zA-Z0-9$_]*)', bygroups(using(this),
                                                                  Name.Variable)),
                (r'[a-zA-Z$_][a-zA-Z0-9$_]*:', Name.Function),
                (';', Punctuation, '#pop'),
                ('{', Punctuation, 'function'),
                ('', Text, '#pop'),
            ],
        }

        def analyse_text(text):
            # Unambiguous Objective-C keyword -> certain.
            if _oc_keywords.search(text):
                return 1.0
            elif '@"' in text: # strings
                return 0.8
            elif _oc_message.search(text):
                return 0.8
            return 0

    return GeneratedObjectiveCVariant
class ObjectiveCLexer(objective(CLexer)):
    """
    For Objective-C source code with preprocessor directives.

    Generated by applying :func:`objective` to :class:`CLexer`.
    """

    name = 'Objective-C'
    aliases = ['objective-c', 'objectivec', 'obj-c', 'objc']
    filenames = ['*.m', '*.h']
    mimetypes = ['text/x-objective-c']
    priority = 0.05    # Lower than C
class ObjectiveCppLexer(objective(CppLexer)):
    """
    For Objective-C++ source code with preprocessor directives.

    Generated by applying :func:`objective` to :class:`CppLexer`.
    """

    name = 'Objective-C++'
    aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++']
    filenames = ['*.mm', '*.hh']
    mimetypes = ['text/x-objective-c++']
    priority = 0.05    # Lower than C++
class FortranLexer(RegexLexer):
    """
    Lexer for FORTRAN 90 code.

    *New in Pygments 0.10.*
    """
    name = 'Fortran'
    aliases = ['fortran']
    filenames = ['*.f', '*.f90', '*.F', '*.F90']
    mimetypes = ['text/x-fortran']

    # Fortran keywords are case-insensitive.
    flags = re.IGNORECASE

    # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
    # Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
    # Logical (?): NOT, AND, OR, EQV, NEQV

    # Builtins:
    # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
    tokens = {
        'root': [
            (r'!.*\n', Comment),
            include('strings'),
            include('core'),
            # anything not matched by 'core' is a plain variable name
            (r'[a-z][a-z0-9_]*', Name.Variable),
            include('nums'),
            (r'[\s]+', Text),
        ],
        'core': [
            # Statements
            (r'\b(ABSTRACT|ACCEPT|ALLOCATABLE|ALLOCATE|ARRAY|ASSIGN|ASYNCHRONOUS|'
             r'BACKSPACE|BIND|BLOCK( DATA)?|BYTE|CALL|CASE|CLASS|CLOSE|COMMON|CONTAINS|'
             r'CONTINUE|CYCLE|DATA|DEALLOCATE|DECODE|DEFERRED|DIMENSION|DO|'
             r'ELEMENTAL|ELSE|ENCODE|END( FILE)?|ENDIF|ENTRY|ENUMERATOR|EQUIVALENCE|'
             r'EXIT|EXTERNAL|EXTRINSIC|FINAL|FORALL|FORMAT|FUNCTION|GENERIC|'
             r'GOTO|IF|IMPLICIT|IMPORT|INCLUDE|INQUIRE|INTENT|INTERFACE|'
             r'INTRINSIC|MODULE|NAMELIST|NULLIFY|NONE|NON_INTRINSIC|'
             r'NON_OVERRIDABLE|NOPASS|OPEN|OPTIONAL|OPTIONS|PARAMETER|PASS|'
             r'PAUSE|POINTER|PRINT|PRIVATE|PROGRAM|PROTECTED|PUBLIC|PURE|READ|'
             r'RECURSIVE|RESULT|RETURN|REWIND|SAVE|SELECT|SEQUENCE|STOP|SUBROUTINE|'
             r'TARGET|THEN|TYPE|USE|VALUE|VOLATILE|WHERE|WRITE|WHILE)\s*\b',
             Keyword),

            # Data Types (including the ISO C binding kinds)
            (r'\b(CHARACTER|COMPLEX|DOUBLE PRECISION|DOUBLE COMPLEX|INTEGER|'
             r'LOGICAL|REAL|C_INT|C_SHORT|C_LONG|C_LONG_LONG|C_SIGNED_CHAR|'
             r'C_SIZE_T|C_INT8_T|C_INT16_T|C_INT32_T|C_INT64_T|C_INT_LEAST8_T|'
             r'C_INT_LEAST16_T|C_INT_LEAST32_T|C_INT_LEAST64_T|C_INT_FAST8_T|'
             r'C_INT_FAST16_T|C_INT_FAST32_T|C_INT_FAST64_T|C_INTMAX_T|'
             r'C_INTPTR_T|C_FLOAT|C_DOUBLE|C_LONG_DOUBLE|C_FLOAT_COMPLEX|'
             r'C_DOUBLE_COMPLEX|C_LONG_DOUBLE_COMPLEX|C_BOOL|C_CHAR|C_PTR|'
             r'C_FUNPTR)\s*\b',
             Keyword.Type),

            # Operators
            (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),

            (r'(::)', Keyword.Declaration),

            (r'[(),:&%;]', Punctuation),

            # Intrinsics
            (r'\b(Abort|Abs|Access|AChar|ACos|AdjustL|AdjustR|AImag|AInt|Alarm|'
             r'All|Allocated|ALog|AMax|AMin|AMod|And|ANInt|Any|ASin|Associated|'
             r'ATan|BesJ|BesJN|BesY|BesYN|Bit_Size|BTest|CAbs|CCos|Ceiling|'
             r'CExp|Char|ChDir|ChMod|CLog|Cmplx|Command_Argument_Count|Complex|'
             r'Conjg|Cos|CosH|Count|CPU_Time|CShift|CSin|CSqRt|CTime|C_Funloc|'
             r'C_Loc|C_Associated|C_Null_Ptr|C_Null_Funptr|C_F_Pointer|'
             r'C_Null_Char|C_Alert|C_Backspace|C_Form_Feed|C_New_Line|'
             r'C_Carriage_Return|C_Horizontal_Tab|C_Vertical_Tab|'
             r'DAbs|DACos|DASin|DATan|Date_and_Time|DbesJ|'
             r'DbesJ|DbesJN|DbesY|DbesY|DbesYN|Dble|DCos|DCosH|DDiM|DErF|DErFC|'
             r'DExp|Digits|DiM|DInt|DLog|DLog|DMax|DMin|DMod|DNInt|Dot_Product|'
             r'DProd|DSign|DSinH|DSin|DSqRt|DTanH|DTan|DTime|EOShift|Epsilon|'
             r'ErF|ErFC|ETime|Exit|Exp|Exponent|Extends_Type_Of|FDate|FGet|'
             r'FGetC|Float|Floor|Flush|FNum|FPutC|FPut|Fraction|FSeek|FStat|'
             r'FTell|GError|GetArg|Get_Command|Get_Command_Argument|'
             r'Get_Environment_Variable|GetCWD|GetEnv|GetGId|GetLog|GetPId|'
             r'GetUId|GMTime|HostNm|Huge|IAbs|IAChar|IAnd|IArgC|IBClr|IBits|'
             r'IBSet|IChar|IDate|IDiM|IDInt|IDNInt|IEOr|IErrNo|IFix|Imag|'
             r'ImagPart|Index|Int|IOr|IRand|IsaTty|IShft|IShftC|ISign|'
             r'Iso_C_Binding|Is_Iostat_End|Is_Iostat_Eor|ITime|Kill|Kind|'
             r'LBound|Len|Len_Trim|LGe|LGt|Link|LLe|LLt|LnBlnk|Loc|Log|'
             r'Logical|Long|LShift|LStat|LTime|MatMul|Max|MaxExponent|MaxLoc|'
             r'MaxVal|MClock|Merge|Move_Alloc|Min|MinExponent|MinLoc|MinVal|'
             r'Mod|Modulo|MvBits|Nearest|New_Line|NInt|Not|Or|Pack|PError|'
             r'Precision|Present|Product|Radix|Rand|Random_Number|Random_Seed|'
             r'Range|Real|RealPart|Rename|Repeat|Reshape|RRSpacing|RShift|'
             r'Same_Type_As|Scale|Scan|Second|Selected_Int_Kind|'
             r'Selected_Real_Kind|Set_Exponent|Shape|Short|Sign|Signal|SinH|'
             r'Sin|Sleep|Sngl|Spacing|Spread|SqRt|SRand|Stat|Sum|SymLnk|'
             r'System|System_Clock|Tan|TanH|Time|Tiny|Transfer|Transpose|Trim|'
             r'TtyNam|UBound|UMask|Unlink|Unpack|Verify|XOr|ZAbs|ZCos|ZExp|'
             r'ZLog|ZSin|ZSqRt)\s*\b',
             Name.Builtin),

            # Booleans
            (r'\.(true|false)\.', Name.Builtin),
            # Comparing Operators
            (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
        ],

        'strings': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
        ],

        'nums': [
            (r'\d+(?![.Ee])', Number.Integer),
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
        ],
    }
class GLShaderLexer(RegexLexer):
    """
    GLSL (OpenGL Shader) lexer.

    *New in Pygments 1.1.*
    """
    name = 'GLSL'
    aliases = ['glsl']
    filenames = ['*.vert', '*.frag', '*.geo']
    mimetypes = ['text/x-glslsrc']

    tokens = {
        'root': [
            (r'^#.*', Comment.Preproc),
            (r'//.*', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
             Operator),
            (r'[?:]', Operator), # quick hack for ternary
            (r'\bdefined\b', Operator),
            (r'[;{}(),\[\]]', Punctuation),
            #FIXME when e is present, no decimal point needed
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
            (r'0[xX][0-9a-fA-F]*', Number.Hex),
            (r'0[0-7]*', Number.Oct),
            (r'[1-9][0-9]*', Number.Integer),
            # keywords that are actually valid in current GLSL
            (r'\b(attribute|const|uniform|varying|centroid|break|continue|'
             r'do|for|while|if|else|in|out|inout|float|int|void|bool|true|'
             r'false|invariant|discard|return|mat[234]|mat[234]x[234]|'
             r'vec[234]|[ib]vec[234]|sampler[123]D|samplerCube|'
             r'sampler[12]DShadow|struct)\b', Keyword),
            # words reserved by the spec for future use
            (r'\b(asm|class|union|enum|typedef|template|this|packed|goto|'
             r'switch|default|inline|noinline|volatile|public|static|extern|'
             r'external|interface|long|short|double|half|fixed|unsigned|'
             r'lowp|mediump|highp|precision|input|output|hvec[234]|'
             r'[df]vec[234]|sampler[23]DRect|sampler2DRectShadow|sizeof|'
             r'cast|namespace|using)\b', Keyword), #future use
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
            (r'\.', Punctuation),
            (r'\s+', Text),
        ],
    }
class PrologLexer(RegexLexer):
    """
    Lexer for Prolog files.
    """
    name = 'Prolog'
    aliases = ['prolog']
    filenames = ['*.prolog', '*.pro', '*.pl']
    mimetypes = ['text/x-prolog']

    # re.UNICODE so the wide atom/variable character classes below work.
    flags = re.UNICODE

    tokens = {
        'root': [
            (r'^#.*', Comment.Single),
            (r'/\*', Comment.Multiline, 'nested-comment'),
            (r'%.*', Comment.Single),
            # character literal
            (r'0\'.', String.Char),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            # literal with prepended base
            (r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+', Number.Integer),
            (r'[\[\](){}|.,;!]', Punctuation),
            (r':-|-->', Punctuation),
            (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
             r'\\[0-7]+\\|\\[\w\W]|[^"])*"', String.Double),
            (r"'(?:''|[^'])*'", String.Atom), # quoted atom
            # Needs to not be followed by an atom.
            #(r'=(?=\s|[a-zA-Z\[])', Operator),
            (r'is\b', Operator),
            (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
             Operator),
            (r'(mod|div|not)\b', Operator),
            (r'_', Keyword), # The don't-care variable
            (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
            # lowercase (incl. wide Unicode ranges) atom followed by :- or -->
            (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             u'(\\s*)(:-|-->)',
             bygroups(Name.Function, Text, Operator)), # function defn
            # atom followed by an open paren: a call
            (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             u'(\\s*)(\\()',
             bygroups(Name.Function, Text, Punctuation)),
            (u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
             String.Atom), # atom, characters
            # This one includes !
            (u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+',
             String.Atom), # atom, graphics
            (r'[A-Z_][A-Za-z0-9_]*', Name.Variable),
            (u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
        ],
        'nested-comment': [
            (r'\*/', Comment.Multiline, '#pop'),
            (r'/\*', Comment.Multiline, '#push'),
            (r'[^*/]+', Comment.Multiline),
            (r'[*/]', Comment.Multiline),
        ],
    }

    def analyse_text(text):
        # ':-' (rule neck) is a strong hint that this is Prolog.
        return ':-' in text
class CythonLexer(RegexLexer):
    """
    For Pyrex and `Cython <http://cython.org>`_ source code.

    Extends the usual Python token set with ``cdef``/``cpdef``, C type
    casts ``<type>``, and the ``DEF``/``IF`` compile-time directives.

    *New in Pygments 1.1.*
    """

    name = 'Cython'
    aliases = ['cython', 'pyx', 'pyrex']
    filenames = ['*.pyx', '*.pxd', '*.pxi']
    mimetypes = ['text/x-cython', 'application/x-cython']

    tokens = {
        'root': [
            (r'\n', Text),
            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
            (r'[^\S\n]+', Text),
            (r'#.*$', Comment),
            (r'[]{}:(),;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # C type cast, e.g. <int?>
            (r'(<)([a-zA-Z0-9.?]+)(>)',
             bygroups(Punctuation, Keyword.Type, Punctuation)),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
            # 'for x from a <= x < b' style bounded loop
            (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
             bygroups(Keyword, Number.Integer, Operator, Name, Operator,
                      Name, Punctuation)),
            include('keywords'),
            (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
            (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
            (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
            include('builtins'),
            include('backtick'),
            # raw strings before normal strings, triple before single quotes
            ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
            ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
            ('[uU]?"""', String, combined('stringescape', 'tdqs')),
            ("[uU]?'''", String, combined('stringescape', 'tsqs')),
            ('[uU]?"', String, combined('stringescape', 'dqs')),
            ("[uU]?'", String, combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
        ],
        'keywords': [
            (r'(assert|break|by|continue|ctypedef|del|elif|else|except\??|exec|'
             r'finally|for|gil|global|if|include|lambda|nogil|pass|print|raise|'
             r'return|try|while|yield|as|with)\b', Keyword),
            # compile-time directives
            (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
        ],
        'builtins': [
            (r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|buffer|'
             r'bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|'
             r'complex|delattr|dict|dir|divmod|enumerate|eval|execfile|exit|'
             r'file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|'
             r'input|int|intern|isinstance|issubclass|iter|len|list|locals|'
             r'long|map|max|min|next|object|oct|open|ord|pow|property|range|'
             r'raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|'
             r'sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|'
             r'vars|xrange|zip)\b', Name.Builtin),
            # NULL is a Cython-only pseudo-constant
            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
             r')\b', Name.Builtin.Pseudo),
            (r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
             r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
             r'Exception|FloatingPointError|FutureWarning|GeneratorExit|IOError|'
             r'ImportError|ImportWarning|IndentationError|IndexError|KeyError|'
             r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
             r'NotImplemented|NotImplementedError|OSError|OverflowError|'
             r'OverflowWarning|PendingDeprecationWarning|ReferenceError|'
             r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
             r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
             r'TypeError|UnboundLocalError|UnicodeDecodeError|'
             r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
             r'UnicodeWarning|UserWarning|ValueError|Warning|ZeroDivisionError'
             r')\b', Name.Exception),
        ],
        'numbers': [
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'backtick': [
            ('`.*?`', String.Backtick),
        ],
        'name': [
            (r'@[a-zA-Z0-9_]+', Name.Decorator),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        'cdef': [
            # what follows 'cdef'/'cpdef': modifiers, a type, or a name
            (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
            (r'(struct|enum|union|class)\b', Keyword),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(?=[(:#=]|$)',
             bygroups(Name.Function, Text), '#pop'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(,)',
             bygroups(Name.Function, Text, Punctuation)),
            (r'from\b', Keyword, '#pop'),
            (r'as\b', Keyword),
            (r':', Punctuation, '#pop'),
            (r'(?=["\'])', Text, '#pop'),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Keyword.Type),
            (r'.', Text),
        ],
        'classname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'import': [
            (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
            (r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
            (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
            (r'', Text, '#pop') # all else: go back
        ],
        'fromimport': [
            (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
            (r'[a-zA-Z_.][a-zA-Z0-9_.]*', Name.Namespace),
            # ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
            (r'', Text, '#pop'),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings': [
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
class ValaLexer(RegexLexer):
    """
    For Vala source code with preprocessor directives.

    *New in Pygments 1.1.*
    """
    name = 'Vala'
    aliases = ['vala', 'vapi']
    filenames = ['*.vala', '*.vapi']
    mimetypes = ['text/x-vala']

    tokens = {
        'whitespace': [
            # '#if 0' blocks are treated as preprocessor-disabled comments
            (r'^\s*#if\s+0', Comment.Preproc, 'if0'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            # simple code attributes, e.g. [Compact]
            (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
             bygroups(Punctuation, Name.Decorator, Punctuation)),
            # TODO: "correctly" parse complex code attributes
            (r'(\[)(CCode|(?:Integer|Floating)Type)',
             bygroups(Punctuation, Name.Decorator)),
            (r'[()\[\],.]', Punctuation),
            (r'(as|base|break|case|catch|construct|continue|default|delete|do|'
             r'else|enum|finally|for|foreach|get|if|in|is|lock|new|out|params|'
             r'return|set|sizeof|switch|this|throw|try|typeof|while|yield)\b',
             Keyword),
            (r'(abstract|const|delegate|dynamic|ensures|extern|inline|internal|'
             r'override|owned|private|protected|public|ref|requires|signal|'
             r'static|throws|unowned|var|virtual|volatile|weak|yields)\b',
             Keyword.Declaration),
            (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text),
             'namespace'),
            (r'(class|errordomain|interface|struct)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Name.Attribute)),
            # void is an actual keyword, others are in glib-2.0.vapi
            (r'(void|bool|char|double|float|int|int8|int16|int32|int64|long|'
             r'short|size_t|ssize_t|string|time_t|uchar|uint|uint8|uint16|'
             r'uint32|uint64|ulong|unichar|ushort)\b', Keyword.Type),
            (r'(true|false|null)\b', Name.Builtin),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'root': [
            include('whitespace'),
            ('', Text, 'statement'),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'if0': [
            # nested #if pushes; matching #else/#elif/#endif pops
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ],
        'class': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'namespace': [
            (r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop')
        ],
    }
class OocLexer(RegexLexer):
    """
    For `Ooc <http://ooc-lang.org/>`_ source code

    *New in Pygments 1.2.*
    """
    name = 'Ooc'
    aliases = ['ooc']
    filenames = ['*.ooc']
    mimetypes = ['text/x-ooc']

    tokens = {
        'root': [
            (r'\b(class|interface|implement|abstract|extends|from|'
             r'this|super|new|const|final|static|import|use|extern|'
             r'inline|proto|break|continue|fallthrough|operator|if|else|for|'
             r'while|do|switch|case|as|in|version|return|true|false|null)\b',
             Keyword),
            (r'include\b', Keyword, 'include'),
            (r'(cover)([ \t]+)(from)([ \t]+)([a-zA-Z0-9_]+[*@]?)',
             bygroups(Keyword, Text, Keyword, Text, Name.Class)),
            # named function suffix, e.g. 'func ~mySuffix'
            (r'(func)((?:[ \t]|\\\n)+)(~[a-z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Function)),
            (r'\bfunc\b', Keyword),
            # Note: %= and ^= not listed on http://ooc-lang.org/syntax
            (r'//.*', Comment),
            (r'(?s)/\*.*?\*/', Comment.Multiline),
            (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
             r'&&?|\|\|?|\^=?)', Operator),
            # member access: the name after '.' is a function call
            (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
                                                 Name.Function)),
            (r'[A-Z][A-Z0-9_]+', Name.Constant),
            # class names may carry pointer/reference/array suffixes
            (r'[A-Z][a-zA-Z0-9_]*([@*]|\[[ \t]*\])?', Name.Class),

            # lowercase name directly followed by '(' is a function call
            (r'([a-z][a-zA-Z0-9_]*(?:~[a-z][a-zA-Z0-9_]*)?)((?:[ \t]|\\\n)*)(?=\()',
             bygroups(Name.Function, Text)),
            (r'[a-z][a-zA-Z0-9_]*', Name.Variable),

            # : introduces types
            (r'[:(){}\[\];,]', Punctuation),

            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'0c[0-9]+', Number.Oct),
            (r'0b[01]+', Number.Binary),
            (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
            (r'[0-9_]+', Number.Decimal),

            (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\"])*"',
             String.Double),
            (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'@', Punctuation), # pointer dereference
            (r'\.', Punctuation), # imports or chain operator

            (r'\\[ \t\n]', Text),
            (r'[ \t]+', Text),
        ],
        'include': [
            (r'[\w/]+', Name),
            (r',', Punctuation),
            (r'[ \t]', Text),
            (r'[;\n]', Text, '#pop'),
        ],
    }
class GoLexer(RegexLexer):
    """
    For `Go <http://golang.org>`_ source.
    """
    name = 'Go'
    filenames = ['*.go']
    aliases = ['go']
    mimetypes = ['text/x-gosrc']

    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuations
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(import|package)\b', Keyword.Namespace),
            (r'(var|func|struct|map|chan|type|interface|const)\b', Keyword.Declaration),
            (r'(break|default|select|case|defer|go'
             r'|else|goto|switch|fallthrough|if|range'
             r'|continue|for|return)\b', Keyword),
            (r'(true|false|iota|nil)\b', Keyword.Constant),
            # It seems the builtin types aren't actually keywords, but
            # can be used as functions. So we need two declarations.
            (r'(uint|uint8|uint16|uint32|uint64'
             r'|int|int8|int16|int32|int64'
             r'|float|float32|float64'
             r'|complex64|complex128|byte|rune'
             r'|string|bool|error|uintptr'
             r'|print|println|panic|recover|close|complex|real|imag'
             r'|len|cap|append|copy|delete|new|make)\b(\()',
             bygroups(Name.Builtin, Punctuation)),
            (r'(uint|uint8|uint16|uint32|uint64'
             r'|int|int8|int16|int32|int64'
             r'|float|float32|float64'
             r'|complex64|complex128|byte|rune'
             r'|string|bool|error|uintptr)\b', Keyword.Type),
            # imaginary_lit
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # float_lit
            (r'\d+(\.\d+[eE][+\-]?\d+|'
             r'\.\d*|[eE][+\-]?\d+)', Number.Float),
            (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
            # int_lit
            # -- octal_lit
            (r'0[0-7]+', Number.Oct),
            # -- hex_lit
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- decimal_lit
            (r'(0|[1-9][0-9]*)', Number.Integer),
            # char_lit
            (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
             String.Char
            ),
            # StringLiteral
            # -- raw_string_lit
            (r'`[^`]*`', String),
            # -- interpreted_string_lit
            (r'"(\\\\|\\"|[^"])*"', String),
            # Tokens
            (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
             r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator),
            (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
            # identifier
            (r'[a-zA-Z_]\w*', Name.Other),
        ]
    }
class FelixLexer(RegexLexer):
    """
    For `Felix <http://www.felix-lang.org>`_ source code.

    *New in Pygments 1.2.*
    """

    name = 'Felix'
    aliases = ['felix', 'flx']
    filenames = ['*.flx', '*.flxh']
    mimetypes = ['text/x-felix']

    # Directives recognised after '#' in the 'macro' state (see below).
    preproc = [
        'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
    ]

    # Reserved words of the Felix language proper.
    keywords = [
        '_', '_deref', 'all', 'as',
        'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
        'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
        'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
        'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
        'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
        'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
        'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
        'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
        'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
        'when', 'whilst', 'with', 'yield',
    ]

    # Annotation-like directives; highlighted as decorators.
    keyword_directives = [
        '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
        'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
        'package', 'private', 'pod', 'property', 'public', 'publish',
        'requires', 'todo', 'virtual', 'use',
    ]

    keyword_declarations = [
        'def', 'let', 'ref', 'val', 'var',
    ]

    # Built-in type names.
    keyword_types = [
        'unit', 'void', 'any', 'bool',
        'byte', 'offset',
        'address', 'caddress', 'cvaddress', 'vaddress',
        'tiny', 'short', 'int', 'long', 'vlong',
        'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
        'int8', 'int16', 'int32', 'int64',
        'uint8', 'uint16', 'uint32', 'uint64',
        'float', 'double', 'ldouble',
        'complex', 'dcomplex', 'lcomplex',
        'imaginary', 'dimaginary', 'limaginary',
        'char', 'wchar', 'uchar',
        'charp', 'charcp', 'ucharp', 'ucharcp',
        'string', 'wstring', 'ustring',
        'cont',
        'array', 'varray', 'list',
        'lvalue', 'opt', 'slice',
    ]

    keyword_constants = [
        'false', 'true',
    ]

    # Word-form operators (as opposed to symbolic ones).
    operator_words = [
        'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
    ]

    name_builtins = [
        '_svc', 'while',
    ]

    # Pseudo-variables such as the current object / root scope.
    name_pseudo = [
        'root', 'self', 'this',
    ]

    # Optional width/signedness suffix accepted on integer literals.
    decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'

    tokens = {
        'root': [
            include('whitespace'),

            # Keywords -- the three rules below also transition into a state
            # that captures the following identifier (function/class/module
            # name) with a more specific token type.
            (r'(axiom|ctor|fun|gen|proc|reduce|union)\b', Keyword,
             'funcname'),
            (r'(class|cclass|cstruct|obj|struct)\b', Keyword, 'classname'),
            (r'(instance|module|typeclass)\b', Keyword, 'modulename'),
            (r'(%s)\b' % '|'.join(keywords), Keyword),
            (r'(%s)\b' % '|'.join(keyword_directives), Name.Decorator),
            (r'(%s)\b' % '|'.join(keyword_declarations), Keyword.Declaration),
            (r'(%s)\b' % '|'.join(keyword_types), Keyword.Type),
            (r'(%s)\b' % '|'.join(keyword_constants), Keyword.Constant),

            # Operators
            include('operators'),

            # Float Literal
            # -- Hex Float
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
             Number.Float),

            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+%s' % decimal_suffixes, Number),
            # -- Octal
            (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),

            # Strings -- raw strings ('r'/'c' prefixes) skip escape handling;
            # other prefixes combine the 'stringescape' state with the
            # appropriate quote state.
            ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
            ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
            ('([rR][cC]?|[cC][rR])"', String, 'dqs'),
            ("([rR][cC]?|[cC][rR])'", String, 'sqs'),
            ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
            ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
            ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
            ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),

            # Punctuation
            (r'[\[\]{}:(),;?]', Punctuation),

            # Labels (identifier followed by ':>')
            (r'[a-zA-Z_]\w*:>', Name.Label),

            # Identifiers
            (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
            (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),

            include('comment'),

            # Preprocessor
            (r'#\s*if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),
        ],
        'operators': [
            (r'(%s)\b' % '|'.join(operator_words), Operator.Word),
            (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
        ],
        'comment': [
            (r'//(.*?)\n', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment2'),
        ],
        # 'comment2' supports nested /* ... */ comments via '#push'/'#pop'.
        'comment2': [
            (r'[^\/*]', Comment.Multiline),
            (r'/[*]', Comment.Multiline, '#push'),
            (r'[*]/', Comment.Multiline, '#pop'),
            (r'[\/*]', Comment.Multiline),
        ],
        # Everything inside '#if 0' is dead code; nested #if/#endif tracked
        # with '#push'/'#pop'.
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
            (r'.*?\n', Comment),
        ],
        # Rest of a preprocessor line; import/include file names are
        # highlighted as strings.
        'macro': [
            include('comment'),
            (r'(import|include)(\s+)(<[^>]*?>)',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'(import|include)(\s+)("[^"]*?")',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r"(import|include)(\s+)('[^']*?')",
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'[^/\n]+', Comment.Preproc),
            ##(r'/[*](.|\n)*?[*]/', Comment),
            ##(r'//.*?\n', Comment, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),  # escaped newline continues the line
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'funcname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            # anonymous functions
            (r'(?=\()', Text, '#pop'),
        ],
        'classname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # anonymous classes
            (r'(?=\{)', Text, '#pop'),
        ],
        # Module name, optionally preceded by a '[...]' type-variable list.
        'modulename': [
            include('whitespace'),
            (r'\[', Punctuation, ('modulename2', 'tvarlist')),
            (r'', Error, 'modulename2'),
        ],
        'modulename2': [
            include('whitespace'),
            (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
        ],
        'tvarlist': [
            include('whitespace'),
            include('operators'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(with|where)\b', Keyword),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        # Common string-body rules shared by all four quote states.
        'strings': [
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            # included here again for raw strings
            (r'\\\\|\\"|\\\n', String.Escape),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            # included here again for raw strings
            (r"\\\\|\\'|\\\n", String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
class AdaLexer(RegexLexer):
    """
    For Ada source code.

    *New in Pygments 1.3.*
    """

    name = 'Ada'
    # Fix: the original read ['ada', 'ada95' 'ada2005'] -- the missing comma
    # made Python concatenate the two literals into the single (useless)
    # alias 'ada95ada2005', so neither 'ada95' nor 'ada2005' selected this
    # lexer.
    aliases = ['ada', 'ada95', 'ada2005']
    filenames = ['*.adb', '*.ads', '*.ada']
    mimetypes = ['text/x-ada']

    flags = re.MULTILINE | re.I  # Ignore case

    tokens = {
        'root': [
            (r'[^\S\n]+', Text),
            (r'--.*?\n', Comment.Single),
            (r'[^\S\n]+', Text),
            # Subprogram headers hand off to 'subprogram' to capture the name.
            (r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
            (r'(subtype|type)(\s+)([a-z0-9_]+)',
             bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
            (r'task|protected', Keyword.Declaration),
            (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
            (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
            (r'(pragma)(\s+)([a-zA-Z0-9_]+)', bygroups(Keyword.Reserved, Text,
                                                       Comment.Preproc)),
            (r'(true|false|null)\b', Keyword.Constant),
            # Predefined (package Standard / Ada.* standard library) types.
            (r'(Address|Byte|Boolean|Character|Controlled|Count|Cursor|'
             r'Duration|File_Mode|File_Type|Float|Generator|Integer|Long_Float|'
             r'Long_Integer|Long_Long_Float|Long_Long_Integer|Natural|Positive|'
             r'Reference_Type|Short_Float|Short_Integer|Short_Short_Float|'
             r'Short_Short_Integer|String|Wide_Character|Wide_String)\b',
             Keyword.Type),
            (r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word),
            (r'generic|private', Keyword.Declaration),
            (r'package', Keyword.Declaration, 'package'),
            (r'array\b', Keyword.Reserved, 'array_def'),
            (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
            (r'([a-z0-9_]+)(\s*)(:)(\s*)(constant)',
             bygroups(Name.Constant, Text, Punctuation, Text,
                      Keyword.Reserved)),
            (r'<<[a-z0-9_]+>>', Name.Label),
            (r'([a-z0-9_]+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
             bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
            (r'\b(abort|abs|abstract|accept|access|aliased|all|array|at|begin|'
             r'body|case|constant|declare|delay|delta|digits|do|else|elsif|end|'
             r'entry|exception|exit|interface|for|goto|if|is|limited|loop|new|'
             r'null|of|or|others|out|overriding|pragma|protected|raise|range|'
             r'record|renames|requeue|return|reverse|select|separate|subtype|'
             r'synchronized|task|tagged|terminate|then|type|until|when|while|'
             r'xor)\b',
             Keyword.Reserved),
            (r'"[^"]*"', String),
            include('attribute'),
            include('numbers'),
            (r"'[^']'", String.Character),
            # Identifier, possibly followed by '(' or ',' which is re-lexed
            # via using(this) so calls/indexing keep their punctuation tokens.
            (r'([a-z0-9_]+)(\s*|[(,])', bygroups(Name, using(this))),
            (r"(<>|=>|:=|[()|:;,.'])", Punctuation),
            (r'[*<>+=/&-]', Operator),
            (r'\n+', Text),
        ],
        'numbers' : [
            # Based-literal form 16#FF# etc.
            (r'[0-9_]+#[0-9a-f]+#', Number.Hex),
            (r'[0-9_]+\.[0-9_]*', Number.Float),
            (r'[0-9_]+', Number.Integer),
        ],
        'attribute' : [
            # 'First, 'Length, ... (apostrophe followed by attribute name)
            (r"(')([a-zA-Z0-9_]+)", bygroups(Punctuation, Name.Attribute)),
        ],
        'subprogram' : [
            (r'\(', Punctuation, ('#pop', 'formal_part')),
            (r';', Punctuation, '#pop'),
            (r'is\b', Keyword.Reserved, '#pop'),
            # Operator symbols ("+" etc.) or plain names are function names.
            (r'"[^"]+"|[a-z0-9_]+', Name.Function),
            include('root'),
        ],
        'end' : [
            ('(if|case|record|loop|select)', Keyword.Reserved),
            ('"[^"]+"|[a-zA-Z0-9_.]+', Name.Function),
            (r'\s+', Text),  # raw string: value identical, no invalid-escape warning
            (';', Punctuation, '#pop'),
        ],
        'type_def': [
            (r';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'formal_part'),
            (r'with|and|use', Keyword.Reserved),
            (r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
            (r'record\b', Keyword.Reserved, ('record_def')),
            (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'),
            include('root'),
        ],
        'array_def' : [
            (r';', Punctuation, '#pop'),
            (r'([a-z0-9_]+)(\s+)(range)', bygroups(Keyword.Type, Text,
                                                   Keyword.Reserved)),
            include('root'),
        ],
        'record_def' : [
            (r'end record', Keyword.Reserved, '#pop'),
            include('root'),
        ],
        'import': [
            (r'[a-z0-9_.]+', Name.Namespace, '#pop'),
            (r'', Text, '#pop'),
        ],
        'formal_part' : [
            (r'\)', Punctuation, '#pop'),
            (r'[a-z0-9_]+', Name.Variable),
            (r',|:[^=]', Punctuation),
            (r'(in|not|null|out|access)\b', Keyword.Reserved),
            include('root'),
        ],
        'package': [
            ('body', Keyword.Declaration),
            (r'is\s+new|renames', Keyword.Reserved),
            ('is', Keyword.Reserved, '#pop'),
            (';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'package_instantiation'),
            ('([a-zA-Z0-9_.]+)', Name.Class),
            include('root'),
        ],
        'package_instantiation': [
            # Named-association form: formal => actual
            (r'("[^"]+"|[a-z0-9_]+)(\s+)(=>)', bygroups(Name.Variable,
                                                        Text, Punctuation)),
            (r'[a-z0-9._\'"]', Text),
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
    }
class Modula2Lexer(RegexLexer):
    """
    For `Modula-2 <http://www.modula2.org/>`_ source code.

    Additional options that determine which keywords are highlighted:

    `pim`
        Select PIM Modula-2 dialect (default: True).
    `iso`
        Select ISO Modula-2 dialect (default: False).
    `objm2`
        Select Objective Modula-2 dialect (default: False).
    `gm2ext`
        Also highlight GNU extensions (default: False).

    *New in Pygments 1.3.*
    """
    name = 'Modula-2'
    aliases = ['modula2', 'm2']
    filenames = ['*.def', '*.mod']
    mimetypes = ['text/x-modula2']

    flags = re.MULTILINE | re.DOTALL

    # The regex rules below only emit generic Name tokens for identifiers;
    # get_tokens_unprocessed() upgrades them to Keyword.Reserved or
    # Keyword.Pervasive depending on the dialect selected in __init__().
    tokens = {
        'whitespace': [
            (r'\n+', Text), # blank lines
            (r'\s+', Text), # whitespace
        ],
        'identifiers': [
            (r'([a-zA-Z_\$][a-zA-Z0-9_\$]*)', Name),
        ],
        'numliterals': [
            (r'[01]+B', Number.Binary),        # binary number (ObjM2)
            (r'[0-7]+B', Number.Oct),          # octal number (PIM + ISO)
            (r'[0-7]+C', Number.Oct),          # char code (PIM + ISO)
            (r'[0-9A-F]+C', Number.Hex),       # char code (ObjM2)
            (r'[0-9A-F]+H', Number.Hex),       # hexadecimal number
            (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number
            (r'[0-9]+\.[0-9]+', Number.Float), # real number
            (r'[0-9]+', Number.Integer),       # decimal whole number
        ],
        'strings': [
            (r"'(\\\\|\\'|[^'])*'", String), # single quoted string
            (r'"(\\\\|\\"|[^"])*"', String), # double quoted string
        ],
        'operators': [
            (r'[*/+=#~&<>\^-]', Operator),
            (r':=', Operator),   # assignment
            (r'@', Operator),    # pointer deref (ISO)
            (r'\.\.', Operator), # ellipsis or range
            (r'`', Operator),    # Smalltalk message (ObjM2)
            (r'::', Operator),   # type conversion (ObjM2)
        ],
        'punctuation': [
            (r'[\(\)\[\]{},.:;|]', Punctuation),
        ],
        'comments': [
            (r'//.*?\n', Comment.Single),       # ObjM2
            (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2
            (r'\(\*([^\$].*?)\*\)', Comment.Multiline),
            # TO DO: nesting of (* ... *) comments
        ],
        'pragmas': [
            (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM
            (r'<\*(.*?)\*>', Comment.Preproc),     # ISO + ObjM2
        ],
        'root': [
            include('whitespace'),
            include('comments'),
            include('pragmas'),
            include('identifiers'),
            include('numliterals'),
            include('strings'),
            include('operators'),
            include('punctuation'),
        ]
    }

    pim_reserved_words = [
        # 40 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION',
        'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR',
        'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD',
        'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED',
        'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
        'UNTIL', 'VAR', 'WHILE', 'WITH',
    ]

    pim_pervasives = [
        # 31 pervasives
        'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC',
        'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL',
        'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD',
        'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL',
    ]

    iso_reserved_words = [
        # 46 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
        'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY',
        'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN',
        'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER',
        'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY',
        'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
        'WITH',
    ]

    iso_pervasives = [
        # 42 pervasives
        # Fix: 'UNINTERRUBTIBLE' was a typo for the ISO protection pervasive
        # 'UNINTERRUPTIBLE' (compare the correctly spelled 'INTERRUPTIBLE'
        # above), so the real identifier was never highlighted.
        'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX',
        'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH',
        'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH',
        'LFLOAT', 'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW',
        'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE',
        'TRUE', 'TRUNC', 'UNINTERRUPTIBLE', 'VAL',
    ]

    objm2_reserved_words = [
        # base language, 42 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
        'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF',
        'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD',
        'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE',
        'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
        'UNTIL', 'VAR', 'VARIADIC', 'WHILE',
        # OO extensions, 16 reserved words
        'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
        'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
        'SUPER', 'TRY',
    ]

    objm2_pervasives = [
        # base language, 38 pervasives
        'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE',
        'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD',
        'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL',
        'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX',
        'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF',
        # OO extensions, 3 pervasives
        'OBJECT', 'NO', 'YES',
    ]

    gnu_reserved_words = [
        # 10 additional reserved words
        'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
        '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
    ]

    gnu_pervasives = [
        # 21 identifiers, actually from pseudo-module SYSTEM
        # but we will highlight them as if they were pervasives
        'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
        'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
        'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
        'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
    ]

    def __init__(self, **options):
        """Build the reserved-word / pervasive sets for the selected dialect.

        Exactly one of the dialect options (`iso`, `objm2`, or the PIM
        default) is applied; `gm2ext` additionally mixes in GNU extensions.
        """
        self.reserved_words = set()
        self.pervasives = set()
        # ISO Modula-2
        if get_bool_opt(options, 'iso', False):
            self.reserved_words.update(self.iso_reserved_words)
            self.pervasives.update(self.iso_pervasives)
        # Objective Modula-2
        elif get_bool_opt(options, 'objm2', False):
            self.reserved_words.update(self.objm2_reserved_words)
            self.pervasives.update(self.objm2_pervasives)
        # PIM Modula-2 (DEFAULT)
        else:
            self.reserved_words.update(self.pim_reserved_words)
            self.pervasives.update(self.pim_pervasives)
        # GNU extensions
        if get_bool_opt(options, 'gm2ext', False):
            self.reserved_words.update(self.gnu_reserved_words)
            self.pervasives.update(self.gnu_pervasives)
        # initialise
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process the base lexer's stream, upgrading plain Name
        tokens that match the active dialect's word lists."""
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            # check for reserved words and pervasives
            if token is Name:
                if value in self.reserved_words:
                    token = Keyword.Reserved
                elif value in self.pervasives:
                    token = Keyword.Pervasive
            # return result
            yield index, token, value
class BlitzMaxLexer(RegexLexer):
    """
    For `BlitzMax <http://blitzbasic.com>`_ source code.

    *New in Pygments 1.4.*
    """

    name = 'BlitzMax'
    aliases = ['blitzmax', 'bmax']
    filenames = ['*.bmx']
    mimetypes = ['text/x-bmx']

    # Word-form operators.
    bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
    # Shortcut type sigils (@, @@, !, #, $, %).
    bmax_sktypes = r'@{1,2}|[!#$%]'
    # Long-form (keyword) type names.
    bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
    bmax_name = r'[a-z_][a-z0-9_]*'
    # Variable declaration: name, optional sigil or ':Type', optional 'Ptr'.
    bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
                r'|([ \t]*)([:])([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
               (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
    # Function declaration/call: a typed name followed by '('; the
    # '(?:[ \t]|\.\.\n)*' part allows line continuations before the paren.
    bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            (r'\.\.\n', Text), # Line continuation
            # Comments
            (r"'.*?\n", Comment.Single),
            (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]*(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number), # Binary
            # Other
            # NOTE(review): '(:?%s' below looks like it was meant to be the
            # non-capturing '(?:%s' -- left as-is to preserve behavior.
            (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
             (bmax_vopwords), Operator),
            (r'[(),.:\[\]]', Punctuation),
            (r'(?:#[\w \t]*)', Name.Label),
            (r'(?:\?[\w \t]*)', Comment.Preproc),
            # Identifiers
            (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
            (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
             (bmax_name, bmax_name),
             bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
            (bmax_func, bygroups(Name.Function, Text, Keyword.Type,
                                 Operator, Text, Punctuation, Text,
                                 Keyword.Type, Name.Class, Text,
                                 Keyword.Type, Text, Punctuation)),
            (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
                                Text, Punctuation, Text, Keyword.Type,
                                Name.Class, Text, Keyword.Type)),
            (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Ptr)\b', Keyword.Type),
            (r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
            (r'\b(TNullMethodException|TNullFunctionException|'
             r'TNullObjectException|TArrayBoundsException|'
             r'TRuntimeException)\b', Name.Exception),
            (r'\b(Strict|SuperStrict|Module|ModuleInfo|'
             r'End|Return|Continue|Exit|Public|Private|'
             r'Var|VarPtr|Chr|Len|Asc|SizeOf|Sgn|Abs|Min|Max|'
             r'New|Release|Delete|'
             r'Incbin|IncbinPtr|IncbinLen|'
             r'Framework|Include|Import|Extern|EndExtern|'
             r'Function|EndFunction|'
             r'Type|EndType|Extends|'
             r'Method|EndMethod|'
             r'Abstract|Final|'
             r'If|Then|Else|ElseIf|EndIf|'
             r'For|To|Next|Step|EachIn|'
             r'While|Wend|EndWhile|'
             r'Repeat|Until|Forever|'
             r'Select|Case|Default|EndSelect|'
             r'Try|Catch|EndTry|Throw|Assert|'
             r'Goto|DefData|ReadData|RestoreData)\b', Keyword.Reserved),
            # Final resolve (for variable names and such)
            (r'(%s)' % (bmax_name), Name.Variable),
        ],
        'string': [
            (r'""', String.Double),  # doubled quote = escaped quote
            (r'"C?', String.Double, '#pop'),  # closing quote, optional C suffix
            (r'[^"]+', String.Double),
        ],
    }
class BlitzBasicLexer(RegexLexer):
    """
    For `BlitzBasic <http://blitzbasic.com>`_ source code.

    *New in Pygments 1.7.*
    """

    name = 'BlitzBasic'
    aliases = ['blitzbasic', 'b3d', 'bplus']
    filenames = ['*.bb', '*.decls']
    mimetypes = ['text/x-bb']

    # Word-form operators (BlitzBasic uses these instead of symbols).
    bb_vopwords = (r'\b(Shl|Shr|Sar|Mod|Or|And|Not|'
                   r'Abs|Sgn|Handle|Int|Float|Str|'
                   r'First|Last|Before|After)\b')
    # Type sigils (@, @@, #, $, %).
    bb_sktypes = r'@{1,2}|[#$%]'
    bb_name = r'[a-z][a-z0-9_]*'
    # Variable: name, then optional sigil or '.TypeName'.
    bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
             (bb_name, bb_sktypes, bb_name)

    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            # Comments
            (r";.*?\n", Comment.Single),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number), # Binary
            # Other
            (r'(?:%s|([+\-*/~=<>^]))' % (bb_vopwords), Operator),
            (r'[(),:\[\]\\]', Punctuation),
            (r'\.([ \t]*)(%s)' % bb_name, Name.Label),
            # Identifiers
            (r'\b(New)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Label)),
            (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
             bygroups(Operator, Text, Punctuation, Text, Name.Class)),
            # Function call: typed name followed by '('.
            (r'\b%s\b([ \t]*)(\()' % bb_var,
             bygroups(Name.Function, Text, Keyword.Type,Text, Punctuation,
                      Text, Name.Class, Text, Punctuation)),
            (r'\b(Function)\b([ \t]+)%s' % bb_var,
             bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
                      Text, Punctuation, Text, Name.Class)),
            (r'\b(Type)([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Pi|True|False|Null)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
            (r'\b(End|Return|Exit|'
             r'Chr|Len|Asc|'
             r'New|Delete|Insert|'
             r'Include|'
             r'Function|'
             r'Type|'
             r'If|Then|Else|ElseIf|EndIf|'
             r'For|To|Next|Step|Each|'
             r'While|Wend|'
             r'Repeat|Until|Forever|'
             r'Select|Case|Default|'
             r'Goto|Gosub|Data|Read|Restore)\b', Keyword.Reserved),
            # Final resolve (for variable names and such)
            # (r'(%s)' % (bb_name), Name.Variable),
            (bb_var, bygroups(Name.Variable, Text, Keyword.Type,
                              Text, Punctuation, Text, Name.Class)),
        ],
        'string': [
            (r'""', String.Double),  # doubled quote = escaped quote
            (r'"C?', String.Double, '#pop'),  # closing quote, optional C suffix
            (r'[^"]+', String.Double),
        ],
    }
class NimrodLexer(RegexLexer):
    """
    For `Nimrod <http://nimrod-code.org/>`_ source code.

    *New in Pygments 1.5.*
    """

    name = 'Nimrod'
    aliases = ['nimrod', 'nim']
    filenames = ['*.nim', '*.nimrod']
    mimetypes = ['text/x-nimrod']

    flags = re.MULTILINE | re.IGNORECASE | re.UNICODE

    # Nimrod identifiers are style-insensitive: underscores are ignored, so
    # each keyword is turned into a pattern with an optional '_' after every
    # character ('proc' matches 'p_r_o_c' too).  Called at class-build time.
    def underscorize(words):
        newWords = []
        new = ""
        for word in words:
            for ch in word:
                new += (ch + "_?")
            newWords.append(new)
            new = ""
        return "|".join(newWords)

    keywords = [
        'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break',
        'case', 'cast', 'const', 'continue', 'converter', 'discard',
        'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally',
        'for', 'generic', 'if', 'implies', 'in', 'yield',
        'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method',
        'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc',
        'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try',
        'tuple', 'type' , 'when', 'while', 'with', 'without', 'xor'
    ]

    keywordsPseudo = [
        'nil', 'true', 'false'
    ]

    opWords = [
        'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
        'notin', 'is', 'isnot'
    ]

    types = [
        'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
        'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
    ]

    tokens = {
        'root': [
            (r'##.*$', String.Doc),
            (r'#.*$', Comment),
            (r'\*|=|>|<|\+|-|/|@|\$|~|&|%|\!|\?|\||\\|\[|\]', Operator),
            (r'\.\.|\.|,|\[\.|\.\]|{\.|\.}|\(\.|\.\)|{|}|\(|\)|:|\^|`|;',
             Punctuation),

            # Strings
            (r'(?:[\w]+)"', String, 'rdqs'),  # raw string with identifier prefix
            (r'"""', String, 'tdqs'),
            ('"', String, 'dqs'),

            # Char
            ("'", String.Char, 'chars'),

            # Keywords
            (r'(%s)\b' % underscorize(opWords), Operator.Word),
            (r'(p_?r_?o_?c_?\s)(?![\(\[\]])', Keyword, 'funcname'),
            (r'(%s)\b' % underscorize(keywords), Keyword),
            (r'(%s)\b' % underscorize(['from', 'import', 'include']),
             Keyword.Namespace),
            (r'(v_?a_?r)\b', Keyword.Declaration),
            (r'(%s)\b' % underscorize(types), Keyword.Type),
            (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),
            # Identifiers
            (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),
            # Numbers -- a float is recognised by lookahead for '.', 'e'/'E',
            # or a \'f32/\'f64 suffix; the sub-states consume the rest.
            (r'[0-9][0-9_]*(?=([eE.]|\'[fF](32|64)))',
             Number.Float, ('float-suffix', 'float-number')),
            (r'0[xX][a-fA-F0-9][a-fA-F0-9_]*', Number.Hex, 'int-suffix'),
            (r'0[bB][01][01_]*', Number, 'int-suffix'),
            (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
            (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),
            # Whitespace
            (r'\s+', Text),
            (r'.+$', Error),
        ],
        'chars': [
            (r'\\([\\abcefnrtvl"\']|x[a-fA-F0-9]{2}|[0-9]{1,3})', String.Escape),
            (r"'", String.Char, '#pop'),
            (r".", String.Char)
        ],
        # Common string-body rules shared by the quote states below.
        'strings': [
            (r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
            (r'[^\\\'"\$\n]+', String),
            # quotes, dollars and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'\$', String)
            # newlines are an error (use "nl" state)
        ],
        'dqs': [
            (r'\\([\\abcefnrtvl"\']|\n|x[a-fA-F0-9]{2}|[0-9]{1,3})',
             String.Escape),
            (r'"', String, '#pop'),
            include('strings')
        ],
        'rdqs': [
            (r'"(?!")', String, '#pop'),
            (r'""', String.Escape),  # doubled quote escapes in raw strings
            include('strings')
        ],
        'tdqs': [
            (r'"""(?!")', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'funcname': [
            (r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
            (r'`.+`', Name.Function, '#pop')  # backquoted (operator) name
        ],
        'nl': [
            (r'\n', String)
        ],
        'float-number': [
          (r'\.(?!\.)[0-9_]*', Number.Float),
          (r'[eE][+-]?[0-9][0-9_]*', Number.Float),
          (r'', Text, '#pop')
        ],
        'float-suffix': [
          (r'\'[fF](32|64)', Number.Float),
          (r'', Text, '#pop')
        ],
        'int-suffix': [
          (r'\'[iI](32|64)', Number.Integer.Long),
          (r'\'[iI](8|16)', Number.Integer),
          (r'', Text, '#pop')
        ],
    }
class FantomLexer(RegexLexer):
    """
    For Fantom source code.

    *New in Pygments 1.5.*
    """
    name = 'Fantom'
    aliases = ['fan']
    filenames = ['*.fan']
    mimetypes = ['application/x-fantom']

    # often used regexes
    # s() expands the placeholders $pod / $eos / $id / $type inside a pattern
    # template; it is evaluated at class-build time.  (The parameter shadows
    # the builtin 'str', which is harmless here.)
    def s(str):
        return Template(str).substitute(
            dict (
                pod = r'[\"\w\.]+',
                eos = r'\n|;',
                id = r'[a-zA-Z_][a-zA-Z0-9_]*',
                # all chars which can be part of type definition. Starts with
                # either letter, or [ (maps), or | (funcs)
                type = r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]\|\->\?]*?',
                )
            )

    tokens = {
        'comments': [
            (r'(?s)/\*.*?\*/', Comment.Multiline),           #Multiline
            (r'//.*?\n', Comment.Single),                    #Single line
            #todo: highlight references in fandocs
            (r'\*\*.*?\n', Comment.Special),                 #Fandoc
            (r'#.*\n', Comment.Single)                       #Shell-style
        ],
        'literals': [
            (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number),   #Duration
            (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number),
                                                             #Duration with dot
            (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float),    #Float/Decimal
            (r'\b-?0x[0-9a-fA-F_]+', Number.Hex),            #Hex
            (r'\b-?[\d_]+', Number.Integer),                 #Int
            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), #Char
            (r'"', Punctuation, 'insideStr'),                #Opening quote
            (r'`', Punctuation, 'insideUri'),                #Opening accent
            (r'\b(true|false|null)\b', Keyword.Constant),    #Bool & null
            (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)',          #DSL
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, String, Punctuation)),
            (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?',               #Type/slot literal
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, Name.Function)),
            (r'\[,\]', Literal),                             # Empty list
            (s(r'($type)(\[,\])'),                           # Typed empty list
             bygroups(using(this, state = 'inType'), Literal)),
            (r'\[:\]', Literal),                             # Empty Map
            (s(r'($type)(\[:\])'),
             bygroups(using(this, state = 'inType'), Literal)),
        ],
        'insideStr': [
            (r'\\\\', String.Escape),                        #Escaped backslash
            (r'\\"', String.Escape),                         #Escaped "
            (r'\\`', String.Escape),                         #Escaped `
            (r'\$\w+', String.Interpol),                     #Subst var
            (r'\${.*?}', String.Interpol),                   #Subst expr
            (r'"', Punctuation, '#pop'),                     #Closing quot
            (r'.', String)                                   #String content
        ],
        'insideUri': [  #TODO: remove copy/paste str/uri
            (r'\\\\', String.Escape),                        #Escaped backslash
            (r'\\"', String.Escape),                         #Escaped "
            (r'\\`', String.Escape),                         #Escaped `
            (r'\$\w+', String.Interpol),                     #Subst var
            (r'\${.*?}', String.Interpol),                   #Subst expr
            (r'`', Punctuation, '#pop'),                     #Closing tick
            (r'.', String.Backtick)                          #URI content
        ],
        'protectionKeywords': [
            (r'\b(public|protected|private|internal)\b', Keyword),
        ],
        'typeKeywords': [
            (r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
        ],
        'methodKeywords': [
            (r'\b(abstract|native|once|override|static|virtual|final)\b',
             Keyword),
        ],
        'fieldKeywords': [
            (r'\b(abstract|const|final|native|override|static|virtual|'
             r'readonly)\b', Keyword)
        ],
        'otherKeywords': [
            (r'\b(try|catch|throw|finally|for|if|else|while|as|is|isnot|'
             r'switch|case|default|continue|break|do|return|get|set)\b',
             Keyword),
            (r'\b(it|this|super)\b', Name.Builtin.Pseudo),
        ],
        'operators': [
            (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
        ],
        # Sub-lexing state used via using(this, state='inType') to colour
        # type expressions embedded in the rules above.
        'inType': [
            (r'[\[\]\|\->:\?]', Punctuation),
            (s(r'$id'), Name.Class),
            (r'', Text, '#pop'),

        ],
        'root': [
            include('comments'),
            include('protectionKeywords'),
            include('typeKeywords'),
            include('methodKeywords'),
            include('fieldKeywords'),
            include('literals'),
            include('otherKeywords'),
            include('operators'),
            (r'using\b', Keyword.Namespace, 'using'),      # Using stmt
            (r'@\w+', Name.Decorator, 'facet'),            # Symbol
            (r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class),
             'inheritance'),                               # Inheritance list


            ### Type var := val
            (s(r'($type)([ \t]+)($id)(\s*)(:=)'),
             bygroups(using(this, state = 'inType'), Text,
                      Name.Variable, Text, Operator)),

            ### var := val
            (s(r'($id)(\s*)(:=)'),
             bygroups(Name.Variable, Text, Operator)),

            ### .someId( or ->someId( ###
            (s(r'(\.|(?:\->))($id)(\s*)(\()'),
             bygroups(Operator, Name.Function, Text, Punctuation),
             'insideParen'),

            ### .someId or ->someId
            (s(r'(\.|(?:\->))($id)'),
             bygroups(Operator, Name.Function)),

            ### new makeXXX ( ####
            (r'(new)(\s+)(make\w*)(\s*)(\()',
             bygroups(Keyword, Text, Name.Function, Text, Punctuation),
             'insideMethodDeclArgs'),

            ### Type name ( ####
            (s(r'($type)([ \t]+)'                           #Return type and whitespace
               r'($id)(\s*)(\()'),                          #method name + open brace
             bygroups(using(this, state = 'inType'), Text,
                      Name.Function, Text, Punctuation),
             'insideMethodDeclArgs'),

            ### ArgType argName, #####
            (s(r'($type)(\s+)($id)(\s*)(,)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation)),

            #### ArgType argName) ####
            ## Covered in 'insideParen' state

            ### ArgType argName -> ArgType| ###
            (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation, Text, using(this, state = 'inType'),
                      Punctuation)),

            ### ArgType argName| ###
            (s(r'($type)(\s+)($id)(\s*)(\|)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation)),

            ### Type var
            (s(r'($type)([ \t]+)($id)'),
             bygroups(using(this, state='inType'), Text,
                      Name.Variable)),

            (r'\(', Punctuation, 'insideParen'),
            (r'\{', Punctuation, 'insideBrace'),
            (r'.', Text)
        ],
        'insideParen': [
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
        'insideMethodDeclArgs': [
            (r'\)', Punctuation, '#pop'),
            (s(r'($type)(\s+)($id)(\s*)(\))'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation), '#pop'),
            include('root'),
        ],
        'insideBrace': [
            (r'\}', Punctuation, '#pop'),
            include('root'),
        ],
        'inheritance': [
            (r'\s+', Text),                                  #Whitespace
            (r':|,', Punctuation),
            (r'(?:(\w+)(::))?(\w+)',
             bygroups(Name.Namespace, Punctuation, Name.Class)),
            (r'{', Punctuation, '#pop')
        ],
        'using': [
            (r'[ \t]+', Text), # consume whitespaces
            (r'(\[)(\w+)(\])',
             bygroups(Punctuation, Comment.Special, Punctuation)), #ffi
            (r'(\")?([\w\.]+)(\")?',
             bygroups(Punctuation, Name.Namespace, Punctuation)), #podname
            (r'::', Punctuation, 'usingClass'),
            (r'', Text, '#pop') # anything else -> go back
        ],
        'usingClass': [
            (r'[ \t]+', Text), # consume whitespaces
            (r'(as)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'),
            (r'[\w\$]+', Name.Class),
            (r'', Text, '#pop:2') # jump out to root state
        ],
        'facet': [
            (r'\s+', Text),
            (r'{', Punctuation, 'facetFields'),
            (r'', Text, '#pop')
        ],
        'facetFields': [
            include('comments'),
            include('literals'),
            include('operators'),
            (r'\s+', Text),
            (r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)),
            (r'}', Punctuation, '#pop'),
            (r'.', Text)
        ],
    }
class RustLexer(RegexLexer):
    """
    Lexer for Mozilla's Rust programming language.

    *New in Pygments 1.6.*
    """
    name = 'Rust'
    filenames = ['*.rs', '*.rc']
    aliases = ['rust']
    mimetypes = ['text/x-rustsrc']

    tokens = {
        'root': [
            # Whitespace and Comments
            (r'\n', Text),
            (r'\s+', Text),
            (r'//(.*?)\n', Comment.Single),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            # Keywords (pre-1.0 Rust vocabulary: includes 'copy', 'fail',
            # 'pure', 'once', and built-in scalar type names)
            (r'(as|assert|break|const'
             r'|copy|do|else|enum|extern|fail'
             r'|false|fn|for|if|impl|let|log'
             r'|loop|match|mod|move|mut|once|priv|pub|pure'
             r'|ref|return|static|struct|trait|true|type|unsafe|use|while'
             r'|u8|u16|u32|u64|i8|i16|i32|i64|uint'
             r'|int|float|f32|f64|str)\b', Keyword),
            # Character Literal
            (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
             String.Char),
            # Binary Literal
            (r'0[Bb][01_]+', Number, 'number_lit'),
            # Octal Literal
            (r'0[0-7_]+', Number.Oct, 'number_lit'),
            # Hexadecimal Literal
            (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
            # Decimal Literal
            (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?'
             r'[0-9_]+|\.[0-9_]*|[eE][+\-]?[0-9_]+)?', Number, 'number_lit'),
            # String Literal
            (r'"', String, 'string'),
            # Operators and Punctuation
            (r'[{}()\[\],.;]', Punctuation),
            (r'[+\-*/%&|<>^!~@=:?]', Operator),
            # Identifier
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
            # Attributes
            (r'#\[', Comment.Preproc, 'attribute['),
            (r'#\(', Comment.Preproc, 'attribute('),
            # Macros
            (r'[A-Za-z_][A-Za-z0-9_]*!\[', Comment.Preproc, 'attribute['),
            (r'[A-Za-z_][A-Za-z0-9_]*!\(', Comment.Preproc, 'attribute('),
        ],
        'number_lit': [
            # Optional numeric type suffix (e.g. 1u8, 2f64). The pattern also
            # matches the empty string, so this state always pops immediately.
            (r'(([ui](8|16|32|64)?)|(f(32|64)?))?', Keyword, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape),
            (r'[^\\"]+', String),
            (r'\\', String),
        ],
        # Shared rules for attribute/macro bodies; nested brackets recurse
        # into the matching bracketed state.
        'attribute_common': [
            (r'"', String, 'string'),
            (r'\[', Comment.Preproc, 'attribute['),
            (r'\(', Comment.Preproc, 'attribute('),
        ],
        'attribute[': [
            include('attribute_common'),
            (r'\];?', Comment.Preproc, '#pop'),
            (r'[^"\]]+', Comment.Preproc),
        ],
        'attribute(': [
            include('attribute_common'),
            (r'\);?', Comment.Preproc, '#pop'),
            (r'[^"\)]+', Comment.Preproc),
        ],
    }
class CudaLexer(CLexer):
    """
    For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
    source.

    *New in Pygments 1.6.*
    """
    name = 'CUDA'
    filenames = ['*.cu', '*.cuh']
    aliases = ['cuda', 'cu']
    mimetypes = ['text/x-cuda']

    # Identifier classes that get re-tagged after the plain C lexer has run.
    function_qualifiers = ['__device__', '__global__', '__host__',
                           '__noinline__', '__forceinline__']
    variable_qualifiers = ['__device__', '__constant__', '__shared__',
                           '__restrict__']
    vector_types = ['char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
                    'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2',
                    'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1',
                    'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
                    'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
                    'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
                    'ulonglong2', 'float1', 'float2', 'float3', 'float4',
                    'double1', 'double2', 'dim3']
    variables = ['gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize']
    functions = ['__threadfence_block', '__threadfence', '__threadfence_system',
                 '__syncthreads', '__syncthreads_count', '__syncthreads_and',
                 '__syncthreads_or']
    execution_confs = ['<<<', '>>>']

    def get_tokens_unprocessed(self, text):
        """Post-process the C token stream, retagging plain Name tokens that
        are CUDA qualifiers, vector types, builtins or intrinsics."""
        for index, token, value in \
                CLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                # NOTE: '__device__' appears in both qualifier lists; because
                # variable_qualifiers is tested first, it wins and the value
                # is tagged Keyword.Type.
                if value in self.variable_qualifiers:
                    token = Keyword.Type
                elif value in self.vector_types:
                    token = Keyword.Type
                elif value in self.variables:
                    token = Name.Builtin
                elif value in self.execution_confs:
                    token = Keyword.Pseudo
                elif value in self.function_qualifiers:
                    token = Keyword.Reserved
                elif value in self.functions:
                    token = Name.Function
            yield index, token, value
class MonkeyLexer(RegexLexer):
    """
    For
    `Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
    source code.

    *New in Pygments 1.6.*
    """

    name = 'Monkey'
    aliases = ['monkey']
    filenames = ['*.monkey']
    mimetypes = ['text/x-monkey']

    # Naming conventions used to classify bare identifiers below.
    name_variable = r'[a-z_][a-zA-Z0-9_]*'
    name_function = r'[A-Z][a-zA-Z0-9_]*'
    name_constant = r'[A-Z_][A-Z0-9_]*'
    name_class = r'[A-Z][a-zA-Z0-9_]*'
    name_module = r'[a-z0-9_]*'

    keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
    # ? == Bool // % == Int // # == Float // $ == String
    keyword_type_special = r'[?%#$]'

    flags = re.MULTILINE

    tokens = {
        'root': [
            # Text
            (r'\s+', Text),
            # Comments
            (r"'.*", Comment),
            (r'(?i)^#rem\b', Comment.Multiline, 'comment'),
            # preprocessor directives
            (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
            # preprocessor variable (any line starting with '#' that is not a directive)
            (r'^#', Comment.Preproc, 'variables'),
            # String
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            # BUG FIX: hexadecimal digits are 0-9/a-f/A-F. The previous class
            # [0-9a-fA-Z] wrongly accepted $G .. $Z as hex digits.
            (r'\$[0-9a-fA-F]+', Number.Hex),
            (r'\%[10]+', Number),  # Binary
            # Native data types
            (r'\b%s\b' % keyword_type, Keyword.Type),
            # Exception handling
            (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
            (r'Throwable', Name.Exception),
            # Builtins
            (r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
            (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
            (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
            # Keywords
            (r'(?i)^(Import)(\s+)(.*)(\n)',
             bygroups(Keyword.Namespace, Text, Name.Namespace, Text)),
            (r'(?i)^Strict\b.*\n', Keyword.Reserved),
            (r'(?i)(Const|Local|Global|Field)(\s+)',
             bygroups(Keyword.Declaration, Text), 'variables'),
            (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
             bygroups(Keyword.Reserved, Text), 'classname'),
            (r'(?i)(Function|Method)(\s+)',
             bygroups(Keyword.Reserved, Text), 'funcname'),
            (r'(?i)(?:End|Return|Public|Private|Extern|Property|'
             r'Final|Abstract)\b', Keyword.Reserved),
            # Flow Control stuff
            (r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
             r'Select|Case|Default|'
             r'While|Wend|'
             r'Repeat|Until|Forever|'
             r'For|To|Until|Step|EachIn|Next|'
             r'Exit|Continue)\s+', Keyword.Reserved),
            # not used yet
            (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
            # Array
            (r'[\[\]]', Punctuation),
            # Other
            (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
            (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
            (r'[\(\){}!#,.:]', Punctuation),
            # catch the rest
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_function, Name.Function),
            (r'%s\b' % name_variable, Name.Variable),
        ],
        'funcname': [
            (r'(?i)%s\b' % name_function, Name.Function),
            (r':', Punctuation, 'classname'),
            (r'\s+', Text),
            (r'\(', Punctuation, 'variables'),
            (r'\)', Punctuation, '#pop')
        ],
        'classname': [
            (r'%s\.' % name_module, Name.Namespace),
            (r'%s\b' % keyword_type, Keyword.Type),
            (r'%s\b' % name_class, Name.Class),
            # array (of given size)
            (r'(\[)(\s*)(\d*)(\s*)(\])',
             bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)),
            # generics
            (r'\s+(?!<)', Text, '#pop'),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r'\n', Text, '#pop'),
            (r'', Text, '#pop')
        ],
        'variables': [
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_variable, Name.Variable),
            (r'%s' % keyword_type_special, Keyword.Type),
            (r'\s+', Text),
            (r':', Punctuation, 'classname'),
            (r',', Punctuation, '#push'),
            (r'', Text, '#pop')
        ],
        'string': [
            (r'[^"~]+', String.Double),
            # Monkey escape sequences use '~' as the escape character.
            (r'~q|~n|~r|~t|~z|~~', String.Escape),
            (r'"', String.Double, '#pop'),
        ],
        'comment': [
            (r'(?i)^#rem.*?', Comment.Multiline, "#push"),
            (r'(?i)^#end.*?', Comment.Multiline, "#pop"),
            (r'\n', Comment.Multiline),
            (r'.+', Comment.Multiline),
        ],
    }
class CobolLexer(RegexLexer):
    """
    Lexer for OpenCOBOL code.

    *New in Pygments 1.6.*
    """
    name = 'COBOL'
    aliases = ['cobol']
    filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY']
    mimetypes = ['text/x-cobol']
    flags = re.IGNORECASE | re.MULTILINE

    # Data Types: by PICTURE and USAGE
    # Operators: **, *, +, -, /, <, >, <=, >=, =, <>
    # Logical (?): NOT, AND, OR
    # Reserved words:
    # http://opencobol.add1tocobol.com/#reserved-words
    # Intrinsics:
    # http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions

    tokens = {
        'root': [
            include('comment'),
            include('strings'),
            include('core'),
            include('nums'),
            (r'[a-z0-9]([_a-z0-9\-]*[a-z0-9]+)?', Name.Variable),
            # (r'[\s]+', Text),
            (r'[ \t]+', Text),
        ],
        'comment': [
            # Fixed-format comment/debug indicator in column 7, or '*>'
            # inline comments.
            (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment),
        ],
        'core': [
            # Figurative constants
            (r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?'
             r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)'
             r'\s*($|(?=[^0-9a-z_\-]))',
             Name.Constant),

            # Reserved words STATEMENTS and other bolds
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(ACCEPT|ADD|ALLOCATE|CALL|CANCEL|CLOSE|COMPUTE|'
             r'CONFIGURATION|CONTINUE|'
             r'DATA|DELETE|DISPLAY|DIVIDE|DIVISION|ELSE|END|END-ACCEPT|'
             r'END-ADD|END-CALL|END-COMPUTE|END-DELETE|END-DISPLAY|'
             r'END-DIVIDE|END-EVALUATE|END-IF|END-MULTIPLY|END-OF-PAGE|'
             r'END-PERFORM|END-READ|END-RETURN|END-REWRITE|END-SEARCH|'
             r'END-START|END-STRING|END-SUBTRACT|END-UNSTRING|END-WRITE|'
             r'ENVIRONMENT|EVALUATE|EXIT|FD|FILE|FILE-CONTROL|FOREVER|'
             r'FREE|GENERATE|GO|GOBACK|'
             r'IDENTIFICATION|IF|INITIALIZE|'
             r'INITIATE|INPUT-OUTPUT|INSPECT|INVOKE|I-O-CONTROL|LINKAGE|'
             r'LOCAL-STORAGE|MERGE|MOVE|MULTIPLY|OPEN|'
             r'PERFORM|PROCEDURE|PROGRAM-ID|RAISE|READ|RELEASE|RESUME|'
             r'RETURN|REWRITE|SCREEN|'
             r'SD|SEARCH|SECTION|SET|SORT|START|STOP|STRING|SUBTRACT|'
             r'SUPPRESS|TERMINATE|THEN|UNLOCK|UNSTRING|USE|VALIDATE|'
             r'WORKING-STORAGE|WRITE)'
             r'\s*($|(?=[^0-9a-z_\-]))', Keyword.Reserved),

            # Reserved words
            # BUG FIX: a '|' was missing after ALTERNATE; the adjacent string
            # literals concatenated into the bogus alternative 'ALTERNATEANY',
            # so neither ALTERNATE nor ANY was ever matched.
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(ACCESS|ADDRESS|ADVANCING|AFTER|ALL|'
             r'ALPHABET|ALPHABETIC|ALPHABETIC-LOWER|ALPHABETIC-UPPER|'
             r'ALPHANUMERIC|ALPHANUMERIC-EDITED|ALSO|ALTER|ALTERNATE|'
             r'ANY|ARE|AREA|AREAS|ARGUMENT-NUMBER|ARGUMENT-VALUE|AS|'
             r'ASCENDING|ASSIGN|AT|AUTO|AUTO-SKIP|AUTOMATIC|AUTOTERMINATE|'
             r'BACKGROUND-COLOR|BASED|BEEP|BEFORE|BELL|'
             r'BLANK|'
             r'BLINK|BLOCK|BOTTOM|BY|BYTE-LENGTH|CHAINING|'
             r'CHARACTER|CHARACTERS|CLASS|CODE|CODE-SET|COL|COLLATING|'
             r'COLS|COLUMN|COLUMNS|COMMA|COMMAND-LINE|COMMIT|COMMON|'
             r'CONSTANT|CONTAINS|CONTENT|CONTROL|'
             r'CONTROLS|CONVERTING|COPY|CORR|CORRESPONDING|COUNT|CRT|'
             r'CURRENCY|CURSOR|CYCLE|DATE|DAY|DAY-OF-WEEK|DE|DEBUGGING|'
             r'DECIMAL-POINT|DECLARATIVES|DEFAULT|DELIMITED|'
             r'DELIMITER|DEPENDING|DESCENDING|DETAIL|DISK|'
             r'DOWN|DUPLICATES|DYNAMIC|EBCDIC|'
             r'ENTRY|ENVIRONMENT-NAME|ENVIRONMENT-VALUE|EOL|EOP|'
             r'EOS|ERASE|ERROR|ESCAPE|EXCEPTION|'
             r'EXCLUSIVE|EXTEND|EXTERNAL|'
             r'FILE-ID|FILLER|FINAL|FIRST|FIXED|FLOAT-LONG|FLOAT-SHORT|'
             r'FOOTING|FOR|FOREGROUND-COLOR|FORMAT|FROM|FULL|FUNCTION|'
             r'FUNCTION-ID|GIVING|GLOBAL|GROUP|'
             r'HEADING|HIGHLIGHT|I-O|ID|'
             r'IGNORE|IGNORING|IN|INDEX|INDEXED|INDICATE|'
             r'INITIAL|INITIALIZED|INPUT|'
             r'INTO|INTRINSIC|INVALID|IS|JUST|JUSTIFIED|KEY|LABEL|'
             r'LAST|LEADING|LEFT|LENGTH|LIMIT|LIMITS|LINAGE|'
             r'LINAGE-COUNTER|LINE|LINES|LOCALE|LOCK|'
             r'LOWLIGHT|MANUAL|MEMORY|MINUS|MODE|'
             r'MULTIPLE|NATIONAL|NATIONAL-EDITED|NATIVE|'
             r'NEGATIVE|NEXT|NO|NULL|NULLS|NUMBER|NUMBERS|NUMERIC|'
             r'NUMERIC-EDITED|OBJECT-COMPUTER|OCCURS|OF|OFF|OMITTED|ON|ONLY|'
             r'OPTIONAL|ORDER|ORGANIZATION|OTHER|OUTPUT|OVERFLOW|'
             r'OVERLINE|PACKED-DECIMAL|PADDING|PAGE|PARAGRAPH|'
             r'PLUS|POINTER|POSITION|POSITIVE|PRESENT|PREVIOUS|'
             r'PRINTER|PRINTING|PROCEDURE-POINTER|PROCEDURES|'
             r'PROCEED|PROGRAM|PROGRAM-POINTER|PROMPT|QUOTE|'
             r'QUOTES|RANDOM|RD|RECORD|RECORDING|RECORDS|RECURSIVE|'
             r'REDEFINES|REEL|REFERENCE|RELATIVE|REMAINDER|REMOVAL|'
             r'RENAMES|REPLACING|REPORT|REPORTING|REPORTS|REPOSITORY|'
             r'REQUIRED|RESERVE|RETURNING|REVERSE-VIDEO|REWIND|'
             r'RIGHT|ROLLBACK|ROUNDED|RUN|SAME|SCROLL|'
             r'SECURE|SEGMENT-LIMIT|SELECT|SENTENCE|SEPARATE|'
             r'SEQUENCE|SEQUENTIAL|SHARING|SIGN|SIGNED|SIGNED-INT|'
             r'SIGNED-LONG|SIGNED-SHORT|SIZE|SORT-MERGE|SOURCE|'
             r'SOURCE-COMPUTER|SPECIAL-NAMES|STANDARD|'
             r'STANDARD-1|STANDARD-2|STATUS|SUM|'
             r'SYMBOLIC|SYNC|SYNCHRONIZED|TALLYING|TAPE|'
             r'TEST|THROUGH|THRU|TIME|TIMES|TO|TOP|TRAILING|'
             r'TRANSFORM|TYPE|UNDERLINE|UNIT|UNSIGNED|'
             r'UNSIGNED-INT|UNSIGNED-LONG|UNSIGNED-SHORT|UNTIL|UP|'
             r'UPDATE|UPON|USAGE|USING|VALUE|VALUES|VARYING|WAIT|WHEN|'
             r'WITH|WORDS|YYYYDDD|YYYYMMDD)'
             r'\s*($|(?=[^0-9a-z_\-]))', Keyword.Pseudo),

            # inactive reserved words
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(ACTIVE-CLASS|ALIGNED|ANYCASE|ARITHMETIC|ATTRIBUTE|B-AND|'
             r'B-NOT|B-OR|B-XOR|BIT|BOOLEAN|CD|CENTER|CF|CH|CHAIN|CLASS-ID|'
             r'CLASSIFICATION|COMMUNICATION|CONDITION|DATA-POINTER|'
             r'DESTINATION|DISABLE|EC|EGI|EMI|ENABLE|END-RECEIVE|'
             r'ENTRY-CONVENTION|EO|ESI|EXCEPTION-OBJECT|EXPANDS|FACTORY|'
             r'FLOAT-BINARY-16|FLOAT-BINARY-34|FLOAT-BINARY-7|'
             r'FLOAT-DECIMAL-16|FLOAT-DECIMAL-34|FLOAT-EXTENDED|FORMAT|'
             r'FUNCTION-POINTER|GET|GROUP-USAGE|IMPLEMENTS|INFINITY|'
             r'INHERITS|INTERFACE|INTERFACE-ID|INVOKE|LC_ALL|LC_COLLATE|'
             r'LC_CTYPE|LC_MESSAGES|LC_MONETARY|LC_NUMERIC|LC_TIME|'
             r'LINE-COUNTER|MESSAGE|METHOD|METHOD-ID|NESTED|NONE|NORMAL|'
             r'OBJECT|OBJECT-REFERENCE|OPTIONS|OVERRIDE|PAGE-COUNTER|PF|PH|'
             r'PROPERTY|PROTOTYPE|PURGE|QUEUE|RAISE|RAISING|RECEIVE|'
             r'RELATION|REPLACE|REPRESENTS-NOT-A-NUMBER|RESET|RESUME|RETRY|'
             r'RF|RH|SECONDS|SEGMENT|SELF|SEND|SOURCES|STATEMENT|STEP|'
             r'STRONG|SUB-QUEUE-1|SUB-QUEUE-2|SUB-QUEUE-3|SUPER|SYMBOL|'
             r'SYSTEM-DEFAULT|TABLE|TERMINAL|TEXT|TYPEDEF|UCS-4|UNIVERSAL|'
             r'USER-DEFAULT|UTF-16|UTF-8|VAL-STATUS|VALID|VALIDATE|'
             r'VALIDATE-STATUS)\s*($|(?=[^0-9a-z_\-]))', Error),

            # Data Types
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|'
             r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|'
             r'BINARY-C-LONG|'
             r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|'
             r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type),

            # Operators
            (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator),

            # (r'(::)', Keyword.Declaration),
            (r'([(),;:&%.])', Punctuation),

            # Intrinsics
            (r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|'
             r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|'
             r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|'
             r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|'
             r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|'
             r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG10|LOG|'
             r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|'
             r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|'
             r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|'
             r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|'
             r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|'
             r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*'
             r'($|(?=[^0-9a-z_\-]))', Name.Function),

            # Booleans
            (r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin),
            # Comparing Operators
            (r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|'
             r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word),
        ],

        # \"[^\"\n]*\"|\'[^\'\n]*\'
        'strings': [
            # apparently strings can be delimited by EOL if they are continued
            # in the next line
            (r'"[^"\n]*("|\n)', String.Double),
            (r"'[^'\n]*('|\n)", String.Single),
        ],

        'nums': [
            (r'\d+(\s*|\.$|$)', Number.Integer),
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
        ],
    }
class CobolFreeformatLexer(CobolLexer):
    """
    Lexer for Free format OpenCOBOL code.

    *New in Pygments 1.6.*
    """
    name = 'COBOLFree'
    aliases = ['cobolfree']
    filenames = ['*.cbl', '*.CBL']
    mimetypes = []
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        # Only the 'comment' state is overridden: free-format sources use
        # '*>' inline comments and leading-'*' comment lines instead of the
        # fixed-column indicator area handled by CobolLexer.
        'comment': [
            (r'(\*>.*\n|^\w*\*.*$)', Comment),
        ],
    }
class LogosLexer(ObjectiveCppLexer):
    """
    For Logos + Objective-C source code with preprocessor directives.

    *New in Pygments 1.6.*
    """

    name = 'Logos'
    aliases = ['logos']
    filenames = ['*.x', '*.xi', '*.xm', '*.xmi']
    mimetypes = ['text/x-logos']
    # Low priority so plain Objective-C++ files are not claimed by mistake.
    priority = 0.25

    tokens = {
        'statements': [
            (r'(%orig|%log)\b', Keyword),
            (r'(%c)\b(\()(\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\))',
             bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)),
            (r'(%init)\b(\()',
             bygroups(Keyword, Punctuation), 'logos_init_directive'),
            (r'(%init)(?=\s*;)', bygroups(Keyword)),
            (r'(%hook|%group)(\s+)([a-zA-Z$_][a-zA-Z0-9$_]+)',
             bygroups(Keyword, Text, Name.Class), '#pop'),
            (r'(%subclass)(\s+)', bygroups(Keyword, Text),
             ('#pop', 'logos_classname')),
            inherit,
        ],
        'logos_init_directive': [
            ('\s+', Text),
            (',', Punctuation, ('logos_init_directive', '#pop')),
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(=)(\s*)([^);]*)',
             bygroups(Name.Class, Text, Punctuation, Text, Text)),
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class),
            ('\)', Punctuation, '#pop'),
        ],
        'logos_classname': [
            # 'Subclass : Superclass' pair, or a bare class name.
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?',
             bygroups(Name.Class, Text, Name.Class), '#pop'),
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop')
        ],
        'root': [
            (r'(%subclass)(\s+)', bygroups(Keyword, Text),
             'logos_classname'),
            (r'(%hook|%group)(\s+)([a-zA-Z$_][a-zA-Z0-9$_]+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)',
             bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
            (r'(%ctor)(\s*)({)', bygroups(Keyword, Text, Punctuation),
             'function'),
            (r'(%new)(\s*)(\()(\s*.*?\s*)(\))',
             bygroups(Keyword, Text, Keyword, String, Keyword)),
            (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
            inherit,
        ],
    }

    _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()')

    def analyse_text(text):
        # Used by Pygments' lexer guessing: any Logos directive in the text
        # identifies the file with full confidence.
        if LogosLexer._logos_keywords.search(text):
            return 1.0
        return 0
| djanowski/pygmentize | vendor/pygments/pygments/lexers/compiled.py | Python | mit | 155,163 |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for various static pages (like the About page)."""
from core.tests import test_utils
class NoninteractivePagesTest(test_utils.GenericTestBase):

    def test_about_page(self):
        """Check that the About page is served as HTML with key sections."""
        about_response = self.testapp.get('/about')
        self.assertEqual(about_response.status_int, 200)
        self.assertEqual(about_response.content_type, 'text/html')
        about_response.mustcontain('Credits', 'Contact', 'License')
| DewarM/oppia | core/controllers/pages_test.py | Python | apache-2.0 | 1,048 |
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path
# Absolute path of the directory containing this setup script.
here = path.abspath(path.dirname(__file__))

setup(
    name='dicom2fem',
    description='Generation of finite element meshes from DICOM images',
    # BUG FIX: this keyword was misspelled 'long_desctioption', so the long
    # description never reached setuptools/PyPI.
    long_description="Generation of finite element meshes using computed " +
    "tomography scans. Segmentation is based on the graph cut algorithm.",
    # Versions should comply with PEP440.
    version='1.1.13',
    url='https://github.com/vlukes/dicom2fem',
    author='Vladimir Lukes',
    author_email='vlukes@kme.zcu.cz',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    # NOTE(review): the license classifier says BSD while 'license' above says
    # MIT — confirm which one is intended.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='fem dicom',
    packages=find_packages(exclude=['dist', 'docs', 'tests*']),
    # Run-time dependencies installed by pip alongside this project.
    install_requires=[
        # 'numpy', 'imcut'
    ],
    # dependency_links=['https://github.com/mjirik/gco_python'],
)
| mjirik/dicom2fem | setup.py | Python | bsd-3-clause | 3,561 |
"""
Copyright 2012 Jan Demter <jan@demter.de>
This file is part of LODStats.
LODStats is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
LODStats is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with LODStats. If not, see <http://www.gnu.org/licenses/>.
"""
from RDFStatInterface import RDFStatInterface
from lodstats.util.namespace import get_namespace
class Links(RDFStatInterface):
    """count links (object vocabulary != subject vocabulary)"""

    def __init__(self, results):
        super(Links, self).__init__(results)
        self.results['count'] = 0
        self.ns_links = self.results['namespacelinks'] = {}

    def count(self, s, p, o, s_blank, o_l, o_blank, statement):
        # Only resource-to-resource triples can constitute a link.
        if not (statement.subject.is_resource and statement.object.is_resource):
            return

        ns_subject = get_namespace(s)
        ns_object = get_namespace(o)
        if ns_object is None or ns_subject is None:
            return
        if ns_subject == ns_object:
            return

        # Triple crosses vocabularies: tally the overall count and the
        # per-(subject ns, object ns) pair count.
        self.results['count'] += 1
        pair_key = ns_subject + ns_object
        self.ns_links[pair_key] = self.ns_links.get(pair_key, 0) + 1

    def voidify(self, void_model, dataset):
        pass

    def sparql(self, endpoint):
        pass
| ahmadassaf/LODStats | lodstats/stats/Links.py | Python | gpl-3.0 | 1,766 |
# urllib3/_collections.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import deque
from threading import RLock
__all__ = ['RecentlyUsedContainer']
class AccessEntry(object):
    # A single access-log record: the touched key plus a validity flag, so
    # stale records can be skipped cheaply instead of being removed from the
    # middle of the deque.
    __slots__ = ('key', 'is_valid')

    def __init__(self, key, is_valid=True):
        self.key = key
        self.is_valid = is_valid


class RecentlyUsedContainer(dict):
    """
    Provides a dict-like that maintains up to ``maxsize`` keys while throwing
    away the least-recently-used keys beyond ``maxsize``.
    """

    # If len(self.access_log) exceeds self._maxsize * CLEANUP_FACTOR, then we
    # will attempt to cleanup the invalidated entries in the access_log
    # datastructure during the next 'get' operation.
    CLEANUP_FACTOR = 10

    def __init__(self, maxsize=10):
        self._maxsize = maxsize
        self._container = {}

        # Deque of AccessEntry objects, most recently used at the left end.
        self.access_log = deque()
        self.access_log_lock = RLock()

        # Maps key -> its current AccessEntry so the old entry can be
        # invalidated in O(1) when the key is touched again, without digging
        # through the deque.
        self.access_lookup = {}

        # Trigger a lazy cleanup of invalidated entries past this size.
        self.access_log_limit = maxsize * self.CLEANUP_FACTOR

    def _invalidate_entry(self, key):
        "If exists: Invalidate old entry and return it."
        old_entry = self.access_lookup.get(key)
        if old_entry:
            old_entry.is_valid = False

        return old_entry

    def _push_entry(self, key):
        "Push entry onto our access log, invalidate the old entry if exists."
        self._invalidate_entry(key)

        new_entry = AccessEntry(key)
        self.access_lookup[key] = new_entry

        self.access_log_lock.acquire()
        self.access_log.appendleft(new_entry)
        self.access_log_lock.release()

    def _prune_entries(self, num):
        "Pop entries from our access log until we popped ``num`` valid ones."
        while num > 0:
            self.access_log_lock.acquire()
            p = self.access_log.pop()
            self.access_log_lock.release()

            if not p.is_valid:
                continue  # Invalidated entry, skip

            dict.pop(self, p.key, None)
            self.access_lookup.pop(p.key, None)
            num -= 1

    def _prune_invalidated_entries(self):
        "Rebuild our access_log without the invalidated entries."
        self.access_log_lock.acquire()
        self.access_log = deque(e for e in self.access_log if e.is_valid)
        self.access_log_lock.release()

    def _get_ordered_access_keys(self):
        "Return ordered access keys for inspection. Used for testing."
        self.access_log_lock.acquire()
        r = [e.key for e in self.access_log if e.is_valid]
        self.access_log_lock.release()

        return r

    def __getitem__(self, key):
        # BUG FIX: the previous implementation fetched with ``dict.get`` and
        # raised KeyError whenever the item was falsy, so stored values such
        # as 0, '' or None were reported as missing. ``dict.__getitem__``
        # raises KeyError only for genuinely absent keys.
        item = dict.__getitem__(self, key)

        # Insert new entry with new high priority, also implicitly invalidates
        # the old entry.
        self._push_entry(key)

        if len(self.access_log) > self.access_log_limit:
            # Log is getting too big, clean up the trailing invalidated
            # entries.
            self._prune_invalidated_entries()

        return item

    def __setitem__(self, key, item):
        # Add item to our container and access log
        dict.__setitem__(self, key, item)
        self._push_entry(key)

        # Discard invalid and excess entries
        self._prune_entries(len(self) - self._maxsize)

    def __delitem__(self, key):
        self._invalidate_entry(key)
        self.access_lookup.pop(key, None)
        dict.__delitem__(self, key)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
| samabhi/pstHealth | venv/lib/python2.7/site-packages/requests/packages/urllib3/_collections.py | Python | mit | 4,119 |
__author__ = 'Kristy'
import bpy
from . import bpy_workspace as ws
import os
import json
from ematoblender.scripts.ema_shared import properties as pps
from .. import blender_shared_objects as bsh
import scripts.ema_shared.general_maths as bm
import scripts.ema_blender.coil_info as ci
def link_lips_to_coils():
    """Use the shared objects lip_control_points to fix the lips to the relevant sensors."""
    # todo: PRESCALE AND PLACE ARMATURE SO THIS DOESN'T GET TOO WEIRD
    # NOTE(review): .get(name, 0) returns the int 0 when the named empty does
    # not exist, in which case the .parent assignments below would fail —
    # confirm the empties named in properties always exist before this runs.
    lip_ul = bpy.data.objects.get(pps.UL_empty_name, 0)
    lip_ll = bpy.data.objects.get(pps.LL_empty_name, 0)
    lip_sll = bpy.data.objects.get(pps.SL_empty_name_right, 0)

    # Parent each lip empty to the active sensor mesh whose placement tag
    # matches (UL = upper lip, LL = lower lip, SL = side lip).
    for i, obj, place in bsh.ema_active_meshes:
        if place == 'UL':
            #lip_ul.location = obj.location
            lip_ul.parent = obj
        elif place == 'LL':
            #lip_ll.location = obj.location
            lip_ll.parent = obj
        elif place == 'SL':
            #lip_sll.location = obj.location
            lip_sll.parent = obj
# lets after the user has placed sensors on the tongue manually
@ws.postfn_gamemaster_reset_decorator
@ws.prefn_gamemaster_reset_decorator
def post_alignment_by_hand():
    """Do all the things that need to be done after the TT, TM, TB cubes and empties have been shrinkwrap-moved..."""
    # Tongue sensor cube names (tongue tip / mid / back).
    cubes = ['TT', 'TM', 'TB']
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_all(action='DESELECT')

    # move the origin of the shrinkwrapped things to the mesh surface, move the children there too
    for cn in cubes:
        bpy.ops.object.select_pattern(pattern=cn)
        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
        cubeobj = bpy.data.objects[cn]
        # NOTE(review): assumes each cube has exactly one child (its empty) —
        # confirm before relying on children[0].
        thisempty = cubeobj.children[0]
        thisempty.location = cubeobj.location

        # make the shrinkwrapped things into cubes again
        cubeobj.modifiers.remove(cubeobj.modifiers['Shrinkwrap'])
| m2ci-msp/ematoblender | ematoblender/scripts/ema_blender/ema_bpy/bpy_link_assets_coils.py | Python | gpl-3.0 | 1,907 |
# coding=utf-8
"""
Collect IO Stats
Note: You may need to artifically generate some IO load on a disk/partition
before graphite will generate the metrics.
* http://www.kernel.org/doc/Documentation/iostats.txt
#### Dependencies
* /proc/diskstats
"""
import diamond.collector
import diamond.convertor
import time
import os
import re
try:
import psutil
except ImportError:
psutil = None
class DiskUsageCollector(diamond.collector.Collector):
MAX_VALUES = {
'reads': 4294967295,
'reads_merged': 4294967295,
'reads_milliseconds': 4294967295,
'writes': 4294967295,
'writes_merged': 4294967295,
'writes_milliseconds': 4294967295,
'io_milliseconds': 4294967295,
'io_milliseconds_weighted': 4294967295
}
LastCollectTime = None
def get_default_config_help(self):
    """Return the help text for this collector's configuration options,
    extending the base collector's help with the disk-usage settings."""
    config_help = super(DiskUsageCollector, self).get_default_config_help()
    config_help.update({
        'devices': "A regex of which devices to gather metrics for."
                   + " Defaults to md, sd, xvd, disk, and dm devices",
        'sector_size': 'The size to use to calculate sector usage',
        'send_zero': 'Send io data even when there is no io',
    })
    return config_help
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(DiskUsageCollector, self).get_default_config()
    config.update({
        'path': 'iostat',
        # Each alternative is anchored with '$' so only complete device
        # names (optionally with a partition number) match.
        'devices': ('PhysicalDrive[0-9]+$'
                    + '|md[0-9]+$'
                    + '|sd[a-z]+[0-9]*$'
                    + '|x?vd[a-z]+[0-9]*$'
                    + '|disk[0-9]+$'
                    + '|dm\-[0-9]+$'),
        # Bytes per sector, used to convert sector counts to byte units.
        'sector_size': 512,
        'send_zero': False,
    })
    return config
def get_disk_statistics(self):
    """
    Create a map of disks in the machine.

    http://www.kernel.org/doc/Documentation/iostats.txt

    Returns:
        (major, minor) -> DiskStatistics(device, ...)

    Reads /proc/diskstats when available (sets ``self.proc_diskstats`` to
    True); otherwise falls back to psutil per-disk counters (and returns
    None if psutil is not importable).
    """
    result = {}
    if os.access('/proc/diskstats', os.R_OK):
        self.proc_diskstats = True
        fp = open('/proc/diskstats')
        try:
            for line in fp:
                try:
                    columns = line.split()
                    # On early linux v2.6 versions, partitions have only 4
                    # output fields not 11. From linux 2.6.25 partitions
                    # have the full stats set.
                    if len(columns) < 14:
                        continue
                    major = int(columns[0])
                    minor = int(columns[1])
                    device = columns[2]

                    # RAM and loopback devices carry no useful IO stats.
                    if (device.startswith('ram')
                            or device.startswith('loop')):
                        continue

                    result[(major, minor)] = {
                        'device': device,
                        'reads': float(columns[3]),
                        'reads_merged': float(columns[4]),
                        'reads_sectors': float(columns[5]),
                        'reads_milliseconds': float(columns[6]),
                        'writes': float(columns[7]),
                        'writes_merged': float(columns[8]),
                        'writes_sectors': float(columns[9]),
                        'writes_milliseconds': float(columns[10]),
                        'io_in_progress': float(columns[11]),
                        'io_milliseconds': float(columns[12]),
                        'io_milliseconds_weighted': float(columns[13])
                    }
                except ValueError:
                    # Skip malformed lines rather than aborting the scan.
                    continue
        finally:
            fp.close()
    else:
        self.proc_diskstats = False
        if not psutil:
            self.log.error('Unable to import psutil')
            return None

        disks = psutil.disk_io_counters(True)
        for disk in disks:
            # psutil has no (major, minor) pair; use (0, index) as a
            # synthetic unique key. Merged/in-progress counters are not
            # available from psutil, so those fields are omitted here.
            result[(0, len(result))] = {
                'device': disk,
                'reads': disks[disk].read_count,
                'reads_sectors': (disks[disk].read_bytes
                                  / int(self.config['sector_size'])),
                'reads_milliseconds': disks[disk].read_time,
                'writes': disks[disk].write_count,
                'writes_sectors': (disks[disk].write_bytes
                                   / int(self.config['sector_size'])),
                'writes_milliseconds': disks[disk].write_time,
                'io_milliseconds':
                    disks[disk].read_time + disks[disk].write_time,
                'io_milliseconds_weighted':
                    disks[disk].read_time + disks[disk].write_time
            }

    return result
    def collect(self):
        """Publish per-device disk IO metrics derived from the raw counters.

        Counter deltas come from self.derivative(); rates divide by the
        wall-clock time elapsed since the previous collect() call.
        """
        # Handle collection time intervals correctly
        CollectTime = time.time()
        time_delta = float(self.config['interval'])
        if self.LastCollectTime:
            time_delta = CollectTime - self.LastCollectTime
        if not time_delta:
            # guard against a zero delta (two collects in the same instant)
            time_delta = float(self.config['interval'])
        self.LastCollectTime = CollectTime

        # only devices matching the configured regex are reported
        exp = self.config['devices']
        reg = re.compile(exp)

        results = self.get_disk_statistics()
        if not results:
            self.log.error('No diskspace metrics retrieved')
            return None

        # NOTE(review): the inner loop below reuses the name `key`,
        # shadowing this outer loop's device key; harmless today because
        # the outer `key` is never read again, but fragile.
        for key, info in results.iteritems():
            metrics = {}
            name = info['device']

            if not reg.match(name):
                continue

            for key, value in info.iteritems():
                if key == 'device':
                    continue

                oldkey = key

                for unit in self.config['byte_unit']:
                    key = oldkey

                    if key.endswith('sectors'):
                        # convert sector counters into the requested byte unit
                        key = key.replace('sectors', unit)
                        value /= (1024 / int(self.config['sector_size']))
                        value = diamond.convertor.binary.convert(value=value,
                                                                 oldUnit='kB',
                                                                 newUnit=unit)
                        self.MAX_VALUES[key] = diamond.convertor.binary.convert(
                            value=diamond.collector.MAX_COUNTER,
                            oldUnit='byte',
                            newUnit=unit)

                    metric_name = '.'.join([info['device'], key])
                    # io_in_progress is a point in time counter, !derivative
                    if key != 'io_in_progress':
                        # time_delta=False: derivative yields the raw delta;
                        # per-second rates are computed explicitly below
                        metric_value = self.derivative(
                            metric_name,
                            value,
                            self.MAX_VALUES[key],
                            time_delta=False)
                    else:
                        metric_value = value

                    metrics[key] = metric_value

            if self.proc_diskstats:
                # merged-request counters only exist in /proc/diskstats
                metrics['read_requests_merged_per_second'] = (
                    metrics['reads_merged'] / time_delta)
                metrics['write_requests_merged_per_second'] = (
                    metrics['writes_merged'] / time_delta)

            metrics['reads_per_second'] = metrics['reads'] / time_delta
            metrics['writes_per_second'] = metrics['writes'] / time_delta

            for unit in self.config['byte_unit']:
                metric_name = 'read_%s_per_second' % unit
                key = 'reads_%s' % unit
                metrics[metric_name] = metrics[key] / time_delta

                metric_name = 'write_%s_per_second' % unit
                key = 'writes_%s' % unit
                metrics[metric_name] = metrics[key] / time_delta

                # Set to zero so the nodes are valid even if we have 0 io for
                # the metric duration
                metric_name = 'average_request_size_%s' % unit
                metrics[metric_name] = 0

            metrics['io'] = metrics['reads'] + metrics['writes']

            metrics['average_queue_length'] = (
                metrics['io_milliseconds_weighted']
                / time_delta
                / 1000.0)

            metrics['util_percentage'] = (metrics['io_milliseconds']
                                          / time_delta
                                          / 10.0)

            # await = mean milliseconds a request of that kind waited
            if metrics['reads'] > 0:
                metrics['read_await'] = (
                    metrics['reads_milliseconds'] / metrics['reads'])
            else:
                metrics['read_await'] = 0

            if metrics['writes'] > 0:
                metrics['write_await'] = (
                    metrics['writes_milliseconds'] / metrics['writes'])
            else:
                metrics['write_await'] = 0

            for unit in self.config['byte_unit']:
                rkey = 'reads_%s' % unit
                wkey = 'writes_%s' % unit
                metric_name = 'average_request_size_%s' % unit
                if (metrics['io'] > 0):
                    metrics[metric_name] = (
                        metrics[rkey] + metrics[wkey]) / metrics['io']
                else:
                    metrics[metric_name] = 0

            metrics['iops'] = metrics['io'] / time_delta

            if (metrics['io'] > 0):
                metrics['service_time'] = (
                    metrics['io_milliseconds'] / metrics['io'])
                metrics['await'] = (
                    metrics['reads_milliseconds']
                    + metrics['writes_milliseconds']) / metrics['io']
            else:
                metrics['service_time'] = 0
                metrics['await'] = 0

            # http://www.scribd.com/doc/15013525
            # Page 28
            metrics['concurrent_io'] = (metrics['reads_per_second']
                                        + metrics['writes_per_second']
                                        ) * (metrics['service_time']
                                             / 1000.0)

            # Only publish when we have io figures
            if (metrics['io'] > 0 or self.config['send_zero']):
                for key in metrics:
                    metric_name = '.'.join([info['device'], key]).replace(
                        '/', '_')
                    self.publish(metric_name, metrics[key])
| sebbrandt87/Diamond | src/collectors/diskusage/diskusage.py | Python | mit | 10,773 |
# -*- coding: utf-8 -*-
import sys
#sys.path.append("/usr/lib/python2.6/")
#sys.path.append("/usr/lib/python2.6/lib-dynload")
import sublime, sublime_plugin
import subprocess, time
import tempfile
import os, signal
import stat
#import xml.parsers.expat
import re
import codecs
import glob
import hashlib
import shutil
import functools
# Information about where the plugin is running from
plugin_file = __file__
plugin_filepath = os.path.realpath(plugin_file)
plugin_path = os.path.dirname(plugin_filepath)
# Reload modules
reloader = 'features.haxe_reload_modules'
if sys.version_info >= (3,):
reloader = 'Haxe.' + reloader
if reloader in sys.modules:
sys.modules[reloader].reload_modules()
try: # Python 3
# Import the features module, including the haxelib and key commands etc
from .features import *
from .features.haxelib import *
# Import the helper functions and regex helpers
from .features.haxe_helper import runcmd, show_quick_panel, cache, parse_sig, get_env
from .features.haxe_helper import spaceChars, wordChars, importLine, packageLine
from .features.haxe_helper import compactFunc, compactProp, libLine, classpathLine, typeDecl
from .features.haxe_helper import libFlag, skippable, inAnonymous, extractTag
from .features.haxe_helper import variables, functions, functionParams, paramDefault
from .features.haxe_helper import isType, comments, haxeVersion, haxeFileRegex, controlStruct
from .features.haxe_errors import highlight_errors, extract_errors
except (ValueError): # Python 2
# Import the features module, including the haxelib and key commands etc
from features import *
from features.haxelib import *
# Import the helper functions and regex helpers
from features.haxe_helper import runcmd, show_quick_panel, cache, parse_sig, get_env
from features.haxe_helper import spaceChars, wordChars, importLine, packageLine
from features.haxe_helper import compactFunc, compactProp, libLine, classpathLine, typeDecl
from features.haxe_helper import libFlag, skippable, inAnonymous, extractTag
from features.haxe_helper import variables, functions, functionParams, paramDefault
from features.haxe_helper import isType, comments, haxeVersion, haxeFileRegex, controlStruct
from features.haxe_errors import highlight_errors, extract_errors
# For running background tasks
from subprocess import Popen, PIPE
try:
STARTUP_INFO = subprocess.STARTUPINFO()
STARTUP_INFO.dwFlags |= subprocess.STARTF_USESHOWWINDOW
STARTUP_INFO.wShowWindow = subprocess.SW_HIDE
except (AttributeError):
STARTUP_INFO = None
# For parsing xml
from xml.etree import ElementTree
from xml.etree.ElementTree import TreeBuilder as XMLTreeBuilder
try :
from elementtree import SimpleXMLTreeBuilder # part of your codebase
ElementTree.XMLTreeBuilder = SimpleXMLTreeBuilder.TreeBuilder
except ImportError as e:
pass # ST3
try :
stexec = __import__("exec")
ExecCommand = stexec.ExecCommand
AsyncProcess = stexec.AsyncProcess
except ImportError as e :
import Default
stexec = getattr( Default , "exec" )
ExecCommand = stexec.ExecCommand
AsyncProcess = stexec.AsyncProcess
unicode = str #dirty...
class HaxeLib :
    """A haxelib library installed on the local system.

    Class attribute `available` maps library name -> HaxeLib and is
    populated by scan(); `basePath` is the haxelib repository root.
    """

    available = {}
    basePath = None

    def __init__( self , name , dev , version ):
        self.name = name
        self.dev = dev
        self.version = version
        self.classes = None
        self.packages = None

        if self.dev :
            # a dev lib points straight at a working directory
            self.path = self.version
            self.version = "dev"
        else :
            # haxelib stores version folders with ',' instead of '.'
            self.path = os.path.join( HaxeLib.basePath , self.name , ",".join(self.version.split(".")) )

        #print(self.name + " => " + self.path)

    def extract_types( self ):
        """Lazily extract (classes, packages); dev libs are re-scanned every call."""
        # truthy check kept consistent with __init__ (was `is True` here)
        if self.dev or ( self.classes is None and self.packages is None ):
            self.classes, self.packages = HaxeComplete.inst.extract_types(
                self.path ,
                cache_name = '%s_%s.cache' % (self.name, self.version) )

        return self.classes, self.packages

    @staticmethod
    def get( name ) :
        """Return the installed lib called `name`, or None with a status hint."""
        # direct membership test instead of `in dict.keys()`
        if name in HaxeLib.available :
            return HaxeLib.available[name]
        else :
            sublime.status_message( "Haxelib : "+ name +" project not installed" )
            return None

    @staticmethod
    def get_completions() :
        """Completion entries ("name [version]", "name") for every installed lib."""
        return [
            ( lib.name + " [" + lib.version + "]" , lib.name )
            for lib in HaxeLib.available.values()
        ]

    @staticmethod
    def scan( view ) :
        """Re-detect installed libraries by querying the `haxelib` CLI."""
        settings = view.settings()
        haxelib_path = settings.get("haxelib_path" , "haxelib")
        hlout, hlerr = runcmd( [haxelib_path , "config" ] )
        HaxeLib.basePath = hlout.strip()

        HaxeLib.available = {}

        hlout, hlerr = runcmd( [haxelib_path , "list" ] )
        for l in hlout.split("\n") :
            found = libLine.match( l )
            if found is not None :
                name, dev, version = found.groups()
                lib = HaxeLib( name , dev is not None , version )
                HaxeLib.available[ name ] = lib
# Module-level singleton slot (HaxeComplete.__init__ also sets HaxeComplete.inst)
inst = None
# Shared store for completion documentation; populated outside this chunk
documentationStore = {}
class BuildCache:
    """Memoised parse of one build file: its path, the raw file text it
    was parsed from, the resulting HaxeBuild and the target it was
    resolved for."""

    def __init__(self, path, raw, build, target):
        self.path, self.raw = path, raw
        self.build, self.target = build, target
class HaxeBuild :
    """One compilable Haxe target: a plain hxml build, an NME/OpenFL/Lime
    project (nmml/xml/hxp/lime) or a Flambe project (flambe.yaml)."""

    #auto = None
    # plain Haxe compiler output targets
    targets = ["js","cpp","swf","neko","php","java","cs","x","python"]
    # (panel label, nme target flags, nme action) triples
    nme_targets = [
        ("Flash - test","flash -debug","test"),
        ("Flash - build only","flash -debug","build"),
        ("Flash - release","flash","build"),
        ("HTML5 - test","html5 -debug","test"),
        ("HTML5 - build only","html5 -debug","build"),
        ("HTML5 - release","html5","build"),
        ("C++ - test","cpp -debug","test"),
        ("C++ - build only","cpp -debug","build"),
        ("C++ - release","cpp","build"),
        ("Linux - test","linux -debug","test"),
        ("Linux - build only","linux -debug","build"),
        ("Linux - release","linux","build"),
        ("Linux 64 - test","linux -64 -debug","test"),
        ("Linux 64 - build only","linux -64 -debug","build"),
        ("Linux 64 - release","linux -64","build"),
        ("iOS - test in iPhone simulator","ios -simulator -debug","test"),
        ("iOS - test in iPad simulator","ios -simulator -ipad -debug","test"),
        ("iOS - update XCode project","ios -debug","update"),
        ("iOS - release","ios","build"),
        ("Android - test","android -debug","test"),
        ("Android - build only","android -debug","build"),
        ("Android - release","android","build"),
        ("WebOS - test", "webos -debug","test"),
        ("WebOS - build only", "webos -debug","build"),
        ("WebOS - release", "webos","build"),
        ("Neko - test","neko -debug","test"),
        ("Neko - build only","neko -debug","build"),
        ("Neko - release","neko","build"),
        ("Neko 64 - test","neko -64 -debug","test"),
        ("Neko 64 - build only","neko -64 -debug","build"),
        ("Neko 64 - release","neko -64","build"),
        ("BlackBerry - test","blackberry -debug","test"),
        ("BlackBerry - build only","blackberry -debug","build"),
        ("BlackBerry - release","blackberry","build"),
        ("Emscripten - test", "emscripten -debug","test"),
        ("Emscripten - build only", "emscripten -debug","build"),
        ("Emscripten - release", "emscripten","build"),
    ]
    # currently selected NME target (shared across all builds)
    nme_target = ("Flash - test","flash -debug","test")
    # (panel label, flambe CLI arguments) pairs
    flambe_targets = [
        ("Flash - test", "run flash --debug" ),
        ("Flash - build only", "build flash --debug" ),
        ("HTML5 - test", "run html --debug" ),
        ("HTML5 - build only" , "build html --debug"),
        ("Android - test" , "run android --debug"),
        ("Android - build only" , "build android --debug"),
        ("iOS - test" , "run ios --debug"),
        ("iOS - build only" , "build ios --debug"),
        ("Firefox App - test" , "run firefox --debug"),
        ("Firefox App - build only" , "build firefox --debug"),
    ]
    # currently selected Flambe target (shared across all builds)
    flambe_target = ("Flash - run", "run flash --debug")

    def __init__(self) :
        self.args = []          # compiler arguments as tuples
        self.main = None        # fully qualified main class, if any
        self.target = None      # compiler target flag ("js", "swf", "--interp", ...)
        self.output = None      # output file/folder
        self.hxml = None        # path of the (possibly synthesized) hxml file
        self.nmml = None        # path of the NME/OpenFL project file, if any
        self.yaml = None        # path of the flambe.yaml, if any
        self.classpaths = []
        self.libs = []
        self.classes = None
        self.packages = None
        self.libClasses = None  # cached classes contributed by libs
        self.libPacks = None    # cached packages contributed by libs
        self.openfl = False
        self.lime = False
        self.cwd = None

    def __eq__(self,other) :
        # builds compare by full state (used by the `in` test in add_build)
        return self.__dict__ == other.__dict__

    def __cmp__(self,other) :
        # Python 2 leftover; mirrors __eq__
        return self.__dict__ == other.__dict__

    def is_valid(self) :
        """A build needs a target (unless project-file based) and a main or output."""
        if self.hxml is not None and self.target is None and self.yaml is None and self.nmml is None :
            return False
        if self.main is None and self.output is None :
            return False;
        return True;

    def to_string(self) :
        """Human-readable label used in the status bar and quick panels."""
        if not self.is_valid() :
            return "Invalid Build"

        out = self.main
        if self.output is not None :
            out = os.path.basename(self.output)
        main = self.main
        if main is None :
            main = "[no main]"

        if self.openfl :
            return "{out} (openfl / {target})".format(self=self, out=out, target=HaxeBuild.nme_target[0]);
        elif self.lime :
            return "{out} (lime / {target})".format(self=self, out=out, target=HaxeBuild.nme_target[0]);
        elif self.nmml is not None:
            return "{out} (NME / {target})".format(self=self, out=out, target=HaxeBuild.nme_target[0]);
        elif self.yaml is not None:
            return "{out} (Flambe / {target})".format(self=self, out=out, target=HaxeBuild.flambe_target[0]);
        else:
            if self.target == "--interp" :
                return "{main} (interp)".format(main=main);
            if self.target == "--run" :
                return "{main} (run)".format(main=main);
            return "{main} ({target}:{out})".format(self=self, out=out, main=main, target=self.target);
        #return "{self.main} {self.target}:{out}".format(self=self, out=out);

    def make_hxml( self ) :
        """Render this build back into hxml text (paths made relative)."""
        outp = "# Autogenerated "+self.hxml+"\n\n"
        outp += "# "+self.to_string() + "\n"
        outp += "-main "+ self.main + "\n"
        for a in self.args :
            outp += " ".join( list(a) ) + "\n"

        d = os.path.dirname( self.hxml ) + "/"

        # relative paths
        outp = outp.replace( d , "")
        outp = outp.replace( "-cp "+os.path.dirname( self.hxml )+"\n", "")

        outp = outp.replace("--no-output" , "")
        outp = outp.replace("-v" , "")

        #outp = outp.replace("dummy" , self.main.lower() )

        #print( outp )
        return outp.strip()

    def is_temp( self ) :
        # a build whose hxml file doesn't exist yet is a synthesized one
        return not os.path.exists( self.hxml )

    def get_types( self ) :
        """Collect sorted class/package lists from classpaths and libs.

        Lib results are cached on the instance; classpath results are
        re-extracted on every call.
        """
        cwd = self.cwd
        if cwd is None :
            cwd = os.path.dirname( self.hxml )
        if self.libClasses is None or self.libPacks is None :
            classes = []
            packs = []
            cp = []
            for lib in self.libs :
                if lib is None :
                    continue
                c, p = HaxeComplete.inst.extract_types(
                    os.path.join( cwd , lib.path ),
                    cache_name = '%s_%s.cache' % (lib.name, lib.version) )
                classes.extend( c )
                packs.extend( p )
            self.libClasses = classes;
            self.libPacks = packs;
        classes = []
        packs = []
        cp = self.classpaths
        for path in cp :
            c, p = HaxeComplete.inst.extract_types( os.path.join( cwd , path ) )
            classes.extend( c )
            packs.extend( p )
        classes.extend(self.libClasses)
        packs.extend(self.libPacks)
        classes.sort()
        packs.sort()
        # NOTE(review): results are stored on self.packs while __init__
        # declares self.packages — callers appear to read self.packs;
        # confirm before renaming either attribute.
        self.classes = classes;
        self.packs = packs;
        return self.classes, self.packs

    def get_classpath(self, view):
        """Return the build classpath containing this view's file, or None."""
        filepath = view.file_name()
        buildpath = self.hxml
        if buildpath is None:
            buildpath = self.nmml
        if buildpath is None:
            buildpath = self.yaml
        builddir = os.path.dirname(buildpath)
        abscps = []
        for cp in self.classpaths:
            if os.path.isabs(cp):
                abscps.append(cp)
            else:
                abscps.append(
                    os.path.normpath(os.path.join(builddir, cp)))
        for cp in abscps:
            if cp in filepath:
                return cp
        return None
class HaxeDisplayCompletion( sublime_plugin.TextCommand ):
    """Command: open Sublime's completion popup in forced-display mode."""

    def show_auto_complete(self):
        """Flag the completion engine, pop the panel, then clear the flags."""
        engine = HaxeComplete.inst
        engine.force_display_completion = True
        engine.type_completion_only = self.type_completion
        self.view.run_command('auto_complete', {
            'api_completions_only': True,
            'disable_auto_insert': True,
            'next_completion_if_showing': False
        })
        engine.force_display_completion = False
        engine.type_completion_only = False

    def run(self, edit, type_completion=False, hide=False):
        self.type_completion = type_completion
        if not hide:
            self.show_auto_complete()
            return
        # close the current popup first, then re-open it shortly after
        self.view.run_command('hide_auto_complete')
        sublime.set_timeout(self.show_auto_complete, 100)
class HaxeInsertCompletion( sublime_plugin.TextCommand ):
    """Command: commit the best completion, falling back to inserting '.'."""

    def run(self, edit):
        args = {"default": ".", "exact": True}
        self.view.run_command("insert_best_completion", args)
class HaxeSaveAllAndBuild( sublime_plugin.TextCommand ):
    """Command: save every open file, then run the current Haxe build."""

    def run(self, edit):
        engine = HaxeComplete.inst
        active = self.view
        active.window().run_command("save_all")
        engine.run_build(active)
class HaxeRunBuild( sublime_plugin.TextCommand ):
    """Command: run the current Haxe build without saving first."""

    def run(self, edit):
        HaxeComplete.inst.run_build(self.view)
class HaxeSelectBuild( sublime_plugin.TextCommand ):
    """Command: open the build-selection quick panel."""

    def run(self, edit, all_views=False):
        HaxeComplete.inst.select_build(self.view, all_views)
class HaxeComplete( sublime_plugin.EventListener ):
    """Central event listener: discovers builds, drives completion and compilation."""

    #folder = ""
    #buildArgs = []
    currentBuild = None                 # build currently selected for compilation
    selectingBuild = False              # True while the build quick panel is open
    builds = []                         # all builds discovered so far
    haxe_settings_file = 'Preferences.sublime-settings'
    # last completion request/response pair
    currentCompletion = {
        "inp" : None,
        "outp" : None
    }
    classpathExclude = ['.git','_std']  # folders skipped while extracting types
    classpathDepth = 2                  # max recursion depth without classes found
    stdPaths = []
    stdPackages = []
    #stdClasses = ["Void","Float","Int","UInt","Null","Bool","Dynamic","Iterator","Iterable","ArrayAccess"]
    stdClasses = []
    stdCompletes = []

    visibleCompletionList = [] # This will contain the list of visible completions, if there is one.

    panel = None                        # shared "haxe" output panel
    serverMode = False                  # whether a haxe compilation server is used
    serverProc = None
    serverPort = 6000
    compilerVersion = 2                 # detected major haxe version
    inited = False
def __init__(self):
#print("init haxecomplete")
HaxeComplete.inst = self
self.build_cache = {}
self.force_display_completion = False
self.type_completion_only = False
self.selected_build_id_map = {}
    def __del__(self) :
        # make sure the background compilation server dies with the plugin
        self.stop_server()
    def extract_types( self , path , depth = 0 , cache_name = None ) :
        """Recursively list classes and packages found under `path`.

        Results may be served from / written to the on-disk cache when
        `cache_name` is given and the 'haxe_use_cache' setting is on.
        Returns (classes, packs), both sorted.
        """
        classes = []
        packs = []
        hasClasses = False

        if cache_name is not None:
            view = sublime.active_window().active_view()
            if view.settings().get('haxe_use_cache', True):
                cache_str = cache(cache_name)
                if cache_str is not None:
                    # cache format: "cls1,cls2;pack1,pack2"
                    spl = cache_str.split(';')
                    classes = spl[0].split(',')
                    packs = spl[1].split(',')
                    return classes, packs

        #print(path)
        if not os.path.exists( path ) :
            print('Warning: path %s doesn´t exists.'%path);
            return classes, packs

        for fullpath in glob.glob( os.path.join(path,"*.hx") ) :
            f = os.path.basename(fullpath)

            cl, ext = os.path.splitext( f )

            if cl not in HaxeComplete.stdClasses:
                # NOTE(review): the file handle `s` is never closed explicitly
                s = codecs.open( os.path.join( path , f ) , "r" , "utf-8" , "ignore" )
                src = comments.sub( "" , s.read() )

                clPack = "";
                for ps in packageLine.findall( src ) :
                    clPack = ps

                if clPack == "" :
                    packDepth = 0
                else:
                    packDepth = len(clPack.split("."))

                for decl in typeDecl.findall( src ):
                    t = decl[1]
                    params = decl[2]

                    if( packDepth == depth ) : # and t == cl or cl == "StdTypes"
                        # types declared in their own file keep just the type
                        # name; secondary types get a "File.Type" prefix
                        if t == cl or cl == "StdTypes":
                            classes.append( t + params )
                        else:
                            classes.append( cl + "." + t + params )

                        hasClasses = True

        # keep recursing while classes were found or we are still shallow
        if hasClasses or depth <= self.classpathDepth :
            for f in os.listdir( path ) :
                cl, ext = os.path.splitext( f )
                if os.path.isdir( os.path.join( path , f ) ) and f not in self.classpathExclude :
                    packs.append( f )
                    subclasses,subpacks = self.extract_types( os.path.join( path , f ) , depth + 1 )
                    for cl in subclasses :
                        classes.append( f + "." + cl )

        classes.sort()
        packs.sort()

        if cache_name is not None:
            view = sublime.active_window().active_view()
            if view.settings().get('haxe_use_cache', True):
                cache_str = ';'.join((','.join(classes), ','.join(packs)))
                cache(cache_name, cache_str)

        return classes, packs
def on_post_save( self , view ) :
if view.score_selector(0,'source.hxml') > 0:
self.clear_build(view)
    def on_activated( self , view ) :
        # treat focus changes exactly like opening the file
        return self.on_open_file( view )
    def on_load( self, view ) :
        # treat loads exactly like opening the file
        return self.on_open_file( view )
def on_open_file( self , view ) :
if view.is_loading() :
return;
if view.window() is None:
return
if view.score_selector(0,'source.haxe.2') > 0 :
HaxeCreateType.on_activated( view )
elif view.score_selector(0,'source.hxml,source.erazor,source.nmml') == 0:
return
self.init_plugin( view )
# HaxeProjects.determine_type()
self.extract_build_args( view )
self.get_build( view )
self.generate_build( view )
highlight_errors( view )
def on_pre_save( self , view ) :
if view.score_selector(0,'source.haxe.2') == 0 :
return []
fn = view.file_name()
if fn is not None :
path = os.path.dirname( fn )
if not os.path.isdir( path ) :
os.makedirs( path )
    def __on_modified( self , view ):
        # Name-mangled, so NOT wired to Sublime's on_modified event —
        # appears to be intentionally disabled auto-popup logic kept
        # for reference.
        win = sublime.active_window()
        if win is None :
            return None

        isOk = ( win.active_view().buffer_id() == view.buffer_id() )
        if not isOk :
            return None

        sel = view.sel()
        caret = 0
        for s in sel :
            caret = s.a

        if caret == 0 :
            return None

        # only pop completions inside Haxe code, never in strings/comments
        if view.score_selector(caret,"source.haxe") == 0 or view.score_selector(caret,"string,comment,keyword.control.directive.conditional.haxe.2") > 0 :
            return None

        src = view.substr(sublime.Region(0, view.size()))
        ch = src[caret-1]
        #print(ch)
        if ch not in ".(:, " :
            view.run_command("haxe_display_completion")
        #else :
        #    view.run_command("haxe_insert_completion")
def generate_build(self, view) :
fn = view.file_name()
if fn is not None and self.currentBuild is not None and fn == self.currentBuild.hxml and view.size() == 0 :
view.run_command("insert_snippet",{
"contents" : self.currentBuild.make_hxml()
})
def select_build( self , view , all_views = False ) :
scopes = view.scope_name(view.sel()[0].end()).split()
if 'source.hxml' in scopes:
view.run_command("save")
self.extract_build_args( view , True , all_views )
    def find_nmml( self, folder ) :
        """Register NME/OpenFL/Lime builds defined by project files in `folder`.

        Recognises *.nmml (NME), *.xml (OpenFL), *.hxp and *.lime (Lime).
        Parsed results are memoised in self.build_cache keyed by path and
        raw file content.
        """
        nmmls = glob.glob( os.path.join( folder , "*.nmml" ) )
        nmmls += glob.glob( os.path.join( folder , "*.xml" ) )
        nmmls += glob.glob( os.path.join( folder , "*.hxp" ) )
        nmmls += glob.glob( os.path.join( folder , "*.lime" ) )

        for build in nmmls:
            # yeah...
            if not os.path.exists( build ) :
                continue

            f = codecs.open( build , "r+", "utf-8" , "ignore" )
            raw = f.read()

            # serve from cache when the file content is unchanged
            if build in self.build_cache and \
                    self.build_cache[build].raw == raw:
                currentBuild = self.build_cache[build].build
                if currentBuild.main is not None :
                    self.add_build( currentBuild )
                continue

            currentBuild = HaxeBuild()
            currentBuild.hxml = build
            currentBuild.nmml = build
            currentBuild.openfl = build.endswith("xml")
            currentBuild.lime = build.endswith("lime")
            buildPath = os.path.dirname(build)

            self.build_cache[build] = BuildCache(build, raw, currentBuild, None)

            outp = "NME"
            is_hxp = build.endswith("hxp")
            if is_hxp:
                # hxp projects are Haxe code, not XML: no tags to scan
                currentBuild.main = 'hxp'
                outp = 'Lime/OpenFl'
                currentBuild.lime = True

            lines = raw.splitlines()
            for l in lines:
                if len(l) > 200:
                    continue
                if is_hxp:
                    continue
                m = extractTag.search(l)
                if not m is None:
                    #print(m.groups())
                    tag = m.group(1)
                    name = m.group(3)
                    if (tag == "app"):
                        currentBuild.main = name
                        currentBuild.args.append( ("-main" , name) )
                        mFile = re.search("\\b(file|title)=\"([a-z0-9_-]+)\"", l, re.I)
                        if not mFile is None:
                            outp = mFile.group(2)
                    elif (tag == "haxelib"):
                        currentBuild.libs.append( HaxeLib.get( name ) )
                        currentBuild.args.append( ("-lib" , name) )
                    elif (tag == "haxedef"):
                        currentBuild.args.append( ("-D", name) )
                    elif (tag == "classpath" or tag == "source"):
                        currentBuild.classpaths.append( os.path.join( buildPath , name ) )
                        currentBuild.args.append( ("-cp" , os.path.join( buildPath , name ) ) )
                else: # NME 3.2
                    mPath = re.search("\\bpath=\"([a-z0-9_-]+)\"", l, re.I)
                    if not mPath is None:
                        #print(mPath.groups())
                        path = mPath.group(1)
                        currentBuild.classpaths.append( os.path.join( buildPath , path ) )
                        currentBuild.args.append( ("-cp" , os.path.join( buildPath , path ) ) )

            outp = os.path.join( folder , outp )
            if currentBuild.openfl or currentBuild.lime :
                # haxe 3 dropped the swf9 target name
                if self.compilerVersion >= 3 :
                    currentBuild.target = "swf"
                else :
                    currentBuild.target = "swf9"
            else :
                currentBuild.target = "cpp"
                currentBuild.args.append( ("--remap", "flash:nme") )

            #currentBuild.args.append( ("-cpp", outp) )
            currentBuild.output = outp
            currentBuild.args.append( ("-"+currentBuild.target, outp) )

            if currentBuild.main is not None :
                self.add_build( currentBuild )
    def find_yaml( self, folder ) :
        """Register a Flambe build if `folder` contains a flambe.yaml.

        The library list comes from asking the flambe CLI for its haxe
        flags and collecting every `-lib` argument's follower.
        """
        yamls = glob.glob( os.path.join( folder , "flambe.yaml") )

        for build in yamls :
            # yeah...
            if not os.path.exists( build ) :
                continue

            currentBuild = HaxeBuild()
            currentBuild.hxml = build
            currentBuild.yaml = build
            currentBuild.cwd = os.path.dirname( build )
            currentBuild.output = "Flambe"

            res, err = runcmd(
                ["flambe","--config" , build, "haxe-flags"] )
            lines = res.split('\n')
            i, n = 0, len(lines)
            while i < n:
                if lines[i] == '-lib':
                    # a '-lib' line is followed by the library name
                    i += 1
                    lib = HaxeLib.get(lines[i])
                    if lib is not None:
                        currentBuild.libs.append(lib)
                i += 1

            self.add_build( currentBuild )
    def read_hxml( self, build ) :
        """Parse an hxml file into one or more HaxeBuild objects.

        Handles `--next` sections, nested .hxml includes, `cwd@file`
        prefixes and most known compiler flags; unknown arguments are
        kept verbatim. Returns only the valid builds.
        """
        #print("Reading build " + build );
        def _read_hxml( build, builds ) :
            buildPath = os.path.dirname(build);

            # "cwd@path" form: explicit working-directory prefix
            spl = build.split("@")
            if( len(spl) == 2 ) :
                buildPath = spl[0]
                build = os.path.join( spl[0] , spl[1] )

            if not os.path.exists( build ) :
                return builds

            if builds:
                # nested include: keep appending to the caller's last build
                currentBuild = builds[-1]
            else:
                currentBuild = HaxeBuild()
                currentBuild.hxml = build
                currentBuild.cwd = buildPath
                builds.append(currentBuild)
            #print( currentBuild )

            with codecs.open( build , "r+" , "utf-8" , "ignore" ) as f:
                lines = f.readlines()
            while lines:
                l = lines.pop(0)
                l = l.strip()

                if l.startswith("#") : # a comment
                    pass
                elif l.startswith("--next") :
                    # start a fresh build section
                    currentBuild = HaxeBuild()
                    currentBuild.hxml = build
                    currentBuild.cwd = buildPath
                    builds.append(currentBuild)

                elif l.startswith("-main") :
                    spl = l.split(" ", 1)
                    if len( spl ) == 2 :
                        currentBuild.main = spl[1]
                        currentBuild.args.append( ( spl[0] , spl[1] ) )
                    else :
                        sublime.status_message( "Invalid build.hxml : no Main class" )

                elif l.startswith("-lib") :
                    spl = l.split(" ", 1)
                    if len( spl ) == 2 :
                        lib = HaxeLib.get( spl[1] )
                        currentBuild.libs.append( lib )
                        currentBuild.args.append( spl )
                    else :
                        sublime.status_message( "Invalid build.hxml : lib not found" )

                elif [l for flag in [ "cmd" , "-macro" ] if l.startswith( "-" + flag )] :
                    spl = l.split(" ", 1)
                    currentBuild.args.append( ( spl[0] , spl[1] ) )

                #elif l.startswith("--connect") and HaxeComplete.inst.serverMode :
                #    currentBuild.args.append( ( "--connect" , str(self.serverPort) ))

                # flags kept verbatim (value optional)
                elif [l for flag in [
                    "D" ,
                    "swf-version" ,
                    "swf-header",
                    "debug" ,
                    "-no-traces" ,
                    "-flash-use-stage" ,
                    "-gen-hx-classes" ,
                    "-remap" ,
                    "-no-inline" ,
                    "-no-opt" ,
                    "-php-prefix" ,
                    "-js-namespace" ,
                    "-dead-code-elimination" ,
                    "-remap" ,
                    "-php-front" ,
                    "-php-lib",
                    "dce" ,
                    "-js-modern" ,
                    "swf-lib"
                ] if l.startswith( "-"+flag )]:
                    currentBuild.args.append( l.split(" ", 1) )

                # flags whose value is a path relative to the hxml folder
                elif [l for flag in [ "resource" , "xml" , "java-lib" , "net-lib" ] if l.startswith( "-"+flag )] :
                    spl = l.split(" ", 1)
                    outp = os.path.join( buildPath , spl[1] )
                    currentBuild.args.append( (spl[0] , outp) )

                #print(HaxeBuild.targets)
                elif [l for flag in HaxeBuild.targets if l.startswith( "-" + flag + " " )] :
                    spl = l.split(" ", 1)
                    #outp = os.path.join( folder , spl[1] )
                    outp = spl[1]
                    #currentBuild.args.append( ("-"+spl[0], outp) )

                    currentBuild.target = spl[0][1:]
                    currentBuild.output = outp
                    currentBuild.args.append( ( spl[0] , outp ) )

                elif l.startswith( "--interp" ) :
                    currentBuild.target = "--interp"
                    currentBuild.output = ""
                    currentBuild.args.append( ( "--interp", ) )

                elif l.startswith( "--run" ) :
                    spl = l.split(" ", 1)
                    #outp = os.path.join( folder , spl[1] )
                    outp = spl[1]
                    currentBuild.target = "--run"
                    currentBuild.output = outp
                    currentBuild.main = outp
                    currentBuild.args.append( ( "--run" , outp ) )
                    # everything after --run is passed through untouched
                    while lines:
                        l = lines.pop(0).strip()
                        if (not l) or l.startswith("#") : # an empty line or a comment
                            continue
                        currentBuild.args.append( (l,) )

                elif l.startswith("-cp "):
                    cp = l.split(" ", 1)
                    #view.set_status( "haxe-status" , "Building..." )
                    classpath = cp[1]
                    absClasspath = classpath#os.path.join( buildPath , classpath )
                    currentBuild.classpaths.append( absClasspath )
                    currentBuild.args.append( ("-cp" , absClasspath ) )

                elif l.endswith(".hxml"):
                    # nested hxml include, resolved against this build's cwd
                    _read_hxml(os.path.join(currentBuild.cwd, l), builds)

                elif re.match(r'[A-Za-z0-9_\.]+', l): # a haxe class
                    currentBuild.args.append( (l,) )

                elif l:
                    sublime.status_message("unknown compiler argument: " + l)
                    # maybe there is a new compiler argument that we don't know,
                    # so let's add the argument anyway
                    currentBuild.args.append( (l,) )

            return builds

        builds = _read_hxml(build, [])

        for build in builds:
            # a build with no classpath implicitly uses its own folder
            if len(build.classpaths) == 0:
                build.classpaths.append( build.cwd )
                build.args.append( ("-cp" , build.cwd ) )

        return [build for build in builds if build.is_valid()]
def add_build( self , build ) :
if build in self.builds :
self.builds.remove( build )
self.builds.insert( 0, build )
def find_hxml( self, folder ) :
hxmls = glob.glob( os.path.join( folder , "*.hxml" ) )
for build in hxmls:
for b in self.read_hxml( build ):
self.add_build( b )
def find_build_file( self , folder ) :
self.find_hxml(folder)
self.find_nmml(folder)
self.find_yaml(folder)
    def extract_build_args( self , view ,
            forcePanel = False , all_views = False ) :
        """Discover every build reachable from `view` and pick the current one.

        Uses project-declared 'haxe_builds' when present; otherwise crawls
        from the file's folder up to the project root (plus all window
        folders) for hxml/nmml/yaml build files. With forcePanel=True a
        quick panel lets the user choose.
        """
        #print("extract build args")
        self.builds = []

        fn = view.file_name()
        settings = view.settings()
        win = view.window()

        folder = None
        file_folder = None
        # folder containing the file, opened in window
        project_folder = None
        win_folders = []
        folders = []

        if fn is not None :
            file_folder = folder = os.path.dirname(fn)

            # find window folder containing the file
            if win is not None :
                win_folders = win.folders()
                for f in win_folders:
                    if f + os.sep in fn :
                        project_folder = folder = f

        # extract build files from project
        build_files = view.settings().get('haxe_builds')
        if build_files is not None :
            for build in build_files :
                if( int(sublime.version()) > 3000 ) and win is not None :
                    # files are relative to project file name
                    proj = win.project_file_name()
                    if( proj is not None ) :
                        proj_path = os.path.dirname( proj )
                        build = os.path.join( proj_path , build )
                for b in self.read_hxml( build ) :
                    self.add_build( b )
        else :
            crawl_folders = []
            # go up all folders from file to project or root
            if file_folder is not None :
                f = os.path.normpath(file_folder)
                prev = None
                while prev != f and ( project_folder is None or project_folder in f ):
                    crawl_folders.append( f )
                    prev = f
                    f = os.path.abspath(os.path.join(f, os.pardir))

            # crawl other window folders
            for f in win_folders :
                if f not in crawl_folders :
                    crawl_folders.append( f )

            for f in crawl_folders :
                self.find_build_file( f )

        if len(self.builds) == 1:
            if forcePanel :
                sublime.status_message("There is only one build")

            # will open the build file
            #if forcePanel :
            #    b = self.builds[0]
            #    f = b.hxml
            #    v = view.window().open_file(f,sublime.TRANSIENT)

            self.set_current_build( view , int(0), forcePanel )

        elif len(self.builds) == 0 and forcePanel :
            # no build anywhere: synthesize a build.hxml next to the file
            sublime.status_message("No hxml or nmml file found")

            f = os.path.join(folder,"build.hxml")

            self.currentBuild = None
            self.get_build(view)
            self.currentBuild.hxml = f

            #for whatever reason generate_build doesn't work without transient
            v = view.window().open_file(f,sublime.TRANSIENT)

            self.set_current_build( view , int(0), forcePanel )

        elif len(self.builds) > 1 and forcePanel :
            buildsView = []
            for b in self.builds :
                #for a in b.args :
                #    v.append( " ".join(a) )
                buildsView.append( [b.to_string(), os.path.basename( b.hxml ) ] )

            self.selectingBuild = True
            sublime.status_message("Please select your build")
            show_quick_panel( view.window() , buildsView , lambda i : self.set_current_build(view, int(i), forcePanel, all_views) , sublime.MONOSPACE_FONT )

        elif settings.has("haxe-build-id"):
            self.set_current_build( view , int(settings.get("haxe-build-id")), forcePanel )

        else:
            build_id = 0
            if project_folder is not None:
                # remember / reuse the user's last choice per project
                if project_folder in self.selected_build_id_map:
                    build_id = self.selected_build_id_map[project_folder]
                else:
                    for i in range(0, len(self.builds)):
                        if project_folder in self.builds[i].hxml:
                            build_id = i
                            break
            self.set_current_build(view, build_id, forcePanel)
    def set_current_build( self , view , id , forcePanel ,
            all_views = False ) :
        """Make build `id` (index into self.builds) the active build.

        With forcePanel the choice is remembered per project folder and a
        follow-up target quick panel is shown for NME/Flambe projects.
        """
        if id == -1:
            return

        if id >= len(self.builds) :
            id = 0

        win = view.window()
        project_folder = None
        if forcePanel:
            if win is not None :
                win_folders = win.folders()
                for f in win_folders:
                    if f + os.sep in view.file_name() :
                        project_folder = f
            if project_folder is not None:
                self.selected_build_id_map[project_folder] = id

        if all_views and win is not None and project_folder is not None:
            # stamp the chosen build on every Haxe view of this project
            for v in win.views():
                if v.score_selector(0,'source.haxe.2') == 0:
                    continue
                if project_folder + os.sep not in v.file_name():
                    continue
                v.settings().set( "haxe-build-id" , id )
        else:
            view.settings().set( "haxe-build-id" , id )

        if len(self.builds) > 0 :
            self.currentBuild = self.builds[id]
            view.set_status( "haxe-build" , self.currentBuild.to_string() )
        else:
            #self.currentBuild = None
            view.set_status( "haxe-build" , "No build" )

        self.selectingBuild = False

        if self.currentBuild is not None:
            if forcePanel: # choose target
                if self.currentBuild.nmml is not None:
                    sublime.status_message("Please select a NME target")
                    nme_targets = []
                    for t in HaxeBuild.nme_targets :
                        nme_targets.append( t[0] )

                    show_quick_panel( view.window() , nme_targets, lambda i : self.select_nme_target(i, view))
                elif self.currentBuild.yaml is not None:
                    sublime.status_message("Please select a Flambe target")
                    flambe_targets = []
                    for t in HaxeBuild.flambe_targets :
                        flambe_targets.append( t[0] )

                    show_quick_panel( view.window() , flambe_targets, lambda i : self.select_flambe_target(i, view))
            else:
                if self.currentBuild.nmml is not None:
                    # refresh completion args if the cached target changed
                    bc = self.build_cache[self.currentBuild.nmml]
                    if HaxeBuild.nme_target and \
                            bc.target != HaxeBuild.nme_target:
                        bc.target = HaxeBuild.nme_target
                        args = self.extract_nme_completion_args(view)
                        if args:
                            self.currentBuild.args = args
def select_nme_target( self, i, view ):
    """Quick-panel callback: apply NME target number `i` and persist it.

    Updates the status bar, the per-nmml build cache and the current
    build's completion args.  `i == -1` means the panel was cancelled.
    """
    if i == -1:
        return
    target = HaxeBuild.nme_targets[i]
    # Persist the choice so it survives editor restarts.
    self.haxe_settings.set('haxe_nme_target', i)
    sublime.save_settings(self.haxe_settings_file)
    if self.currentBuild.nmml is not None:
        HaxeBuild.nme_target = target
        view.set_status( "haxe-build" , self.currentBuild.to_string() )
        bc = self.build_cache[self.currentBuild.nmml]
        bc.target = HaxeBuild.nme_target
        # Re-query haxelib for the compiler flags of the new target.
        args = self.extract_nme_completion_args(view)
        if args:
            self.currentBuild.args = args
def extract_nme_completion_args(self, view):
    """Return the compiler flags for the current NME/OpenFL/Lime build.

    Runs ``haxelib run <lib> display <nmml> <target>`` and splits its
    stdout into one-element argument tuples as used by HaxeBuild.args.
    Returns None if haxelib reported an error.
    """
    # Pick the backing tool; lime takes precedence over openfl.
    lib = 'nme'
    if self.currentBuild.lime:
        lib = 'lime'
    elif self.currentBuild.openfl:
        lib = 'openfl'
    # nme_target[1] is e.g. "flash -debug"; only the platform word is needed.
    target = HaxeBuild.nme_target[1].split(" ")[0]
    res, err = runcmd( [
        view.settings().get("haxelib_path" , "haxelib"),
        'run', lib, 'display', self.currentBuild.nmml, target] )
    if err :
        return None
    # Flatten the whitespace-separated flag list into 1-tuples.
    return [
        (arg,)
        for line in res.split('\n')
        for arg in line.split(' ')
        if arg
    ]
def select_flambe_target( self , i , view ):
    """Quick-panel callback: apply Flambe target number `i` and persist it.

    Mirrors select_nme_target for Flambe (yaml) projects; `i == -1`
    means the panel was cancelled.
    """
    if i == -1:
        return
    target = HaxeBuild.flambe_targets[i]
    # Persist the choice so it survives editor restarts.
    self.haxe_settings.set('haxe_flambe_target', i)
    sublime.save_settings(self.haxe_settings_file)
    if self.currentBuild.yaml is not None:
        HaxeBuild.flambe_target = target
        view.set_status( "haxe-build" , self.currentBuild.to_string() )
def run_build( self , view ) :
    """Compile the current build and surface the result in the status bar."""
    err, comps, status = self.run_haxe( view )
    view.set_status( "haxe-status" , status )
def clear_output_panel(self, view) :
    """Reset the shared "haxe" output panel (get_output_panel empties it)."""
    win = view.window()
    self.panel = win.get_output_panel("haxe")
def panel_output( self , view , text , scope = None ) :
    """Append a timestamped line to the "haxe" output panel and show it.

    If `scope` is given the new region is also highlighted with that
    scope (dot icon in the gutter).  Returns the panel view.
    NOTE(review): begin_edit/end_edit is the Sublime Text 2 edit API;
    this path presumably never runs on ST3+ — confirm.
    """
    win = view.window()
    if self.panel is None :
        self.panel = win.get_output_panel("haxe")
    panel = self.panel
    # Prefix each message with a wall-clock timestamp.
    text = datetime.now().strftime("%H:%M:%S") + " " + text;
    edit = panel.begin_edit()
    region = sublime.Region(panel.size(),panel.size() + len(text))
    panel.insert(edit, panel.size(), text + "\n")
    panel.end_edit( edit )
    if scope is not None :
        # Accumulate highlight regions under a per-scope key.
        icon = "dot"
        key = "haxe-" + scope
        regions = panel.get_regions( key );
        regions.append(region)
        panel.add_regions( key , regions , scope , icon )
    #print( err )
    win.run_command("show_panel",{"panel":"output.haxe"})
    return self.panel
def get_toplevel_completion( self , src , src_dir , build ) :
    """Build the top-level completion list for a Haxe source buffer.

    Combines: a few keywords, types declared in `src`, types found in
    `src_dir` and in the build's classpaths, the standard library
    (filtered by the build's target package), imported names, local
    variables/functions/parameters, and package names.  Returns a list
    of (display, insert) tuples for Sublime's completion API.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    dump — verify nesting against upstream.
    """
    cl = []
    comps = [("trace","trace"),("this","this"),("super","super"),("else","else")]
    # Strip comments so the regexes below don't match inside them.
    src = comments.sub("",src)
    # Types declared in the current file.
    localTypes = typeDecl.findall( src )
    for t in localTypes :
        if t[1] not in cl:
            cl.append( t[1] )
    # Types living next to the current file (same package directory).
    packageClasses, subPacks = self.extract_types( src_dir )
    for c in packageClasses :
        if c not in cl:
            cl.append( c )
    # Fully-qualified names brought in via `import` lines.
    imports = importLine.findall( src )
    imported = []
    for i in imports :
        imp = i[1]
        imported.append(imp)
        #dot = imp.rfind(".")+1
        #clname = imp[dot:]
        #cl.append( imp )
        #print( i )
    #print cl
    buildClasses , buildPacks = build.get_types()
    # tarPkg: the platform package the current target maps to; used to
    # hide std classes belonging to *other* platforms.
    tarPkg = None
    targetPackages = ["flash","flash9","flash8","neko","js","php","cpp","cs","java","nme"]
    compilerVersion = HaxeComplete.inst.compilerVersion
    if build.target is not None :
        tarPkg = build.target
        if tarPkg == "x":
            tarPkg = "neko"
        # haxe 2
        if tarPkg == "swf9" :
            tarPkg = "flash"
        # haxe 3
        if tarPkg == "swf8" :
            tarPkg = "flash8"
        if tarPkg == "swf" :
            if compilerVersion >= 3 :
                tarPkg = "flash"
            else :
                tarPkg = "flash8"
    # NME builds expose the extra nme/jeash/neash/browser/native packages.
    # NOTE(review): mixed and/or without parentheses — precedence is
    # (A and B and C) or (D and E); presumably intended, confirm.
    if not build.openfl and not build.lime and build.nmml is not None or "nme" in HaxeLib.available and HaxeLib.get("nme") in build.libs :
        tarPkg = "nme"
        targetPackages.extend( ["jeash","neash","browser","native"] )
    #print( "tarpkg : " + tarPkg );
    #for c in HaxeComplete.stdClasses :
    #    p = c.split(".")[0]
    #    if tarPkg is None or (p not in targetPackages) or (p == tarPkg) :
    #        cl.append(c)
    cl.extend( imported )
    cl.extend( HaxeComplete.stdClasses )
    cl.extend( buildClasses )
    cl = list(set(cl)) # unique
    cl.sort();
    packs = []
    stdPackages = []
    #print("target : "+build.target)
    for p in HaxeComplete.stdPackages :
        #print(p)
        #if p == "flash9" or p == "flash8" :
        #    p = "flash"
        # Keep only packages valid for the current target platform.
        if tarPkg is None or (p not in targetPackages) or (p == tarPkg) :
            stdPackages.append(p)
    packs.extend( stdPackages )
    packs.extend( buildPacks )
    packs.sort()
    # Local variables and functions declared in the buffer.
    for v in variables.findall(src) :
        comps.append(( v + "\tvar" , v ))
    for f in functions.findall(src) :
        if f not in ["new"] :
            comps.append(( f + "\tfunction" , f ))
    #TODO can we restrict this to local scope ?
    # Function parameters (with ?, :Type and =default stripped).
    for paramsText in functionParams.findall(src) :
        cleanedParamsText = re.sub(paramDefault,"",paramsText)
        paramsList = cleanedParamsText.split(",")
        for param in paramsList:
            a = param.strip();
            if a.startswith("?"):
                a = a[1:]
            idx = a.find(":")
            if idx > -1:
                a = a[0:idx]
            idx = a.find("=")
            if idx > -1:
                a = a[0:idx]
            a = a.strip()
            cm = (a + "\tvar", a)
            if cm not in comps:
                comps.append( cm )
    # In type-only mode drop everything collected so far (vars, funcs...).
    if self.type_completion_only:
        comps = []
    for c in cl :
        #print(c)
        spl = c.split(".")
        #if spl[0] == "flash9" or spl[0] == "flash8" :
        #    spl[0] = "flash"
        top = spl[0]
        #print(spl)
        clname = spl.pop()
        pack = ".".join(spl)
        display = clname
        # remove parameters
        clname = clname.split('<')[0]
        #if pack in imported:
        #    pack = ""
        if pack != "" :
            display += "\t" + pack
        else :
            display += "\tclass"
        spl.append(clname)
        # Imported types insert the short name, others the dotted path.
        if pack in imported or c in imported :
            cm = ( display , clname )
        else :
            cm = ( display , ".".join(spl) )
        # NOTE(review): `cm not in comps and tarPkg is None or ...` has
        # and/or precedence that may not match the intent — confirm.
        if cm not in comps and tarPkg is None or (top not in targetPackages) or (top == tarPkg) : #( build.target is None or (top not in HaxeBuild.targets) or (top == build.target) ) :
            comps.append( cm )
    if not self.type_completion_only:
        for p in packs :
            cm = (p + "\tpackage",p)
            if cm not in comps :
                comps.append(cm)
    return comps
def clear_build( self , view ) :
    """Forget the cached build and the memoized completion request/response.

    `view` is accepted for signature compatibility with the other
    clear_* helpers but is not used.
    """
    self.currentBuild = None
    # Both slots reset to None: the last request ("inp") and reply ("outp").
    self.currentCompletion = dict.fromkeys(("inp", "outp"))
def get_build( self , view ) :
    """Return the active HaxeBuild for `view`, synthesizing a default one.

    When no build is selected and the view is a Haxe source file, a
    throw-away JS build is constructed: the package declaration is used
    to locate the classpath root, the file's class becomes -main, and a
    build.hxml path is derived next to the classpath.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    dump — verify nesting against upstream.
    """
    fn = view.file_name()
    win = view.window()
    if win is None or fn is None :
        return
    if fn is not None and self.currentBuild is None and view.score_selector(0,"source.haxe.2") > 0 :
        src_dir = os.path.dirname( fn )
        src = view.substr(sublime.Region(0, view.size()))
        build = HaxeBuild()
        build.target = "js"
        folder = os.path.dirname(fn)
        folders = win.folders()
        for f in folders:
            if f in fn :
                folder = f
        # Walk up one directory per package segment to find the cp root.
        pack = []
        for ps in packageLine.findall( src ) :
            if ps == "":
                continue
            pack = ps.split(".")
            for p in reversed(pack) :
                spl = os.path.split( src_dir )
                if( spl[1] == p ) :
                    src_dir = spl[0]
        cl = os.path.basename(fn)
        #if int(sublime.version() < 3000) :
        #    cl = cl.encode('ascii','ignore')
        cl = cl[0:cl.rfind(".")]
        # Fully-qualified main class: package + file name without extension.
        main = pack[0:]
        main.append( cl )
        build.main = ".".join( main )
        build.output = os.path.join(folder,build.main.lower() + ".js")
        build.args.append( ("-cp" , src_dir) )
        build.args.append( ("--no-output",) )
        #build.args.append( ("-main" , build.main ) )
        build.args.append( ( "-" + build.target , build.output ) )
        #build.args.append( ("--no-output" , "-v" ) )
        build.hxml = os.path.join( src_dir , "build.hxml")
        #build.hxml = os.path.join( src_dir , "build.hxml")
        self.currentBuild = build
    if self.currentBuild is not None :
        view.set_status( "haxe-build" , self.currentBuild.to_string() )
    return self.currentBuild
def run_nme( self, view, build ) :
    """Build an NME/OpenFL/Lime project via Sublime's exec command.

    The build is asynchronous; the (err, comps, status) triple returned
    is therefore always empty and exists only so callers can unpack it
    like a regular run_haxe result.
    """
    settings = view.settings()
    haxelib_path = settings.get("haxelib_path" , "haxelib")
    # Dispatch to the right tool; openfl takes precedence over lime here.
    if build.openfl :
        cmd = [haxelib_path,"run","openfl"]
    elif build.lime :
        cmd = [haxelib_path,"run","lime"]
    else :
        cmd = [haxelib_path,"run","nme"]
    # nme_target[2] is the tool action (build/test/...), then the project file.
    cmd += [ HaxeBuild.nme_target[2], os.path.basename(build.nmml) ]
    target = HaxeBuild.nme_target[1].split(" ")
    cmd.extend(target)
    cmdArgs = {
        "cmd": cmd,
        "env": get_env(),
        "working_dir": os.path.dirname(build.nmml),
        "file_regex": haxeFileRegex #"^([^:]*):([0-9]+): characters [0-9]+-([0-9]+) :.*$"
    }
    # Sublime Text 3+ supports colorizing of the build system output
    if int(sublime.version()) >= 3000:
        cmdArgs["syntax"] = "Packages/Haxe/Support/HaxeResults.hidden-tmLanguage"
    view.window().run_command("exec", cmdArgs)
    return ("" , [], "" )
def run_flambe( self , view , build ):
    """Build a Flambe project via the `flambe` CLI through Sublime exec.

    Asynchronous like run_nme; returns an empty (err, comps, status)
    triple for interface compatibility.
    """
    cmd = [ "flambe.cmd" if os.name == "nt" else "flambe" ]
    cmd += HaxeBuild.flambe_target[1].split(" ")
    # Use the build server if available
    buildServerMode = view.settings().get('haxe_build_server_mode', True)
    if self.serverMode and buildServerMode :
        cmd += ["--haxe-server", str(HaxeComplete.inst.serverPort)]
    cmdArgs = {
        "cmd": cmd,
        "env": get_env(),
        "working_dir": build.cwd,
        "file_regex": haxeFileRegex #"^([^:]*):([0-9]+): characters [0-9]+-([0-9]+) :.*$"
    }
    # Sublime Text 3+ supports colorizing of the build system output
    if int(sublime.version()) >= 3000:
        cmdArgs["syntax"] = "Packages/Haxe/Support/HaxeResults.hidden-tmLanguage"
    view.window().run_command("exec", cmdArgs)
    return ("" , [], "" )
def init_plugin( self , view ) :
    """One-time plugin initialisation (idempotent via self.inited).

    Scans haxelib, restores per-project build ids and the persisted
    NME/Flambe target, probes the compiler for its classpath and
    version, loads or rebuilds the std-class cache, and starts the
    completion server if supported.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    dump — verify nesting against upstream.
    """
    if self.inited :
        return
    self.inited = True
    HaxeLib.scan( view )
    settings = view.settings()
    self.haxe_settings = sublime.load_settings(self.haxe_settings_file)
    haxepath = settings.get("haxe_path","haxe")
    #init selected_build_id_map
    win = view.window()
    if win is not None :
        for v in win.views():
            project_folder = None
            win_folders = win.folders()
            if not v.settings().has('haxe-build-id'):
                continue
            for f in win_folders:
                if f + os.sep in v.file_name() :
                    project_folder = f
            if project_folder is not None:
                self.selected_build_id_map[project_folder] = \
                    int(v.settings().get('haxe-build-id'))
    # Restore the persisted NME target index, falling back to 0 on any
    # malformed or out-of-range value.
    nme_target_idx = 0
    try:
        nme_target_idx = int(self.haxe_settings.get('haxe_nme_target', 0))
        if nme_target_idx < 0 or \
                nme_target_idx >= len(HaxeBuild.nme_targets):
            nme_target_idx = 0
    except:
        pass
    HaxeBuild.nme_target = HaxeBuild.nme_targets[nme_target_idx]
    # Same for the Flambe target.
    flambe_target_idx = 0
    try:
        flambe_target_idx = int(
            self.haxe_settings.get('haxe_flambe_target', 0))
        if flambe_target_idx < 0 or \
                flambe_target_idx >= len(HaxeBuild.flambe_targets):
            flambe_target_idx = 0
    except:
        pass
    HaxeBuild.flambe_target = HaxeBuild.flambe_targets[flambe_target_idx]
    # Probe the compiler: verbose dummy build exposes the classpath,
    # `haxe -v` exposes the version string.
    out, err = runcmd( [haxepath, "-main", "Nothing", "-v", "--no-output"] )
    _, versionOut = runcmd([haxepath, "-v"])
    m = classpathLine.match(out)
    if m is not None :
        HaxeComplete.stdPaths = set(m.group(1).split(";")) - set([".","./"])
    ver = re.search(haxeVersion , versionOut)
    HaxeComplete.stdClasses = []
    HaxeComplete.stdPackages = []
    use_cache = view.settings().get('haxe_use_cache', True)
    cached_std = None
    cache_filename = None
    if ver is not None :
        self.compilerVersion = float(ver.group(3))
        # NOTE(review): appending swf8 for haxe>=3 and swf9 for haxe 2
        # looks inverted — confirm intent upstream.
        if self.compilerVersion >= 3 :
            HaxeBuild.targets.append("swf8")
        else :
            HaxeBuild.targets.append("swf9")
        # --wait (completion server) exists from haxe 2.09 on.
        self.serverMode = float(ver.group(3)) * 100 >= 209
        if use_cache:
            cache_filename = 'haxe_%s.cache' % ver.group(2)
            cached_std = cache(cache_filename)
    if cached_std is not None:
        # Cache format: "<classes csv>;<packages csv>".
        cp = cached_std.split(';')
        HaxeComplete.stdClasses.extend( cp[0].split(',') )
        HaxeComplete.stdPackages.extend( cp[1].split(',') )
    if cached_std is None:
        # No cache: walk the std classpaths and persist the result.
        for p in HaxeComplete.stdPaths :
            #print("std path : "+p)
            if len(p) > 1 and os.path.exists(p) and os.path.isdir(p):
                classes, packs = self.extract_types( p )
                HaxeComplete.stdClasses.extend( classes )
                HaxeComplete.stdPackages.extend( packs )
        if cache_filename is not None and use_cache:
            cached_std = ';'.join(
                (','.join(HaxeComplete.stdClasses),
                ','.join(HaxeComplete.stdPackages)))
            cache(cache_filename, cached_std)
    buildServerMode = settings.get('haxe_build_server_mode', True)
    completionServerMode = settings.get('haxe_completion_server_mode',True)
    self.serverMode = self.serverMode and (buildServerMode or completionServerMode)
    self.start_server( view )
def start_server( self , view = None ) :
    """Launch the background `haxe --wait` completion server once.

    Increments the port each time to avoid colliding with a previous
    (possibly still dying) instance.  No-op when server mode is off or
    a server process already exists.
    NOTE(review): if `view` is None, `haxepath` is never assigned and
    the `cmd = [haxepath , ...]` line raises NameError, which is not
    caught by the (OSError, ValueError) handler — confirm callers
    always pass a view.
    """
    #self.stop_server()
    if self.serverMode and self.serverProc is None :
        try:
            # env = os.environ.copy()
            merged_env = get_env(True)
            if view is not None :
                haxepath = view.settings().get("haxe_path" , "haxe")
            self.serverPort+=1
            cmd = [haxepath , "--wait" , str(self.serverPort) ]
            print("Starting Haxe server on port "+str(self.serverPort))
            #self.serverProc = Popen(cmd, env=env , startupinfo=STARTUP_INFO)
            self.serverProc = Popen(cmd, env = merged_env, startupinfo=STARTUP_INFO)
            self.serverProc.poll()
        except(OSError, ValueError) as e:
            err = u'Error starting Haxe server %s: %s' % (" ".join(cmd), e)
            sublime.error_message(err)
def stop_server( self ) :
    """Terminate the background completion server, if any, and mark it gone.

    Fix: the original assigned ``self.serverProc = None`` and then
    immediately executed ``del self.serverProc``, removing the instance
    attribute entirely.  Unless a class-level ``serverProc`` shadows it,
    every later ``self.serverProc is None`` check (e.g. in start_server)
    would raise AttributeError.  Keeping the attribute set to None is
    both the documented sentinel and what the rest of the code tests for.
    """
    if self.serverProc is not None :
        # terminate() then kill() covers both graceful and stubborn exits;
        # wait() reaps the child so it doesn't linger as a zombie.
        self.serverProc.terminate()
        self.serverProc.kill()
        self.serverProc.wait()
    # Leave the attribute in place as the "no server running" sentinel.
    self.serverProc = None
def run_haxe( self, view , display = None, haxe_args = None) :
    """Run the Haxe compiler for a build or a completion query.

    With `display` None this is a build: NME/Flambe projects are
    dispatched to their own runners, otherwise the compiler is run
    through the async exec command and an empty triple is returned.
    With `display` set ({"filename","offset","commas","mode"}) this is
    a completion query: the compiler's --display XML on stderr is
    parsed into call-tip hints, completion items and field info, and a
    5-tuple (err, comps, status, hints, fields) is returned — except in
    "type" mode (returns the type string or None) and "position" mode
    (returns the position text).
    NOTE(review): indentation reconstructed from a whitespace-stripped
    dump — verify nesting against upstream before relying on it.
    """
    self.init_plugin( view )
    build = self.get_build( view )
    settings = view.settings()
    autocomplete = display is not None
    if not autocomplete and build is not None:
        # Project-specific build paths never reach the plain compiler.
        if build.nmml is not None :
            return self.run_nme( view, build )
        if build.yaml is not None :
            return self.run_flambe( view , build )
    fn = view.file_name()
    if fn is None :
        return
    comps = []
    args = []
    cwd = build.cwd
    if cwd is None :
        cwd = os.path.dirname( build.hxml )
    # Route through the --wait completion server when enabled for this
    # kind of invocation and not explicitly disabled by the caller.
    buildServerMode = settings.get('haxe_build_server_mode', True)
    completionServerMode = settings.get('haxe_completion_server_mode',True)
    if self.serverMode and (
            ( completionServerMode and autocomplete ) or
            ( buildServerMode and not autocomplete )
            ) and (
            not display or 'serverMode' not in display or
            display['serverMode'] ):
        args.append(("--connect" , str(HaxeComplete.inst.serverPort)))
    args.append(("--cwd" , cwd ))
    #args.append( ("--times" , "-v" ) )
    if not autocomplete :
        pass
        #args.append( ("--times" , "-v" ) )
    else:
        # file@byte_offset[@mode] is the compiler's --display syntax.
        display_arg = display["filename"] + "@" + str( display["offset"] )
        if display["mode"] is not None :
            display_arg += "@" + display["mode"]
        args.append( ("--display", display_arg ) )
        args.append( ("-D", "st_display" ) )
        if build.yaml is not None :
            # Call out to `flambe haxe-flags` for Flambe completion
            res, err = runcmd( ["flambe","--config" , build.yaml, "haxe-flags"] )
            if err :
                print("Flambe completion error: " + err)
            else:
                args.extend([
                    (arg,)
                    for line in res.split('\n')
                    for arg in line.split(' ')
                    if arg
                ])
        else:
            args.append( ("--no-output",) )
    output = build.output
    if output is None :
        output = "no-output"
    #args.append( ("-cp" , plugin_path ) )
    #args.append( ("--macro" , "SourceTools.complete()") )
    args.extend( build.args )
    if haxe_args is not None:
        args.extend( haxe_args )
    haxepath = settings.get( 'haxe_path' , 'haxe' )
    cmd = [haxepath]
    for a in args :
        cmd.extend( list(a) )
    #
    # TODO: replace runcmd with run_command('exec') when possible (haxelib, maybe build)
    #
    if not autocomplete :
        # Builds run asynchronously through the exec command.
        encoded_cmd = []
        for c in cmd :
            #if isinstance( c , unicode) :
            #    encoded_cmd.append( c.encode('utf-8') )
            #else :
            encoded_cmd.append( c )
        #print(encoded_cmd)
        env = get_env()
        view.window().run_command("haxe_exec", {
            "cmd": encoded_cmd,
            "working_dir": cwd,
            "file_regex": haxeFileRegex,
            "env" : env
        })
        return ("" , [], "" )
    # print(" ".join(cmd))
    # Completion queries run synchronously; --display output is on stderr.
    res, err = runcmd( cmd, "" )
    if not autocomplete :
        self.panel_output( view , " ".join(cmd) )
    status = ""
    #print(err)
    hints = []
    fields = []
    msg = ""
    tree = None
    pos = None
    commas = 0
    if display["commas"] is not None :
        commas = display["commas"]
    mode = display["mode"]
    # Wrap stderr in a dummy root so multiple top-level tags parse as XML.
    if int(sublime.version()) >= 3000 :
        x = "<root>"+err+"</root>"
    else :
        x = "<root>"+err.encode("ASCII",'ignore')+"</root>"
    try :
        tree = ElementTree.XML(x);
    except Exception as e :
        print(e)
        print("invalid xml")
    if tree is not None :
        # <type> elements carry the signature at the caret (call tips).
        for i in tree.getiterator("type") :
            hint = i.text.strip()
            params, ret = parse_sig(hint)
            if mode == "type":
                hint = ret
                if params:
                    hint = ','.join(params)
                    hint = '(%s):%s' % (hint, ret)
                return hint
            msg = "";
            if params is not None and commas >= len(params) :
                if commas == 0 or hint == "Dynamic" :
                    msg = hint + ": No autocompletion available"
                    #view.window().run_command("hide_auto_complete")
                    #comps.append((")",""))
                else :
                    msg = "Too many arguments."
            else :
                if params is None:
                    pass
                else:
                    # Remaining parameters become the status-bar hint.
                    hints = params[commas:]
                    #print(hints)
                    if len(hints) == 0 :
                        msg = "Void"
                    else :
                        msg = ", ".join(hints)
            status = msg
        # This will attempt to get the full name of what we're trying to complete.
        # E.g. we type in self.blarg.herp(), this will get "self.blarg".
        fn_name = self.get_current_fn_name(view, view.sel()[0].end())
        pos = tree.findtext("pos")
        # <list> (fields) or <il> (toplevel) carries the completion items.
        li = tree.find("list")
        if li is None:
            li = tree.find("il")
        if li is not None :
            pos = li.findtext("pos")
            for i in li.getiterator("i"):
                name = i.get("n")
                if name is None:
                    name = i.text
                t = i.find("t")
                if t is not None:
                    sig = t.text
                else:
                    sig = i.get("t")
                # if sig is None:
                #     sig = i.get("p")
                d = i.find("d")
                if d is not None:
                    doc = d.text
                else:
                    doc = "No Doc"
                #if doc is None: doc = "No documentation found."
                insert = name
                hint = name
                # Cache the doc string so HaxeShowDocumentation can find it.
                doc_data = { 'hint' : name , 'doc' : doc }
                documentationStore[fn_name + "." + name] = doc_data
                if sig is not None :
                    params, ret = parse_sig(sig)
                    fields.append((name, params, ret))
                    if params is not None :
                        cm = name
                        hint = name + "( " + " , ".join( params ) + " )\t" + ret
                        doc_data['hint'] = hint # update before compacting
                        if len(hint) > 40: # compact arguments
                            hint = compactFunc.sub("(...)", hint);
                        insert = cm
                    else :
                        hint = name + "\t" + ret
                        doc_data['hint'] = hint
                else :
                    # No signature: heuristically a class (capitalised) or package.
                    if re.match("^[A-Z]",name ) :
                        hint = name + "\tclass"
                    else :
                        hint = name + "\tpackage"
                    doc_data['hint'] = hint
                #if doc is not None :
                #    hint += "\t" + doc
                #    print(doc)
                if len(hint) > 40: # compact return type
                    m = compactProp.search(hint)
                    if not m is None:
                        hint = compactProp.sub(": " + m.group(1), hint)
                comps.append( ( hint, insert ) )
    if len(hints) == 0 and len(comps) == 0:
        # Nothing parsed: treat stderr's first line as a status/error.
        err = re.sub( u"\(display(.*)\)" ,"",err)
        lines = err.split("\n")
        l = lines[0].strip()
        if len(l) > 0 and status == "":
            if l == "<list>" or l == "<type>":
                status = "No autocompletion available"
            elif not re.match( haxeFileRegex , l ):
                status = l
            else :
                status = ""
    extract_errors( err, cwd )
    highlight_errors( view, 5000 )
    # print(comps)
    if mode == "type":
        return None # this should have returned earlier
    if mode == "position":
        return pos
    return ( err, comps, status, hints, fields )
def on_query_completions(self, view, prefix, locations):
    """Sublime completion hook: dispatch to hxml / hxsl / Haxe providers.

    Returns an empty list for non-Haxe scopes, for positions inside
    strings/comments/conditional-compilation directives, and when
    auto-complete is disabled (unless forced).
    """
    scope = view.scope_name(locations[0])
    is_haxe = 'source.haxe.2' in scope
    is_hxml = 'source.hxml' in scope
    comps = []
    #print(scope)
    if not self.force_display_completion and \
            not view.settings().get('haxe_auto_complete', True):
        return comps
    if not is_haxe and not is_hxml:
        return comps
    # Complete from the start of the typed prefix, not the caret.
    offset = locations[0] - len(prefix)
    if offset == 0 :
        return comps
    # Never complete inside #if directives, strings or comments.
    if 'keyword.control.directive.conditional.haxe.2' in scope or \
            'meta.control.directive.conditional.haxe.2' in scope or \
            'string' in scope or \
            'comment' in scope:
        return comps
    if is_hxml :
        comps = self.get_hxml_completions( view , offset )
    elif is_haxe :
        if view.file_name().endswith(".hxsl") :
            comps = self.get_hxsl_completions( view , offset )
        else :
            # Hints are shown via the status bar, only comps are returned.
            comps,hints = self.get_haxe_completions( view , offset )
    return comps
def save_temp_file( self , view , force=False ) :
    """Write the unsaved buffer over its file so the compiler sees it.

    The on-disk original is preserved as "<name>.tmp" and restored by
    clear_temp_file.  Returns the temp path, or None when the view is
    clean and `force` is false (nothing to do).
    """
    if not view.is_dirty() and not force:
        return None
    fn = view.file_name()
    tdir = os.path.dirname(fn)
    temp = os.path.join( tdir , os.path.basename( fn ) + ".tmp" )
    src = view.substr(sublime.Region(0, view.size()))
    if not os.path.exists( tdir ):
        os.mkdir( tdir )
    if os.path.exists( fn ):
        if os.path.exists( temp ):
            # A stale .tmp from a crashed run: restore it first so we
            # never back up our own overwritten content.
            if os.stat( temp ).st_mode & stat.S_IWRITE == 0:
                os.chmod( temp , os.stat( temp ).st_mode | stat.S_IWRITE )
            shutil.copy2( temp , fn )
            os.remove( temp )
        # copy saved file to temp for future restoring
        shutil.copy2( fn , temp )
        # Make the target writable in case the original was read-only.
        if os.stat( fn ).st_mode & stat.S_IWRITE == 0:
            os.chmod( fn , os.stat( fn ).st_mode | stat.S_IWRITE )
    # write current source to file
    f = codecs.open( fn , "wb" , "utf-8" , "ignore" )
    f.write( src )
    f.close()
    return temp
def clear_temp_file( self , view , temp ) :
    """Undo save_temp_file: restore the original file from `temp`.

    `temp` is the path returned by save_temp_file; None means nothing
    was saved and there is nothing to restore.
    """
    if temp is None:
        return
    fn = view.file_name()
    if os.path.exists( temp ) :
        # Restore the backed-up original over the buffer dump.
        if os.stat( temp ).st_mode & stat.S_IWRITE == 0:
            os.chmod( temp , os.stat( temp ).st_mode | stat.S_IWRITE )
        shutil.copy2( temp , fn )
        # os.chmod( temp, stat.S_IWRITE )
        os.remove( temp )
    else:
        # fn didn't exist in the first place, so we remove it
        os.remove( fn )
def get_current_fn_name(self, view, offset):
    """Return the dotted expression immediately left of `offset`.

    E.g. for "self.blarg.herp(" with the caret after the "(", this
    yields "self.blarg.herp" — everything after the last character that
    cannot be part of an identifier chain.  Used as the key prefix for
    the documentation store.
    """
    # Any of these characters terminates a dotted identifier chain.
    nonfunction_chars = "\t -=+{}[];':\"?/><,!@#$%^&*()"
    source = view.substr(sublime.Region(0, view.size()))
    source = source[:offset-1]
    # Find the right-most terminator; the chain starts just after it.
    closest_nonfunction_char_idx = -1
    for ch in nonfunction_chars:
        idx = source.rfind(ch)
        if idx > closest_nonfunction_char_idx:
            closest_nonfunction_char_idx = idx
    fn_name = source[closest_nonfunction_char_idx + 1:]
    return fn_name
def get_haxe_completions( self , view , offset , ignoreTopLevel=False ):
    """Compute (completions, call-tip hints) for a Haxe buffer at `offset`.

    Figures out whether the caret is in a field access, a call argument
    list (counting commas while skipping nested brackets) or a
    top-level position, then asks the compiler (via run_haxe) and/or
    the local top-level scanner.  Results are memoized on
    self.currentCompletion keyed by the full request tuple.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    dump — verify nesting against upstream.
    """
    # print("OFFSET");
    # print(offset);
    src = view.substr(sublime.Region(0, view.size()))
    fn = view.file_name()
    # NOTE(review): dirname(fn) runs before the `fn is None` guard below;
    # a None file_name would raise here first — consider reordering.
    src_dir = os.path.dirname(fn)
    if fn is None :
        return
    hints = []
    show_hints = True
    #find actual autocompletable char.
    toplevelComplete = False
    userOffset = completeOffset = offset
    prev = src[offset-1]
    commas = 0
    comps = []
    #print("prev : "+prev)
    if prev not in "(." :
        fragment = view.substr(sublime.Region(0,offset))
        prevDot = fragment.rfind(".")
        prevPar = fragment.rfind("(")
        prevComa = fragment.rfind(",")
        prevColon = fragment.rfind(":")
        prevBrace = fragment.rfind("{")
        prevSymbol = max(prevDot,prevPar,prevComa,prevBrace,prevColon)
        if prevSymbol == prevComa:
            # Inside an argument list: walk left to the opening paren,
            # counting top-level commas to know which parameter we're on.
            closedPars = 0
            closedBrackets = 0
            closedSquares = 0
            for i in range( prevComa , 0 , -1 ) :
                c = src[i]
                if c == "]" :
                    closedSquares += 1
                elif c == "[" :
                    closedSquares -= 1
                elif c == ")" :
                    closedPars += 1
                elif c == "(" :
                    if closedPars < 1 :
                        completeOffset = i+1
                        break
                    else :
                        closedPars -= 1
                elif c == "," :
                    if closedPars == 0 and closedBrackets == 0 and closedSquares == 0:
                        commas += 1
                elif c == "{" : # TODO : check for { ... , ... , ... } to have the right comma count
                    closedBrackets -= 1
                    if closedBrackets < 0 :
                        commas = 0
                elif c == "}" :
                    closedBrackets += 1
            #print("commas : " + str(commas))
            #print("closedBrackets : " + str(closedBrackets))
            #print("closedPars : " + str(closedPars))
            if closedBrackets < 0 or closedSquares < 0 :
                show_hints = False
        else :
            # Otherwise complete from just after the nearest . ( or :
            completeOffset = max( prevDot + 1, prevPar + 1 , prevColon + 1 )
            skipped = src[completeOffset:offset]
            toplevelComplete = (skipped == '' or skippable.search( skipped ) is None) and inAnonymous.search( skipped ) is None
    completeChar = src[completeOffset-1]
    userChar = src[userOffset-1]
    inControlStruct = controlStruct.search( src[0:completeOffset] ) is not None
    toplevelComplete = toplevelComplete or ( completeChar in ":(," and userChar not in ":(," ) or inControlStruct
    if ignoreTopLevel:
        toplevelComplete = False
    mode = None
    if( toplevelComplete ) :
        mode = "toplevel"
        offset = userOffset
    else :
        offset = completeOffset
    # "1." style literals: offer the range operator instead of fields.
    if not toplevelComplete and src[offset-1]=="." and src[offset-2] in ".1234567890" :
        #comps.append(("... [iterator]",".."))
        comps.append((".","."))
    #if toplevelComplete and (inControlStruct or completeChar not in "(,") :
    #    return comps,hints
    # Memoization key: identical requests reuse the previous answer.
    inp = (fn,offset,commas,src[0:offset-1],mode,self.type_completion_only)
    if (self.currentCompletion["inp"] is None or
            inp != self.currentCompletion["inp"]) :
        ret = ''
        status = ''
        hints = []
        haxeComps = []
        if not self.type_completion_only:
            # Dump the unsaved buffer to disk so the compiler sees it.
            temp = self.save_temp_file( view )
            # The compiler expects a byte offset, not a character offset.
            byte_offset = len(codecs.encode(src[0:offset], "utf-8"))
            ret , haxeComps , status , hints , _ = self.run_haxe( view , { "filename" : fn , "offset" : byte_offset , "commas" : commas , "mode" : mode })
            self.clear_temp_file( view , temp )
        if (toplevelComplete and len(haxeComps) == 0 or
                self.type_completion_only):
            # Compiler gave nothing: fall back to the local scanner.
            haxeComps = self.get_toplevel_completion(
                src , src_dir , self.get_build( view ) )
        if (toplevelComplete or completeChar not in "(," or
                self.type_completion_only):
            comps = haxeComps
        self.currentCompletion["outp"] = (ret,comps,status,hints)
    else :
        ret, comps, status , hints = self.currentCompletion["outp"]
    self.currentCompletion["inp"] = inp
    #print(ret)
    #print(status)
    view.set_status( "haxe-status", status )
    #sublime.status_message("")
    if not show_hints :
        hints = []
    # Remember what is on screen so HaxeShowDocumentation can match it.
    self.visibleCompletionList = comps
    return comps,hints
def get_hxsl_completions( self , view , offset ) :
    """Return the fixed completion list for hxsl shader sources.

    `view` and `offset` are accepted for interface parity with the
    other completion providers but are not used: hxsl completion is a
    static list of the built-in shader types.
    """
    hxsl_builtins = (
        "Float", "Float2", "Float3", "Float4", "Matrix", "M44", "M33",
        "M34", "M43", "Texture", "CubeTexture", "Int", "Color", "include",
    )
    return [ ( name , "hxsl Type" ) for name in hxsl_builtins ]
def get_hxml_completions( self , view , offset ) :
    """Complete haxelib names after a -lib flag in a .hxml buffer.

    Only the current line is inspected; any other position yields no
    completions.
    """
    src = view.substr(sublime.Region(0, offset))
    # Text of the line being edited, up to the caret.
    currentLine = src[src.rfind("\n")+1:offset]
    m = libFlag.match( currentLine )
    if m is not None :
        return HaxeLib.get_completions()
    else :
        return []
def savetotemp( self, path, src ):
    """Dump `src` into a fresh named temporary file and return the handle.

    The file is created with ``delete=False`` and deliberately left
    open so the caller can read ``.name`` and decide when to close and
    remove it.  ``path`` is accepted for signature compatibility but is
    not used.
    """
    tmp = tempfile.NamedTemporaryFile( delete=False )
    tmp.write( src )
    return tmp
class HaxeShowDocumentation( sublime_plugin.TextCommand ) :
    """Text command that shows cached Haxe documentation for the symbol
    the autocomplete popup is currently matching, in an output panel."""

    def run( self , edit ) :
        """Resolve the symbol at the caret against the visible completion
        list and display its documentation (if any was cached)."""
        view = self.view
        complete = HaxeComplete.inst
        sel = view.sel()[0]
        # [('ID\tInt', 'ID'), ('_acceleration\tflixel.util.FlxPoint', '_acceleration'), ('_angleChanged\tBool', '_angleChanged'),
        current_function = complete.get_current_fn_name(view, sel.end() + 1)
        function_qualifications = current_function[:current_function.rfind(".")] + "." # If we have something like foo.bar.baz, this will return just foo.bar.
        current_function = current_function[current_function.rfind(".") + 1:] # And this will return baz.
        # Find what the autocompletion box is likely autocompleting to.
        possible_function_names = [x[0].split("\t")[0] for x in complete.visibleCompletionList]
        # Strip "(args)" from function display names.
        possible_function_names = [(x[:x.find("(")] if x.find("(") != -1 else x) for x in possible_function_names]
        matching_function_names = []
        # Progressively shorten the typed name until something matches.
        for x in range(0, len(current_function)):
            smaller_name = current_function[:-x] if x != 0 else current_function # first try quux, then quu, then qu, then q. the if/else is a weird special case of slice notation.
            matching_function_names = [fn for fn in possible_function_names if fn.startswith(smaller_name)]
            if len(matching_function_names) > 0: break
        if len(matching_function_names) == 0: return
        best_match = matching_function_names[0]
        self.show_documentation(function_qualifications + best_match, edit)

    # Actually display the documentation in the documentation window.
    def show_documentation(self, fn_name, edit):
        """Render the documentationStore entry for `fn_name` into the
        "haxe-doc" output panel; silently does nothing if uncached."""
        window = sublime.active_window()
        if fn_name not in documentationStore:
            return
        doc_data = documentationStore[fn_name]
        # 'hint' is "name\ttype" as shown in the completion popup.
        hint = doc_data['hint'].split("\t")
        if( hint[1] == 'class' ) :
            hint_text = hint[1] + " " + hint[0]
        elif( hint[1] == 'package' ) :
            hint_text = hint[1] + " " + hint[0] + ";"
        else:
            hint_text = " : ".join( hint )
        documentation_text = "\n" + hint_text + "\n\n"
        documentation_lines = []
        if doc_data['doc'] is not None :
            documentation_lines = doc_data['doc'].split("\n")
        else :
            documentation_lines = ["","No documentation.",""]
        # Re-wrap the doc body in a /** ... **/ block for syntax coloring.
        documentation_text += "/**\n";
        for line in documentation_lines:
            # Strip leading whitespace.
            line = line.strip()
            # Strip out any leading astericks.
            if len(line) > 0 and line[0] == "*":
                line = line[2:]
            documentation_text += line + "\n"
        documentation_text += "**/\n";
        doc_view = window.get_output_panel('haxe-doc');
        doc_view.set_syntax_file('Packages/Haxe/Haxe.tmLanguage')
        doc_view.settings().set('word_wrap', True)
        doc_view.insert(edit, doc_view.size(), documentation_text + "\n")
        window.run_command("show_panel", {"panel": "output.haxe-doc"})
class HaxeExecCommand(ExecCommand):
    """Custom exec command that runs the Haxe compiler and feeds its
    output to the plugin's error extraction/highlighting on completion.

    Fixes over the previous version:
    - ``is_visible`` was ``def is_visible(): return false`` — missing
      ``self`` and ``false`` is a NameError in Python; now returns False.
    - ``os.getcwdu()`` in the error path is Python 2 only and raised
      NameError on Sublime Text 3; ``os.getcwd()`` works on both.
    - the local in ``on_data`` no longer shadows the builtin ``str``.
    - mutable default arguments (``cmd=[]``, ``env={}``) replaced with
      None sentinels normalized inside ``run`` (backward compatible).
    """

    def finish(self, *args, **kwargs):
        """After the base class finishes, scan the output panel for Haxe
        errors and highlight them in the active view."""
        super(HaxeExecCommand, self).finish(*args, **kwargs)
        outp = self.output_view.substr(sublime.Region(0, self.output_view.size()))
        extract_errors(
            outp, self.output_view.settings().get("result_base_dir") )
        highlight_errors( self.window.active_view() )

    def run(self, cmd = None, shell_cmd = None, file_regex = "", line_regex = "", working_dir = "",
            encoding = None, env = None, quiet = False, kill = False,
            word_wrap = True,
            # Catches "path" and "shell"
            **kwargs):
        """Launch `cmd` asynchronously into the "exec" output panel.

        Mirrors Sublime's built-in exec command across ST2/ST3/ST3080+,
        with the Haxe results syntax applied to the panel.
        """
        # Normalize the (formerly mutable) defaults.
        cmd = [] if cmd is None else cmd
        env = {} if env is None else env

        if int(sublime.version()) >= 3080:
            # clear the text_queue
            self.text_queue_lock.acquire()
            try:
                self.text_queue.clear()
                self.text_queue_proc = None
            finally:
                self.text_queue_lock.release()

        if kill:
            if self.proc:
                self.proc.kill()
                self.proc = None
                self.append_data(None, "[Cancelled]")
            return

        if not hasattr(self, 'output_view'):
            # Try not to call get_output_panel until the regexes are assigned
            self.output_view = self.window.get_output_panel("exec")

        # Default the to the current files directory if no working directory was given
        if (working_dir == "" and self.window.active_view()
                and self.window.active_view().file_name()):
            working_dir = os.path.dirname(self.window.active_view().file_name())

        self.output_view.settings().set("result_file_regex", file_regex)
        self.output_view.settings().set("result_line_regex", line_regex)
        self.output_view.settings().set("result_base_dir", working_dir)
        self.output_view.settings().set("word_wrap", word_wrap)
        self.output_view.settings().set("line_numbers", False)
        self.output_view.settings().set("gutter", False)
        self.output_view.settings().set("scroll_past_end", False)
        self.output_view.assign_syntax(
            'Packages/Haxe/Support/HaxeResults.hidden-tmLanguage')

        # Call get_output_panel a second time after assigning the above
        # settings, so that it'll be picked up as a result buffer
        self.window.get_output_panel("exec")

        if encoding is None :
            if int(sublime.version()) >= 3000 :
                encoding = sys.getfilesystemencoding()
            else:
                encoding = "utf-8"

        self.encoding = encoding
        self.quiet = quiet
        self.proc = None

        if not self.quiet:
            if int(sublime.version()) >= 3000 :
                print( "Running " + " ".join(cmd) )
            else :
                print( "Running " + " ".join(cmd).encode('utf-8') )
            sublime.status_message("Building")

        show_panel_on_build = sublime.load_settings("Preferences.sublime-settings").get("show_panel_on_build", True)
        if show_panel_on_build:
            self.window.run_command("show_panel", {"panel": "output.exec"})

        merged_env = env.copy()
        if self.window.active_view():
            user_env = self.window.active_view().settings().get('build_env')
            if user_env:
                merged_env.update(user_env)

        # Change to the working dir, rather than spawning the process with it,
        # so that emitted working dir relative path names make sense
        if working_dir != "":
            os.chdir(working_dir)

        self.debug_text = ""
        if shell_cmd:
            self.debug_text += "[shell_cmd: " + shell_cmd + "]\n"
        else:
            self.debug_text += "[cmd: " + str(cmd) + "]\n"
        self.debug_text += "[dir: " + str(os.getcwd()) + "]\n"
        if "PATH" in merged_env:
            self.debug_text += "[path: " + str(merged_env["PATH"]) + "]"
        else:
            self.debug_text += "[path: " + str(os.environ["PATH"]) + "]"

        err_type = OSError
        if os.name == "nt":
            err_type = WindowsError

        try:
            # Forward kwargs to AsyncProcess
            if int(sublime.version()) >= 3080 :
                self.proc = AsyncProcess(cmd, shell_cmd, merged_env, self, **kwargs)
                self.text_queue_lock.acquire()
                try:
                    self.text_queue_proc = self.proc
                finally:
                    self.text_queue_lock.release()
            elif int(sublime.version()) >= 3000 :
                self.proc = AsyncProcess(cmd, None, merged_env, self, **kwargs)
            else :
                # ST2 AsyncProcess takes byte strings and no shell_cmd arg.
                self.proc = AsyncProcess([c.encode(sys.getfilesystemencoding()) for c in cmd], merged_env, self, **kwargs)
        except err_type as e:
            self.append_data(None, str(e) + "\n")
            self.append_data(None, "[cmd: " + str(cmd) + "]\n")
            # os.getcwd() works on both Python 2 and 3 (getcwdu was py2-only).
            self.append_data(None, "[dir: " + str(os.getcwd()) + "]\n")
            if "PATH" in merged_env:
                self.append_data(None, "[path: " + str(merged_env["PATH"]) + "]\n")
            else:
                self.append_data(None, "[path: " + str(os.environ["PATH"]) + "]\n")
            if not self.quiet:
                self.append_data(None, "[Finished]")

    def is_visible(self):
        # Internal command: never shown in menus/palette.
        return False

    def on_data(self, proc, data):
        """Decode subprocess output and forward it to the panel."""
        if int(sublime.version()) >= 3080:
            try:
                text = data.decode(self.encoding)
            except:
                text = "[Decode error - output not " + self.encoding + "]\n"
                proc = None
            # Normalize newlines, Sublime Text always uses a single \n separator
            # in memory.
            text = text.replace('\r\n', '\n').replace('\r', '\n')
            self.append_string(proc, text)
        else:
            sublime.set_timeout(functools.partial(self.append_data, proc, data), 0)

    def on_finished(self, proc):
        sublime.set_timeout(functools.partial(self.finish, proc), 1)
class HaxelibExecCommand(ExecCommand):
    """Exec variant for haxelib invocations: rescans installed libs when
    the subprocess completes.

    Fix: ``is_visible`` previously read ``def is_visible(): return false``
    — it lacked ``self`` and referenced the undefined name ``false``
    (a NameError at call time); it now correctly returns False.
    """

    def finish(self, *args, **kwargs):
        """After the base class finishes, refresh the haxelib registry."""
        super(HaxelibExecCommand, self).finish(*args, **kwargs)
        HaxeLib.scan( sublime.active_window().active_view() )

    def is_visible(self):
        # Internal command: never shown in menus/palette.
        return False
| clemos/haxe-sublime-bundle | HaxeComplete.py | Python | apache-2.0 | 82,830 |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
class State(object):
    """A tiny mutable wrapper around a single flag value.

    Although typically holding a bool, any value may be stored via
    ``set``; ``flip`` XORs the stored value with True, which inverts a
    boolean (and toggles the low bit of an int).
    """

    def __init__(self, initialValue):
        # Seed the wrapped flag with the caller-supplied value.
        self._state = initialValue

    def get(self):
        """Return the currently stored value."""
        return self._state

    def set(self, value):
        """Replace the stored value with ``value``."""
        self._state = value

    def activate(self):
        """Force the flag to True."""
        self._state = True

    def deactivate(self):
        """Force the flag to False."""
        self._state = False

    def flip(self):
        """Invert the flag (XOR the stored value with True)."""
        self._state ^= True


# version
__id__ = "$Id: State.py,v 1.1.1.1 2005/03/08 16:13:53 aivazis Exp $"

# End of file
| bmi-forum/bmi-pyre | pythia-0.8/packages/journal/journal/diagnostics/State.py | Python | gpl-2.0 | 929 |
import socket
import sys
from thread import *
# Empty host string binds the server to all available interfaces.
ServerIp = ''
ServerPort = 54321 # TCP port the socket server listens on
# Python 2 script: create a TCP socket and bind it.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket created'
try:
    s.bind((ServerIp, ServerPort))
except socket.error , msg:
    # msg is an (errno, message) pair under Python 2
    print 'Bind failed. Error Code : ' + str(msg[0]) + ' Msg ' + msg[1]
    sys.exit()
print 'Socket Bind OK!!'
# Backlog of up to 10 pending connections.
s.listen(10)
print 'Socket Listening..'
def clientthread(conn):
conn.send('This is Rpi Socket Server.')
while True:
data = conn.recv(1024)
print data
if not data:
break
conn.sendall('Recv Cmd Success!!')
conn.close()
# Main accept loop: hand each incoming connection off to its own thread.
while 1:
    conn, ipAddr = s.accept()
    print 'Socket Client(Android phone) IP is' + ipAddr[0] + ':' + str(ipAddr[1])
    start_new_thread(clientthread ,(conn,))
# NOTE(review): unreachable -- the loop above never exits normally, so the
# listening socket is only released when the process dies.
s.close()
| rurume/Arduino_example | examples/week2/Robot_Hand/Python_Rpi_Socket_Server/Rpi_Socket_Server.py | Python | gpl-2.0 | 865 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_user_info import V1beta1UserInfo
class TestV1beta1UserInfo(unittest.TestCase):
    """Generated unit test stubs for the V1beta1UserInfo model."""

    def setUp(self):
        # No fixtures required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1beta1UserInfo(self):
        """Smoke test: the V1beta1UserInfo model can be constructed."""
        model = kubernetes.client.models.v1beta1_user_info.V1beta1UserInfo()


if __name__ == '__main__':
    unittest.main()
| djkonro/client-python | kubernetes/test/test_v1beta1_user_info.py | Python | apache-2.0 | 859 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet import ssl
from twisted.python.util import sibpath
from OpenSSL import SSL
class ClientTLSContext(ssl.ClientContextFactory):
    """Client-side TLS context factory used by the test suite."""

    isClient = 1

    def getContext(self):
        """Return a fresh TLSv1 context for each connection attempt."""
        context = SSL.Context(SSL.TLSv1_METHOD)
        return context
class ServerTLSContext:
    """Server-side TLS context factory backed by a PEM certificate file.

    NOTE: old-style class; the default certificate path is evaluated once,
    at class definition time, and points next to this module.
    """

    isClient = 0

    def __init__(self, filename = sibpath(__file__, 'server.pem')):
        self.filename = filename

    def getContext(self):
        """Return a TLSv1 context loaded with the configured cert/key pair."""
        context = SSL.Context(SSL.TLSv1_METHOD)
        context.use_certificate_file(self.filename)
        context.use_privatekey_file(self.filename)
        return context
| sorenh/cc | vendor/Twisted-10.0.0/twisted/test/ssl_helpers.py | Python | apache-2.0 | 655 |
# coding=utf-8
"""
In this module resides the core data structures and logic of the plugin system. It is implemented in an OctoPrint-agnostic
way and could be extracted into a separate Python module in the future.
.. autoclass:: PluginManager
:members:
.. autoclass:: PluginInfo
:members:
.. autoclass:: Plugin
:members:
"""
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import os
import imp
from collections import defaultdict, namedtuple
import logging
import pkg_resources
import pkginfo
# Records where a plugin was discovered from: either a setuptools entry point...
EntryPointOrigin = namedtuple("EntryPointOrigin", "type, entry_point, module_name, package_name, package_version")
# ...or a plugin folder on disk.
FolderOrigin = namedtuple("FolderOrigin", "type, folder")
class PluginInfo(object):
    """
    The :class:`PluginInfo` class wraps all available information about a registered plugin.
    This includes its meta data (like name, description, version, etc) as well as the actual plugin extensions like
    implementations, hooks and helpers.
    It works on Python module objects and extracts the relevant data from those via accessing the
    :ref:`control properties <sec-plugin-concepts-controlproperties>`.

    Arguments:
        key (str): Identifier of the plugin
        location (str): Installation folder of the plugin
        instance (module): Plugin module instance
        name (str): Human readable name of the plugin
        version (str): Version of the plugin
        description (str): Description of the plugin
        author (str): Author of the plugin
        url (str): URL of the website of the plugin
        license (str): License of the plugin
    """

    attr_name = '__plugin_name__'
    """ Module attribute from which to retrieve the plugin's human readable name. """

    attr_description = '__plugin_description__'
    """ Module attribute from which to retrieve the plugin's description. """

    attr_version = '__plugin_version__'
    """ Module attribute from which to retrieve the plugin's version. """

    attr_author = '__plugin_author__'
    """ Module attribute from which to retrieve the plugin's author. """

    attr_url = '__plugin_url__'
    """ Module attribute from which to retrieve the plugin's website URL. """

    attr_license = '__plugin_license__'
    """ Module attribute from which to retrieve the plugin's license. """

    attr_hooks = '__plugin_hooks__'
    """ Module attribute from which to retrieve the plugin's provided hooks. """

    attr_implementation = '__plugin_implementation__'
    """ Module attribute from which to retrieve the plugin's provided mixin implementation. """

    attr_implementations = '__plugin_implementations__'
    """
    Module attribute from which to retrieve the plugin's provided implementations.
    This deprecated attribute will only be used if a plugin does not yet offer :attr:`attr_implementation`. Only the
    first entry will be evaluated.

    .. deprecated:: 1.2.0-dev-694
       Use :attr:`attr_implementation` instead.
    """

    attr_helpers = '__plugin_helpers__'
    """ Module attribute from which to retrieve the plugin's provided helpers. """

    attr_check = '__plugin_check__'
    """ Module attribute which to call to determine if the plugin can be loaded. """

    attr_init = '__plugin_init__'
    """
    Module attribute which to call when loading the plugin.
    This deprecated attribute will only be used if a plugin does not yet offer :attr:`attr_load`.

    .. deprecated:: 1.2.0-dev-720
       Use :attr:`attr_load` instead.
    """

    attr_load = '__plugin_load__'
    """ Module attribute which to call when loading the plugin. """

    attr_unload = '__plugin_unload__'
    """ Module attribute which to call when unloading the plugin. """

    attr_enable = '__plugin_enable__'
    """ Module attribute which to call when enabling the plugin. """

    attr_disable = '__plugin_disable__'
    """ Module attribute which to call when disabling the plugin. """

    def __init__(self, key, location, instance, name=None, version=None, description=None, author=None, url=None, license=None):
        self.key = key
        self.location = location
        self.instance = instance
        # discovery/bookkeeping state, maintained by the PluginManager
        self.origin = None
        self.enabled = True
        self.bundled = False
        self.loaded = False
        # fallback metadata, used when the module does not define the
        # corresponding __plugin_*__ control property
        self._name = name
        self._version = version
        self._description = description
        self._author = author
        self._url = url
        self._license = license

    def validate(self, phase, additional_validators=None):
        """Migrate deprecated control properties for *phase* and run any
        *additional_validators* (callables taking ``(phase, plugin_info)``)."""
        if phase == "before_load":
            # if the plugin still uses __plugin_init__, log a deprecation warning and move it to __plugin_load__
            if hasattr(self.instance, self.__class__.attr_init):
                if not hasattr(self.instance, self.__class__.attr_load):
                    # deprecation warning
                    import warnings
                    warnings.warn("{name} uses deprecated control property __plugin_init__, use __plugin_load__ instead".format(name=self.key), DeprecationWarning)

                    # move it
                    init = getattr(self.instance, self.__class__.attr_init)
                    setattr(self.instance, self.__class__.attr_load, init)

                # delete __plugin_init__
                delattr(self.instance, self.__class__.attr_init)

        elif phase == "after_load":
            # if the plugin still uses __plugin_implementations__, log a deprecation warning and put the first
            # item into __plugin_implementation__
            if hasattr(self.instance, self.__class__.attr_implementations):
                if not hasattr(self.instance, self.__class__.attr_implementation):
                    # deprecation warning
                    import warnings
                    warnings.warn("{name} uses deprecated control property __plugin_implementations__, use __plugin_implementation__ instead - only the first implementation of {name} will be recognized".format(name=self.key), DeprecationWarning)

                    # put first item into __plugin_implementation__
                    implementations = getattr(self.instance, self.__class__.attr_implementations)
                    if len(implementations) > 0:
                        setattr(self.instance, self.__class__.attr_implementation, implementations[0])

                # delete __plugin_implementations__
                delattr(self.instance, self.__class__.attr_implementations)

        if additional_validators is not None:
            for validator in additional_validators:
                validator(phase, self)

    def __str__(self):
        if self.version:
            return "{name} ({version})".format(name=self.name, version=self.version)
        else:
            return self.name

    def long_str(self, show_bundled=False, bundled_strs=(" [B]", ""),
                 show_location=False, location_str=" - {location}",
                 show_enabled=False, enabled_strs=("* ", "  ")):
        """
        Long string representation of the plugin's information. Will return a string of the format ``<enabled><str(self)><bundled><location>``.

        ``enabled``, ``bundled`` and ``location`` will only be displayed if the corresponding flags are set to ``True``.
        They will be filled from ``enabled_strs``, ``bundled_strs`` and ``location_str`` as follows:

        ``enabled_strs``
            a 2-tuple, the first entry being the string to insert when the plugin is enabled, the second
            entry the string to insert when it is not.
        ``bundled_strs``
            a 2-tuple, the first entry being the string to insert when the plugin is bundled, the second
            entry the string to insert when it is not.
        ``location_str``
            a format string (to be parsed with ``str.format``), the ``{location}`` placeholder will be
            replaced with the plugin's installation folder on disk.

        Arguments:
            show_enabled (boolean): whether to show the ``enabled`` part
            enabled_strs (tuple): the 2-tuple containing the two possible strings to use for displaying the enabled state
            show_bundled (boolean): whether to show the ``bundled`` part
            bundled_strs(tuple): the 2-tuple containing the two possible strings to use for displaying the bundled state
            show_location (boolean): whether to show the ``location`` part
            location_str (str): the format string to use for displaying the plugin's installation location

        Returns:
            str: The long string representation of the plugin as described above
        """
        if show_enabled:
            ret = enabled_strs[0] if self.enabled else enabled_strs[1]
        else:
            ret = ""

        ret += str(self)

        if show_bundled:
            ret += bundled_strs[0] if self.bundled else bundled_strs[1]

        if show_location and self.location:
            ret += location_str.format(location=self.location)

        return ret

    def get_hook(self, hook):
        """
        Arguments:
            hook (str): Hook to return.

        Returns:
            callable or None: Handler for the requested ``hook`` or None if no handler is registered.
        """
        if not hook in self.hooks:
            return None
        return self.hooks[hook]

    def get_implementation(self, *types):
        """
        Arguments:
            types (list): List of :class:`Plugin` sub classes all returned implementations need to implement.

        Returns:
            object: The plugin's implementation if it matches all of the requested ``types``, None otherwise.
        """
        if not self.implementation:
            return None

        for t in types:
            if not isinstance(self.implementation, t):
                return None

        return self.implementation

    @property
    def name(self):
        """
        Human readable name of the plugin. Will be taken from name attribute of the plugin module if available,
        otherwise from the ``name`` supplied during construction with a fallback to ``key``.

        Returns:
            str: Name of the plugin, fallback is the plugin's identifier.
        """
        return self._get_instance_attribute(self.__class__.attr_name, defaults=(self._name, self.key))

    @property
    def description(self):
        """
        Description of the plugin. Will be taken from the description attribute of the plugin module as defined in
        :attr:`attr_description` if available, otherwise from the ``description`` supplied during construction.
        May be None.

        Returns:
            str or None: Description of the plugin.
        """
        return self._get_instance_attribute(self.__class__.attr_description, default=self._description)

    @property
    def version(self):
        """
        Version of the plugin. Will be taken from the version attribute of the plugin module as defined in
        :attr:`attr_version` if available, otherwise from the ``version`` supplied during construction. May be None.

        Returns:
            str or None: Version of the plugin.
        """
        # the explicitly supplied version (e.g. from package metadata) wins
        # over the module's __plugin_version__ attribute
        return self._version if self._version is not None else self._get_instance_attribute(self.__class__.attr_version, default=self._version)

    @property
    def author(self):
        """
        Author of the plugin. Will be taken from the author attribute of the plugin module as defined in
        :attr:`attr_author` if available, otherwise from the ``author`` supplied during construction. May be None.

        Returns:
            str or None: Author of the plugin.
        """
        return self._get_instance_attribute(self.__class__.attr_author, default=self._author)

    @property
    def url(self):
        """
        Website URL for the plugin. Will be taken from the url attribute of the plugin module as defined in
        :attr:`attr_url` if available, otherwise from the ``url`` supplied during construction. May be None.

        Returns:
            str or None: Website URL for the plugin.
        """
        return self._get_instance_attribute(self.__class__.attr_url, default=self._url)

    @property
    def license(self):
        """
        License of the plugin. Will be taken from the license attribute of the plugin module as defined in
        :attr:`attr_license` if available, otherwise from the ``license`` supplied during construction. May be None.

        Returns:
            str or None: License of the plugin.
        """
        return self._get_instance_attribute(self.__class__.attr_license, default=self._license)

    @property
    def hooks(self):
        """
        Hooks provided by the plugin. Will be taken from the hooks attribute of the plugin module as defiend in
        :attr:`attr_hooks` if available, otherwise an empty dictionary is returned.

        Returns:
            dict: Hooks provided by the plugin.
        """
        return self._get_instance_attribute(self.__class__.attr_hooks, default={})

    @property
    def implementation(self):
        """
        Implementation provided by the plugin. Will be taken from the implementation attribute of the plugin module
        as defined in :attr:`attr_implementation` if available, otherwise None is returned.

        Returns:
            object: Implementation provided by the plugin.
        """
        return self._get_instance_attribute(self.__class__.attr_implementation, default=None)

    @property
    def helpers(self):
        """
        Helpers provided by the plugin. Will be taken from the helpers attribute of the plugin module as defined in
        :attr:`attr_helpers` if available, otherwise an empty list is returned.

        Returns:
            dict: Helpers provided by the plugin.
        """
        return self._get_instance_attribute(self.__class__.attr_helpers, default={})

    @property
    def check(self):
        """
        Method for pre-load check of plugin. Will be taken from the check attribute of the plugin module as defined in
        :attr:`attr_check` if available, otherwise a lambda always returning True is returned.

        Returns:
            callable: Check method for the plugin module which should return True if the plugin can be loaded, False
                otherwise.
        """
        return self._get_instance_attribute(self.__class__.attr_check, default=lambda: True)

    @property
    def load(self):
        """
        Method for loading the plugin module. Will be taken from the load attribute of the plugin module as defined
        in :attr:`attr_load` if available, otherwise a no-operation lambda will be returned.

        Returns:
            callable: Load method for the plugin module.
        """
        return self._get_instance_attribute(self.__class__.attr_load, default=lambda: True)

    @property
    def unload(self):
        """
        Method for unloading the plugin module. Will be taken from the unload attribute of the plugin module as defined
        in :attr:`attr_unload` if available, otherwise a no-operation lambda will be returned.

        Returns:
            callable: Unload method for the plugin module.
        """
        return self._get_instance_attribute(self.__class__.attr_unload, default=lambda: True)

    @property
    def enable(self):
        """
        Method for enabling the plugin module. Will be taken from the enable attribute of the plugin module as defined
        in :attr:`attr_enable` if available, otherwise a no-operation lambda will be returned.

        Returns:
            callable: Enable method for the plugin module.
        """
        return self._get_instance_attribute(self.__class__.attr_enable, default=lambda: True)

    @property
    def disable(self):
        """
        Method for disabling the plugin module. Will be taken from the disable attribute of the plugin module as defined
        in :attr:`attr_disable` if available, otherwise a no-operation lambda will be returned.

        Returns:
            callable: Disable method for the plugin module.
        """
        return self._get_instance_attribute(self.__class__.attr_disable, default=lambda: True)

    def _get_instance_attribute(self, attr, default=None, defaults=None):
        """Read *attr* from the plugin module; when it is absent, return the
        first non-None entry of *defaults* (if given), else *default*."""
        if not hasattr(self.instance, attr):
            if defaults is not None:
                for value in defaults:
                    if value is not None:
                        return value
            return default
        return getattr(self.instance, attr)
class PluginManager(object):
"""
The :class:`PluginManager` is the central component for finding, loading and accessing plugins provided to the
system.
It is able to discover plugins both through possible file system locations as well as customizable entry points.
"""
def __init__(self, plugin_folders, plugin_types, plugin_entry_points, logging_prefix=None,
             plugin_disabled_list=None, plugin_restart_needing_hooks=None, plugin_obsolete_hooks=None,
             plugin_validators=None):
    """Set up the manager state and run an initial plugin scan.

    Arguments:
        plugin_folders (list): folders (or ``(folder, read_only)`` tuples) to scan
        plugin_types (list): mixin classes under which implementations are indexed
        plugin_entry_points (list or str): setuptools entry point group(s) to scan
        logging_prefix (str): prefix for per-plugin logger names
        plugin_disabled_list (list): plugin identifiers to keep disabled
        plugin_restart_needing_hooks (list): hook name prefixes requiring a restart
        plugin_obsolete_hooks (list): hook names considered obsolete
        plugin_validators (list): extra callables run during plugin validation
    """
    self.logger = logging.getLogger(__name__)

    # normalize optional arguments
    if logging_prefix is None:
        logging_prefix = ""
    if plugin_disabled_list is None:
        plugin_disabled_list = []

    self.plugin_folders = plugin_folders
    self.plugin_types = plugin_types
    self.plugin_entry_points = plugin_entry_points
    self.plugin_disabled_list = plugin_disabled_list
    self.plugin_restart_needing_hooks = plugin_restart_needing_hooks
    self.plugin_obsolete_hooks = plugin_obsolete_hooks
    self.plugin_validators = plugin_validators
    self.logging_prefix = logging_prefix

    # registries filled during load/enable
    self.enabled_plugins = dict()
    self.disabled_plugins = dict()
    self.plugin_hooks = defaultdict(list)
    self.plugin_implementations = dict()
    self.plugin_implementations_by_type = defaultdict(list)

    # dependency injection configuration for implementations
    self.implementation_injects = dict()
    self.implementation_inject_factories = []
    self.implementation_pre_inits = []
    self.implementation_post_inits = []

    # lifecycle callbacks; no-ops until client code replaces them
    self.on_plugin_loaded = lambda *args, **kwargs: None
    self.on_plugin_unloaded = lambda *args, **kwargs: None
    self.on_plugin_enabled = lambda *args, **kwargs: None
    self.on_plugin_disabled = lambda *args, **kwargs: None
    self.on_plugin_implementations_initialized = lambda *args, **kwargs: None

    self.registered_clients = []

    self.marked_plugins = defaultdict(list)

    # initial scan; implementations are initialized later explicitly
    self.reload_plugins(startup=True, initialize_implementations=False)
@property
def plugins(self):
plugins = dict(self.enabled_plugins)
plugins.update(self.disabled_plugins)
return plugins
def find_plugins(self, existing=None, ignore_uninstalled=True):
if existing is None:
existing = dict(self.plugins)
result = dict()
if self.plugin_folders:
result.update(self._find_plugins_from_folders(self.plugin_folders, existing, ignored_uninstalled=ignore_uninstalled))
if self.plugin_entry_points:
existing.update(result)
result.update(self._find_plugins_from_entry_points(self.plugin_entry_points, existing, ignore_uninstalled=ignore_uninstalled))
return result
def _find_plugins_from_folders(self, folders, existing, ignored_uninstalled=True):
    """Scan *folders* for plugin packages/modules not already in *existing*.

    Folders may be plain paths or ``(path, read_only)`` 2-tuples; plugins
    from read-only folders are flagged as bundled and start disabled.
    Returns a dict mapping plugin identifier to :class:`PluginInfo`.
    """
    result = dict()

    for folder in folders:
        readonly = False
        if isinstance(folder, (list, tuple)):
            if len(folder) == 2:
                folder, readonly = folder
            else:
                # malformed tuple entry, skip it
                continue

        if not os.path.exists(folder):
            self.logger.warn("Plugin folder {folder} could not be found, skipping it".format(folder=folder))
            continue

        entries = os.listdir(folder)
        for entry in entries:
            path = os.path.join(folder, entry)
            # a plugin is either a package (directory with __init__.py)
            # or a single .py module
            if os.path.isdir(path) and os.path.isfile(os.path.join(path, "__init__.py")):
                key = entry
            elif os.path.isfile(path) and entry.endswith(".py"):
                key = entry[:-3] # strip off the .py extension
            else:
                continue

            if key in existing or key in result or (ignored_uninstalled and key in self.marked_plugins["uninstalled"]):
                # plugin is already defined, ignore it
                continue

            plugin = self._import_plugin_from_module(key, folder=folder)
            if plugin:
                plugin.origin = FolderOrigin("folder", folder)
                if readonly:
                    plugin.bundled = True
                plugin.enabled = False
                result[key] = plugin

    return result
def _find_plugins_from_entry_points(self, groups, existing, ignore_uninstalled=True):
    """Scan setuptools entry point *groups* for plugins not in *existing*.

    Package metadata (name, summary, author, homepage, license), when it
    can be read, is used to enrich the resulting :class:`PluginInfo`.
    Returns a dict mapping plugin identifier to :class:`PluginInfo`.
    """
    result = dict()

    # let's make sure we have a current working set
    working_set = pkg_resources.WorkingSet()

    if not isinstance(groups, (list, tuple)):
        groups = [groups]

    for group in groups:
        for entry_point in working_set.iter_entry_points(group=group, name=None):
            key = entry_point.name
            module_name = entry_point.module_name
            version = entry_point.dist.version

            if key in existing or key in result or (ignore_uninstalled and key in self.marked_plugins["uninstalled"]):
                # plugin is already defined or marked as uninstalled, ignore it
                continue

            kwargs = dict(module_name=module_name, version=version)
            package_name = None
            try:
                # NOTE(review): InstalledEntryPoint is defined elsewhere in
                # this module -- presumably a pkginfo-based metadata reader;
                # confirm against the full file.
                module_pkginfo = InstalledEntryPoint(entry_point)
            except:
                self.logger.exception("Something went wrong while retrieving package info data for module %s" % module_name)
            else:
                kwargs.update(dict(
                    name=module_pkginfo.name,
                    summary=module_pkginfo.summary,
                    author=module_pkginfo.author,
                    url=module_pkginfo.home_page,
                    license=module_pkginfo.license
                ))
                package_name = module_pkginfo.name

            plugin = self._import_plugin_from_module(key, **kwargs)
            if plugin:
                plugin.origin = EntryPointOrigin("entry_point", group, module_name, package_name, version)
                plugin.enabled = False
                result[key] = plugin

    return result
def _import_plugin_from_module(self, key, folder=None, module_name=None, name=None, version=None, summary=None, author=None, url=None, license=None):
# TODO error handling
try:
if folder:
module = imp.find_module(key, [folder])
elif module_name:
module = imp.find_module(module_name)
else:
return None
except:
self.logger.warn("Could not locate plugin {key}")
return None
plugin = self._import_plugin(key, *module, name=name, version=version, summary=summary, author=author, url=url, license=license)
if plugin is None:
return None
if plugin.check():
return plugin
else:
self.logger.warn("Plugin \"{plugin}\" did not pass check".format(plugin=str(plugin)))
return None
def _import_plugin(self, key, f, filename, description, name=None, version=None, summary=None, author=None, url=None, license=None):
    """Load the located module via ``imp.load_module`` and wrap it in a PluginInfo.

    *f*, *filename* and *description* are the tuple returned by
    ``imp.find_module``. Returns None if loading raises.
    """
    try:
        instance = imp.load_module(key, f, filename, description)
        return PluginInfo(key, filename, instance, name=name, version=version, description=summary, author=author, url=url, license=license)
    except:
        self.logger.exception("Error loading plugin {key}".format(key=key))
        return None
def _is_plugin_disabled(self, key):
return key in self.plugin_disabled_list or key.endswith('disabled')
def reload_plugins(self, startup=False, initialize_implementations=True, force_reload=None):
    """Re-scan all plugin sources, then load and (unless blacklisted) enable
    every newly found plugin.

    Plugins listed in *force_reload* are treated as unknown and re-imported.
    """
    self.logger.info("Loading plugins from {folders} and installed plugin packages...".format(
        folders=", ".join(map(lambda x: x[0] if isinstance(x, tuple) else str(x), self.plugin_folders))
    ))

    if force_reload is None:
        force_reload = []

    plugins = self.find_plugins(existing=dict((k, v) for k, v in self.plugins.items() if not k in force_reload))
    # newly found plugins start out on the disabled side
    self.disabled_plugins.update(plugins)

    for name, plugin in plugins.items():
        try:
            self.load_plugin(name, plugin, startup=startup, initialize_implementation=initialize_implementations)
            if not self._is_plugin_disabled(name):
                self.enable_plugin(name, plugin=plugin, initialize_implementation=initialize_implementations, startup=startup)
        except PluginNeedsRestart:
            # will only become active after the next restart
            pass
        except PluginLifecycleException as e:
            self.logger.info(str(e))

    if len(self.enabled_plugins) <= 0:
        self.logger.info("No plugins found")
    else:
        self.logger.info("Found {count} plugin(s) providing {implementations} mixin implementations, {hooks} hook handlers".format(
            count=len(self.enabled_plugins) + len(self.disabled_plugins),
            implementations=len(self.plugin_implementations),
            hooks=sum(map(lambda x: len(x), self.plugin_hooks.values()))
        ))
def mark_plugin(self, name, uninstalled=None):
if not name in self.plugins:
self.logger.warn("Trying to mark an unknown plugin {name}".format(**locals()))
if uninstalled is not None:
if uninstalled and not name in self.marked_plugins["uninstalled"]:
self.marked_plugins["uninstalled"].append(name)
elif not uninstalled and name in self.marked_plugins["uninstalled"]:
self.marked_plugins["uninstalled"].remove(name)
def load_plugin(self, name, plugin=None, startup=False, initialize_implementation=True):
    """Run the load phase for *name*: validate, call __plugin_load__, notify.

    NOTE(review): *startup* and *initialize_implementation* are accepted but
    currently unused in this method -- confirm they are kept for interface
    symmetry with enable_plugin.
    """
    if not name in self.plugins:
        self.logger.warn("Trying to load an unknown plugin {name}".format(**locals()))
        return

    if plugin is None:
        plugin = self.plugins[name]

    try:
        plugin.validate("before_load", additional_validators=self.plugin_validators)
        plugin.load()
        plugin.validate("after_load", additional_validators=self.plugin_validators)
        self.on_plugin_loaded(name, plugin)

        plugin.loaded = True

        self.logger.debug("Loaded plugin {name}: {plugin}".format(**locals()))
    except PluginLifecycleException as e:
        # lifecycle problems are propagated to the caller
        raise e
    except:
        self.logger.exception("There was an error loading plugin %s" % name)
def unload_plugin(self, name):
    """Run the unload phase for *name*: disable if needed, call
    __plugin_unload__, notify, and drop it from both plugin registries."""
    if not name in self.plugins:
        self.logger.warn("Trying to unload unknown plugin {name}".format(**locals()))
        return

    plugin = self.plugins[name]

    try:
        if plugin.enabled:
            self.disable_plugin(name, plugin=plugin)

        plugin.unload()
        self.on_plugin_unloaded(name, plugin)

        if name in self.enabled_plugins:
            del self.enabled_plugins[name]
        if name in self.disabled_plugins:
            del self.disabled_plugins[name]

        plugin.loaded = False

        self.logger.debug("Unloaded plugin {name}: {plugin}".format(**locals()))
    except PluginLifecycleException as e:
        # lifecycle problems are propagated to the caller
        raise e
    except:
        self.logger.exception("There was an error unloading plugin {name}".format(**locals()))

        # make sure the plugin is NOT in the list of enabled plugins but in the list of disabled plugins
        if name in self.enabled_plugins:
            del self.enabled_plugins[name]
        if not name in self.disabled_plugins:
            self.disabled_plugins[name] = plugin
def enable_plugin(self, name, plugin=None, initialize_implementation=True, startup=False):
    """Enable the (currently disabled) plugin *name*.

    Raises PluginNeedsRestart for restart-needing plugins outside startup
    and PluginCantEnable when obsolete hooks are detected. Returns True on
    success, False when enabling or implementation init failed.
    """
    if not name in self.disabled_plugins:
        self.logger.warn("Tried to enable plugin {name}, however it is not disabled".format(**locals()))
        return

    if plugin is None:
        plugin = self.disabled_plugins[name]

    if not startup and self.is_restart_needing_plugin(plugin):
        raise PluginNeedsRestart(name)

    if self.has_obsolete_hooks(plugin):
        raise PluginCantEnable(name, "Dependency on obsolete hooks detected, full functionality cannot be guaranteed")

    try:
        plugin.enable()
        self._activate_plugin(name, plugin)
    except PluginLifecycleException as e:
        raise e
    except:
        self.logger.exception("There was an error while enabling plugin {name}".format(**locals()))
        return False
    else:
        # move the plugin from the disabled to the enabled registry
        if name in self.disabled_plugins:
            del self.disabled_plugins[name]
        self.enabled_plugins[name] = plugin
        plugin.enabled = True

        if plugin.implementation:
            if initialize_implementation:
                if not self.initialize_implementation_of_plugin(name, plugin):
                    return False
            plugin.implementation.on_plugin_enabled()
        self.on_plugin_enabled(name, plugin)

        self.logger.debug("Enabled plugin {name}: {plugin}".format(**locals()))

    return True
def disable_plugin(self, name, plugin=None):
    """Disable the (currently enabled) plugin *name*.

    Raises PluginNeedsRestart for restart-needing plugins. Returns True on
    success, False when disabling failed.
    """
    if not name in self.enabled_plugins:
        self.logger.warn("Tried to disable plugin {name}, however it is not enabled".format(**locals()))
        return

    if plugin is None:
        plugin = self.enabled_plugins[name]

    if self.is_restart_needing_plugin(plugin):
        raise PluginNeedsRestart(name)

    try:
        plugin.disable()
        self._deactivate_plugin(name, plugin)
    except PluginLifecycleException as e:
        raise e
    except:
        self.logger.exception("There was an error while disabling plugin {name}".format(**locals()))
        return False
    else:
        # move the plugin from the enabled to the disabled registry
        if name in self.enabled_plugins:
            del self.enabled_plugins[name]
        self.disabled_plugins[name] = plugin
        plugin.enabled = False

        if plugin.implementation:
            plugin.implementation.on_plugin_disabled()
        self.on_plugin_disabled(name, plugin)

        self.logger.debug("Disabled plugin {name}: {plugin}".format(**locals()))

    return True
def _activate_plugin(self, name, plugin):
plugin.hotchangeable = self.is_restart_needing_plugin(plugin)
# evaluate registered hooks
for hook, callback in plugin.hooks.items():
self.plugin_hooks[hook].append((name, callback))
# evaluate registered implementation
if plugin.implementation:
for plugin_type in self.plugin_types:
if isinstance(plugin.implementation, plugin_type):
self.plugin_implementations_by_type[plugin_type].append((name, plugin.implementation))
self.plugin_implementations[name] = plugin.implementation
def _deactivate_plugin(self, name, plugin):
for hook, callback in plugin.hooks.items():
try:
self.plugin_hooks[hook].remove((name, callback))
except ValueError:
# that's ok, the plugin was just not registered for the hook
pass
if plugin.implementation is not None:
if name in self.plugin_implementations:
del self.plugin_implementations[name]
for plugin_type in self.plugin_types:
try:
self.plugin_implementations_by_type[plugin_type].remove((name, plugin.implementation))
except ValueError:
# that's ok, the plugin was just not registered for the type
pass
def is_restart_needing_plugin(self, plugin):
return self.has_restart_needing_implementation(plugin) or self.has_restart_needing_hooks(plugin)
def has_restart_needing_implementation(self, plugin):
if not plugin.implementation:
return False
return isinstance(plugin.implementation, RestartNeedingPlugin)
def has_restart_needing_hooks(self, plugin):
if not plugin.hooks:
return False
hooks = plugin.hooks.keys()
for hook in hooks:
if self.is_restart_needing_hook(hook):
return True
return False
def has_obsolete_hooks(self, plugin):
if not plugin.hooks:
return False
hooks = plugin.hooks.keys()
for hook in hooks:
if self.is_obsolete_hook(hook):
return True
return False
def is_restart_needing_hook(self, hook):
if self.plugin_restart_needing_hooks is None:
return False
for h in self.plugin_restart_needing_hooks:
if hook.startswith(h):
return True
return False
def is_obsolete_hook(self, hook):
if self.plugin_obsolete_hooks is None:
return False
return hook in self.plugin_obsolete_hooks
def initialize_implementations(self, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
    """Initialize the mixin implementations of all currently enabled plugins,
    forwarding the additional injection/init arguments to each."""
    for name, plugin in self.enabled_plugins.items():
        self.initialize_implementation_of_plugin(name, plugin,
                                                 additional_injects=additional_injects,
                                                 additional_inject_factories=additional_inject_factories,
                                                 additional_pre_inits=additional_pre_inits,
                                                 additional_post_inits=additional_post_inits)

    self.logger.info("Initialized {count} plugin(s)".format(count=len(self.plugin_implementations)))
def initialize_implementation_of_plugin(self, name, plugin, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
    """Initialize *plugin*'s mixin implementation, if it has one.

    Returns None when the plugin has no implementation, otherwise the
    True/False result of :meth:`initialize_implementation`.
    """
    if plugin.implementation is None:
        return

    return self.initialize_implementation(name, plugin, plugin.implementation,
                                          additional_injects=additional_injects,
                                          additional_inject_factories=additional_inject_factories,
                                          additional_pre_inits=additional_pre_inits,
                                          additional_post_inits=additional_post_inits)
def initialize_implementation(self, name, plugin, implementation, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
if additional_injects is None:
additional_injects = dict()
if additional_inject_factories is None:
additional_inject_factories = []
if additional_pre_inits is None:
additional_pre_inits = []
if additional_post_inits is None:
additional_post_inits = []
injects = self.implementation_injects
injects.update(additional_injects)
inject_factories = self.implementation_inject_factories
inject_factories += additional_inject_factories
pre_inits = self.implementation_pre_inits
pre_inits += additional_pre_inits
post_inits = self.implementation_post_inits
post_inits += additional_post_inits
try:
kwargs = dict(injects)
kwargs.update(dict(
identifier=name,
plugin_name=plugin.name,
plugin_version=plugin.version,
basefolder=os.path.realpath(plugin.location),
logger=logging.getLogger(self.logging_prefix + name),
))
# inject the additional_injects
for arg, value in kwargs.items():
setattr(implementation, "_" + arg, value)
# inject any injects produced in the additional_inject_factories
for factory in inject_factories:
try:
return_value = factory(name, implementation)
except:
self.logger.exception("Exception while executing injection factory %r" % factory)
else:
if return_value is not None:
if isinstance(return_value, dict):
for arg, value in return_value.items():
setattr(implementation, "_" + arg, value)
# execute any additional pre init methods
for pre_init in pre_inits:
pre_init(name, implementation)
implementation.initialize()
# execute any additional post init methods
for post_init in post_inits:
post_init(name, implementation)
except Exception as e:
self._deactivate_plugin(name, plugin)
plugin.enabled = False
if isinstance(e, PluginLifecycleException):
raise e
else:
self.logger.exception("Exception while initializing plugin {name}, disabling it".format(**locals()))
return False
else:
self.on_plugin_implementations_initialized(name, plugin)
self.logger.debug("Initialized plugin mixin implementation for plugin {name}".format(**locals()))
return True
def log_all_plugins(self, show_bundled=True, bundled_str=(" (bundled)", ""), show_location=True, location_str=" = {location}", show_enabled=True, enabled_str=(" ", "!")):
    """Log an overview of all registered plugins, enabled and disabled.

    Bug fix: the reported count included disabled plugins but the listing
    only rendered the enabled ones; both now cover the same set. Also uses
    explicit list() so dict views (Python 3) work too.
    """
    all_plugins = list(self.enabled_plugins.values()) + list(self.disabled_plugins.values())

    if len(all_plugins) <= 0:
        self.logger.info("No plugins available")
    else:
        formatted = sorted(
            map(lambda x: "| " + x.long_str(show_bundled=show_bundled,
                                            bundled_strs=bundled_str,
                                            show_location=show_location,
                                            location_str=location_str,
                                            show_enabled=show_enabled,
                                            enabled_strs=enabled_str),
                all_plugins)
        )
        self.logger.info("{count} plugin(s) registered with the system:\n{plugins}".format(
            count=len(all_plugins), plugins="\n".join(formatted)))
def get_plugin(self, identifier, require_enabled=True):
    """
    Retrieves the module of the plugin identified by ``identifier``.

    Returns None if the plugin is not registered, or if it is disabled
    and ``require_enabled`` is True (the default).

    Arguments:
        identifier (str): The identifier of the plugin to retrieve.
        require_enabled (boolean): Whether to only return the plugin if it
            is enabled (True, default) or also if it's disabled.

    Returns:
        module: The requested plugin module or None
    """
    info = self.get_plugin_info(identifier, require_enabled=require_enabled)
    return info.instance if info is not None else None
def get_plugin_info(self, identifier, require_enabled=True):
    """
    Retrieves the :class:`PluginInfo` instance identified by ``identifier``.

    Returns None if the plugin is not registered, or if it is disabled
    and ``require_enabled`` is True (the default).

    Arguments:
        identifier (str): The identifier of the plugin to retrieve.
        require_enabled (boolean): Whether to only return the plugin if it
            is enabled (True, default) or also if it's disabled.

    Returns:
        ~.PluginInfo: The requested :class:`PluginInfo` or None
    """
    try:
        return self.enabled_plugins[identifier]
    except KeyError:
        pass
    if not require_enabled:
        try:
            return self.disabled_plugins[identifier]
        except KeyError:
            pass
    return None
def get_hooks(self, hook):
    """
    Retrieves all registered handlers for the specified hook.

    Arguments:
        hook (str): The hook for which to retrieve the handlers.

    Returns:
        dict: All registered handlers, mapped by their plugin's identifier;
            empty if no handler is registered for ``hook``.
    """
    if hook not in self.plugin_hooks:
        return dict()
    return dict((plugin_id, handler) for plugin_id, handler in self.plugin_hooks[hook])
def get_implementations(self, *types):
    """
    Get all mixin implementations that implement *all* of the provided ``types``.

    Arguments:
        types (one or more type): The types a mixin implementation needs to
            implement in order to be returned.

    Returns:
        list: A list of all found implementations. Bug fix: the empty case
            previously returned a ``dict()`` while the non-empty case
            returned a list; the return type is now always a list.
    """
    result = None
    for required_type in types:
        # registry entries are (identifier, implementation) tuples
        candidates = set(self.plugin_implementations_by_type[required_type])
        result = candidates if result is None else result.intersection(candidates)

    if not result:
        return []
    return [implementation for _, implementation in result]
def get_filtered_implementations(self, f, *types):
    """
    Get all mixin implementations that implement *all* of the provided
    ``types`` and match the provided filter ``f``.

    Arguments:
        f (callable): A filter function returning True for implementations
            to return and False for those to exclude.
        types (one or more type): The types a mixin implementation needs to
            implement in order to be returned.

    Returns:
        list: A list of all found and matching implementations.
    """
    assert callable(f)
    return filter(f, self.get_implementations(*types))
def get_helpers(self, name, *helpers):
    """
    Retrieves the named ``helpers`` for the plugin with identifier ``name``.

    If the plugin is not available, returns None. Otherwise returns a
    :class:`dict` mapping the requested helper names to their methods --
    helpers that could not be resolved are simply missing from the dict.
    If no ``helpers`` are named, all of the plugin's helpers are returned.

    Arguments:
        name (str): Identifier of the plugin for which to look up the helpers.
        helpers (one or more str): Identifiers of the helpers to return.

    Returns:
        dict: The resolved helpers mapped by identifier, or None if the
            plugin is not registered/enabled.
    """
    if name not in self.enabled_plugins:
        return None

    all_helpers = self.enabled_plugins[name].helpers
    if not helpers:
        return all_helpers
    return dict((key, value) for key, value in all_helpers.items() if key in helpers)
def register_message_receiver(self, client):
    """
    Registers a ``client`` for receiving plugin messages. The ``client``
    needs to be a callable accepting two input arguments, ``plugin`` (the
    sending plugin's identifier) and ``data`` (the message itself).
    None values are silently ignored.
    """
    if client is not None:
        self.registered_clients.append(client)
def unregister_message_receiver(self, client):
    """
    Unregisters a ``client`` for receiving plugin messages.

    Raises a ValueError (via ``list.remove``) if ``client`` was never
    registered.
    """
    self.registered_clients.remove(client)
def send_plugin_message(self, plugin, data):
    """
    Sends ``data`` in the name of ``plugin`` to all currently registered
    message receivers by invoking them with the two arguments.

    Arguments:
        plugin (str): The sending plugin's identifier.
        data (object): The message.
    """
    for client in self.registered_clients:
        try:
            client(plugin, data)
        except Exception:
            # one failing receiver must not break delivery to the others;
            # previously a bare except which also caught KeyboardInterrupt
            self.logger.exception("Exception while sending plugin data to client")
class InstalledEntryPoint(pkginfo.Installed):
    """pkginfo.Installed subclass resolving package metadata for a setuptools
    entry point's module instead of a plain package name."""

    def __init__(self, entry_point, metadata_version=None):
        self.entry_point = entry_point
        package = entry_point.module_name
        pkginfo.Installed.__init__(self, package, metadata_version=metadata_version)

    def read(self):
        """Locate and return the raw PKG-INFO contents for the entry point's
        package by scanning sys.path entries and the package directory for
        EGG-INFO / *.egg-info candidates; warns and returns None if nothing
        is found."""
        import sys
        import glob
        import warnings

        opj = os.path.join
        if self.package is not None:
            package = self.package.__package__
            if package is None:
                package = self.package.__name__

            # filesystem-safe project name of the distribution owning the entry point
            project = pkg_resources.to_filename(pkg_resources.safe_name(self.entry_point.dist.project_name))

            package_pattern = '%s*.egg-info' % package
            project_pattern = '%s*.egg-info' % project

            file = getattr(self.package, '__file__', None)
            if file is not None:
                candidates = []

                def _add_candidate(where):
                    candidates.extend(glob.glob(where))

                # look next to every sys.path entry containing the package...
                for entry in sys.path:
                    if file.startswith(entry):
                        _add_candidate(opj(entry, 'EGG-INFO')) # egg?
                        for pattern in (package_pattern, project_pattern): # dist-installed?
                            _add_candidate(opj(entry, pattern))
                # ...and in/above the package's own directory
                dir, name = os.path.split(self.package.__file__)
                for pattern in (package_pattern, project_pattern):
                    _add_candidate(opj(dir, pattern))
                    _add_candidate(opj(dir, '..', pattern))
                for candidate in candidates:
                    if os.path.isdir(candidate):
                        path = opj(candidate, 'PKG-INFO')
                    else:
                        path = candidate
                    if os.path.exists(path):
                        with open(path) as f:
                            return f.read()
        warnings.warn('No PKG-INFO found for package: %s' % self.package_name)
class Plugin(object):
    """
    The parent class of all plugin implementations.

    .. attribute:: _identifier

       The identifier of the plugin. Injected by the plugin core system upon
       initialization of the implementation.

    .. attribute:: _plugin_name

       The name of the plugin. Injected by the plugin core system upon
       initialization of the implementation.

    .. attribute:: _plugin_version

       The version of the plugin. Injected by the plugin core system upon
       initialization of the implementation.

    .. attribute:: _basefolder

       The base folder of the plugin. Injected by the plugin core system upon
       initialization of the implementation.

    .. attribute:: _logger

       The logger instance to use, with the logging name set to the
       :attr:`PluginManager.logging_prefix` of the :class:`PluginManager`
       concatenated with :attr:`_identifier`. Injected by the plugin core
       system upon initialization of the implementation.
    """

    def initialize(self):
        """
        Called by the plugin core after performing all injections. Override
        this to initialize your implementation.
        """
        pass

    def on_plugin_enabled(self):
        # Lifecycle callback; presumably invoked by the plugin manager when
        # the plugin gets enabled -- confirm against the manager's enable
        # path (not visible in this chunk). Override to react to enabling.
        pass

    def on_plugin_disabled(self):
        # Lifecycle callback; presumably invoked by the plugin manager when
        # the plugin gets disabled -- confirm against the manager's disable
        # path (not visible in this chunk). Override to react to disabling.
        pass
class RestartNeedingPlugin(Plugin):
    """Marker base class; presumably plugins deriving from this can only be
    enabled or disabled with a restart (cf. PluginNeedsRestart) -- confirm
    against the manager's enable/disable paths."""
    pass
class PluginNeedsRestart(Exception):
    """Raised when a plugin cannot be enabled or disabled at runtime."""

    def __init__(self, name):
        Exception.__init__(self)
        self.name = name
        self.message = "Plugin {name} cannot be enabled or disabled after system startup".format(name=name)
class PluginLifecycleException(Exception):
    """Base class for plugin lifecycle errors; ``message`` is a template
    which may reference ``{name}`` and ``{reason}``."""

    def __init__(self, name, reason, message):
        Exception.__init__(self)
        formatted = message.format(**locals())
        self.name = name
        self.reason = reason
        self.message = formatted

    def __str__(self):
        return self.message
class PluginCantInitialize(PluginLifecycleException):
    """Raised when a plugin cannot be initialized."""
    def __init__(self, name, reason):
        PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be initialized: {reason}")
class PluginCantEnable(PluginLifecycleException):
    """Raised when a plugin cannot be enabled."""
    def __init__(self, name, reason):
        PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be enabled: {reason}")
class PluginCantDisable(PluginLifecycleException):
    """Raised when a plugin cannot be disabled."""
    def __init__(self, name, reason):
        PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be disabled: {reason}")
| DueLaser/OctoPrint | src/octoprint/plugin/core.py | Python | gpl-3.0 | 42,591 |
import json
import os
import random
import string
import uuid

import nova_api
def cinder_request(self,
                   url_detail,
                   request_type='get',
                   request_name=None,
                   data=None,
                   locust_name=None):
    """Issue a request against the cinder (volumev2) endpoint.

    ``url_detail`` is appended to the endpoint URL, ``request_type`` names
    the HTTP method on the locust client, ``data`` (if given) is JSON
    encoded as the body and ``locust_name`` groups the request in the
    locust statistics. NOTE: ``request_name`` is accepted but currently
    unused. Returns the response object.
    """
    url = self.get_endpoint('volumev2')
    if url_detail:
        url = os.path.join(url, url_detail)

    headers = {'X-Auth-Project-Id': self.keystone_tenant,
               'X-Auth-Token': self.auth_token,
               'Content-Type': 'application/json',
               'Accept': 'application/json'}

    method = getattr(self.client, request_type)
    kwargs = dict(headers=headers, name=locust_name)
    if data:
        kwargs['data'] = json.dumps(data)
    response = method(url, **kwargs)

    self.output(url)
    self.output("Response status code: %s" % response.status_code)
    self.output("Response content: %s" % response.content)
    return response
def cinder_get_volume_id(self):
    """Return the id of a randomly chosen volume from the currently
    available volumes."""
    response = cinder_request(self, 'volumes', 'get')
    volumes = json.loads(response.content)['volumes']
    return random.choice(volumes)['id']
def cinder_get_snapshot_id(self):
    """Return the id of a randomly chosen snapshot from the currently
    available snapshots."""
    response = cinder_request(self, 'snapshots', 'get')
    snapshots = json.loads(response.content)['snapshots']
    return random.choice(snapshots)['id']
def cinder_get_image_id(self):
    """Return the id of a randomly chosen image from the currently
    available images (looked up via the nova API)."""
    response = nova_api.nova_request(self, 'images', 'get')
    images = json.loads(response.content)['images']
    return random.choice(images)['id']
def cinder_get_server_id(self):
    """Return the id of a randomly chosen server (looked up via the
    nova API)."""
    response = nova_api.nova_request(self, 'servers', 'get')
    servers = json.loads(response.content)['servers']
    return random.choice(servers)['id']
def list_volumes(self):
    """GET volumes -- list all volumes."""
    return cinder_request(self, 'volumes', request_type='get',
                          request_name='cinder_list_volumes')
def list_volumes_detail(self):
    """GET volumes/detail -- list all volumes with details."""
    return cinder_request(self, 'volumes/detail', request_type='get',
                          request_name='cinder_list_volumes_detail')
def list_volume_detail(self, volume_id=None):
    """GET volumes/[id] -- show one volume, a random one if not given."""
    if not volume_id:
        volume_id = cinder_get_volume_id(self)
    return cinder_request(self, 'volumes/%s' % volume_id,
                          request_type='get',
                          request_name='cinder_list_volume_detail',
                          locust_name='volumes/[id]')
def list_volume_types(self):
    """GET types -- list all volume types."""
    return cinder_request(self, 'types', request_type='get',
                          request_name='cinder_list_volume_types')
def list_snapshots(self):
    """GET snapshots -- list all snapshots."""
    return cinder_request(self, 'snapshots', request_type='get',
                          request_name='cinder_list_snapshots')
def list_snapshots_detail(self):
    """GET snapshots/detail -- list all snapshots with details."""
    return cinder_request(self, 'snapshots/detail', request_type='get',
                          request_name='cinder_list_snapshots_detail')
def list_snapshot_detail(self, snapshot_id=None):
    """GET snapshots/[id] -- show one snapshot, a random one if not given."""
    if not snapshot_id:
        snapshot_id = cinder_get_snapshot_id(self)
    return cinder_request(self, 'snapshots/%s' % snapshot_id,
                          request_type='get',
                          request_name='cinder_list_snapshot_detail',
                          locust_name='snapshots/[id]')
def list_images(self):
    """GET images -- list all images."""
    return cinder_request(self, 'images', request_type='get',
                          request_name='cinder_list_images')
def list_images_detail(self):
    """GET images/detail -- list all images with details."""
    return cinder_request(self, 'images/detail', request_type='get',
                          request_name='cinder_list_images_detail')
def list_image_detail(self, image_id=None):
    """GET images/[id] -- show one image; picks a random available image
    when no id is given."""
    if not image_id:
        image_id = cinder_get_image_id(self)
    return cinder_request(self, 'images/%s' % image_id,
                          request_type='get',
                          request_name='cinder_list_image_detail',
                          locust_name='images/[id]')
def list_image_metadata(self, image_id=None):
    """GET images/[id]/metadata -- show an image's metadata, random image
    if not given."""
    if not image_id:
        image_id = cinder_get_image_id(self)
    return cinder_request(self, 'images/%s/metadata' % image_id,
                          request_type='get',
                          request_name='cinder_list_image_metadata',
                          locust_name='images/[id]/metadata')
def update_image_metadata(self, image_id = None, metadata=None):
    """POST images/[id]/metadata -- merge ``metadata`` into an image's
    metadata; picks a random image when no id is given.

    NOTE(review): ``cinder_get_test_metadata`` is not defined in this
    module or its imports -- calling this without explicit ``metadata``
    will raise a NameError. Confirm where that helper is meant to live.
    """
    if not image_id:
        image_id = cinder_get_image_id(self)
    if not metadata:
        metadata = cinder_get_test_metadata(self)
    data = {"metadata":metadata}
    return cinder_request(self,
                          'images/%s/metadata' % image_id,
                          'post',
                          'cinder_update_image_metadata',
                          data,
                          locust_name='images/[id]/metadata')
def overwrite_image_metadata(self, image_id = None, metadata=None):
    """PUT images/[id]/metadata -- replace an image's metadata wholesale;
    picks a random image when no id is given.

    NOTE(review): ``cinder_get_test_metadata`` is not defined in this
    module or its imports -- calling this without explicit ``metadata``
    will raise a NameError. Confirm where that helper is meant to live.
    """
    if not image_id:
        image_id = cinder_get_image_id(self)
    if not metadata:
        metadata = cinder_get_test_metadata(self)
    data = {"metadata":metadata}
    return cinder_request(self,
                          'images/%s/metadata' % image_id,
                          'put',
                          'cinder_overwrite_image_metadata',
                          data,
                          locust_name='images/[id]/metadata')
def create_volume(self,
                  volume_id=None,
                  snapshot_id=None,
                  image_id=None,
                  description=None,
                  size=1,
                  name=None,
                  bootable=False,
                  metadata=None
                  ):
    """POST volumes -- create a volume, optionally from a source volume,
    snapshot or image. A uuid-based name is generated when none is given.

    Fixes: ``metadata`` used a shared mutable default (``{}``); the
    auto-generated name relies on ``uuid`` which was never imported at
    module level (now added to the imports).
    """
    if not name:
        name = "volume-%s" % uuid.uuid4()
    if metadata is None:
        metadata = {}
    data = {
        "volume": {
            "source_volid": volume_id,
            "snapshot_id": snapshot_id,
            "description": description,
            "size": size,
            "name": name,
            "imageRef": image_id,
            "bootable": bootable,
            "metadata": metadata
        }
    }
    response = cinder_request(self,
                              'volumes',
                              'post',
                              'cinder_create_volume',
                              data)
    return response
def delete_volume(self, volume_id):
    """DELETE volumes/[id] -- delete the given volume."""
    cinder_request(self, 'volumes/%s' % volume_id,
                   request_type='delete',
                   request_name='cinder_delete_volume',
                   locust_name='volumes/[id]')
def create_snapshot(self,
                    volume_id=None,
                    name=None,
                    force=False,
                    description=None):
    """POST snapshots -- snapshot ``volume_id`` (a random volume when
    omitted). A uuid-based name is generated when none is given.

    Fixes a NameError: the volume fallback called the non-existent
    ``get_volume_id``; the actual helper is ``cinder_get_volume_id``.
    Also relies on the module-level ``import uuid`` (now added).
    """
    if not name:
        name = "snapshot-%s" % uuid.uuid4()
    if not volume_id:
        volume_id = cinder_get_volume_id(self)
    data = {"snapshot": {
                "name": name,
                "description": description,
                "volume_id": volume_id,
                "force": force
                }
            }
    response = cinder_request(self,
                              'snapshots',
                              'post',
                              'cinder_create_snapshot',
                              data)
    return response
def delete_snapshot(self, snapshot_id):
    """DELETE snapshots/[id] -- delete the given snapshot.

    Fixes a copy-paste bug: the locust stats name was 'volumes/[id]'
    although the request targets the snapshots URL.
    """
    cinder_request(self,
                   'snapshots/%s' % snapshot_id,
                   'delete',
                   'cinder_delete_snapshot',
                   locust_name='snapshots/[id]')
def resize_server(self, server_id, flavor_id=None):
    """POST servers/[id]/action -- resize ``server_id`` to ``flavor_id``.

    Now returns the response like the other server actions do (the
    response was silently discarded before).
    """
    data = {
        "resize": {
            "flavorRef": flavor_id
        }
    }
    return cinder_request(self,
                          'servers/%s/action' % server_id,
                          'post',
                          'cinder_resize_server',
                          data,
                          locust_name='servers/[resize]/[id]')
def confirm_resize_server(self, server_id):
    """POST servers/[id]/action -- confirm a pending resize."""
    return cinder_request(self, 'servers/%s/action' % server_id,
                          request_type='post',
                          request_name='cinder_confirm_resize_server',
                          data={"confirmResize": None},
                          locust_name='servers/[confirm_resize]/[id]')
def revert_resize_server(self, server_id):
    """POST servers/[id]/action -- revert a pending resize.

    The request_name was copy-pasted from the plain resize action
    ('cinder_resize_server'); renamed to its own identifier. Note that
    ``cinder_request`` currently ignores request_name, so this is purely
    cosmetic.
    """
    data = {"revertResize": None}
    return cinder_request(self,
                          'servers/%s/action' % server_id,
                          'post',
                          'cinder_revert_resize_server',
                          data,
                          locust_name='servers/[revert_resize]/[id]')
def suspend_server(self, server_id):
    """POST servers/[id]/action -- suspend the given server."""
    return cinder_request(self, 'servers/%s/action' % server_id,
                          request_type='post',
                          request_name='cinder_suspend_server',
                          data={"suspend": None},
                          locust_name='servers/[suspend]/[id]')
def resume_server(self, server_id):
    """POST servers/[id]/action -- resume the given (suspended) server."""
    return cinder_request(self, 'servers/%s/action' % server_id,
                          request_type='post',
                          request_name='cinder_resume_server',
                          data={"resume": None},
                          locust_name='servers/[resume]/[id]')
def update_server_metadata(self, server_id=None, metadata=None):
    """POST servers/[id]/metadata -- merge ``metadata`` into a server's
    metadata; picks a random server when no id is given.

    NOTE(review): ``cinder_get_test_metadata`` is not defined in this
    module or its imports -- calling this without explicit ``metadata``
    will raise a NameError. Confirm where that helper is meant to live.
    """
    if not server_id:
        server_id = cinder_get_server_id(self)
    if not metadata:
        metadata = cinder_get_test_metadata(self)
    data = {"metadata":metadata}
    return cinder_request(self,
                          'servers/%s/metadata' % server_id,
                          'post',
                          'cinder_update_server_metadata',
                          data,
                          locust_name='servers/[id]/metadata')
def overwrite_server_metadata(self, server_id=None, metadata=None):
    """PUT servers/[id]/metadata -- replace a server's metadata wholesale;
    picks a random server when no id is given.

    NOTE(review): ``cinder_get_test_metadata`` is not defined in this
    module or its imports -- calling this without explicit ``metadata``
    will raise a NameError. Confirm where that helper is meant to live.
    """
    if not server_id:
        server_id = cinder_get_server_id(self)
    if not metadata:
        metadata = cinder_get_test_metadata(self)
    data = {"metadata":metadata}
    return cinder_request(self,
                          'servers/%s/metadata' % server_id,
                          'put',
                          'cinder_overwrite_server_metadata',
                          data,
                          locust_name='servers/[id]/metadata')
def list_flavors(self):
    """GET flavors -- list all flavors."""
    return cinder_request(self, 'flavors', request_type='get',
                          request_name='cinder_list_flavors')
def create_flavor(self, name=None,
                  ram=128,
                  vcpus=1,
                  disk=0,
                  id='auto',
                  is_public=False):
    """POST flavors -- create a flavor.

    Note: the ``id`` parameter shadows the builtin, but it is part of the
    public signature and so kept as-is.
    """
    flavor = {
        "name": name,
        "ram": ram,
        "vcpus": vcpus,
        "disk": disk,
        "id": id,
        "os-flavor-access:is_public": is_public
    }
    return cinder_request(self, 'flavors', request_type='post',
                          request_name='cinder_create_flavor',
                          data={"flavor": flavor})
def create_floating_ip(self, pool=None):
    """POST os-floating-ips -- allocate a floating IP, optionally from
    ``pool``. With no pool, an empty payload is passed which
    ``cinder_request`` treats as "no body"."""
    payload = {}
    if pool:
        payload['pool'] = pool
    return cinder_request(self, 'os-floating-ips', request_type='post',
                          request_name='cinder_create_floating_ip',
                          data=payload)
def delete_floating_ip(self, floating_ip_id=None):
    """DELETE os-floating-ips/[id] -- release a floating IP.

    NOTE(review): ``cinder_get_floating_ip_id`` is not defined in this
    module or its imports -- calling this without an explicit id will
    raise a NameError. Confirm where that helper is meant to live.
    """
    if not floating_ip_id:
        floating_ip_id = cinder_get_floating_ip_id(self)
    return cinder_request(self,
                          'os-floating-ips/%s' % floating_ip_id,
                          'delete',
                          'cinder_delete_floating_ip',
                          locust_name='os-floating-ips/[floating-ip-id]')
def list_floating_ips(self):
    """GET os-floating-ips -- list all floating IPs."""
    return cinder_request(self, 'os-floating-ips', request_type='get',
                          request_name='cinder_list_floating_ips')
def assign_floating_ip(self,
                       server_id=None,
                       floating_ip=None,
                       pool=None):
    """POST servers/[id]/action -- attach ``floating_ip`` to a server;
    picks a random server when none is given.

    NOTE(review): ``cinder_get_floating_ip`` is not defined in this module
    or its imports -- calling this without an explicit ``floating_ip``
    will raise a NameError. Confirm where that helper is meant to live.
    """
    if not server_id:
        server_id = cinder_get_server_id(self)
    if not floating_ip:
        floating_ip = cinder_get_floating_ip(self)
    data = {
        "addFloatingIp": {
            "address": floating_ip
        }
    }
    if pool:
        data['addFloatingIp']['pool']=pool
    return cinder_request(self,
                          'servers/%s/action' % server_id,
                          'post',
                          'cinder_assign_floating_ip',
                          data,
                          locust_name='servers/[server_id]/[assign-floating-ip]')
| pcrews/rannsaka | test_files/lib/openstack/cinder_api.py | Python | apache-2.0 | 13,466 |
# __init__.py: Python image functions
# Copyright (C) 2008
# Associated Universities, Inc. Washington DC, USA.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning AIPS++ should be addressed as follows:
# Internet email: aips2-request@nrao.edu.
# Postal address: AIPS++ Project Office
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#
# $Id$
"""Python interface to the Casacore images module.
A `casacore image <../../casacore/doc/html/group__Images__module.html>`_
represents an astronomical image of arbitrary dimensionality.
Several image formats are recognized:
`casacore paged image <../../casacore/doc/html/classcasa_1_1PagedImage.html>`_
is the native casacore image format stored in a casacore table.
`HDF5 <http://www.hdfgroup.org/HDF5>`_
is the HDF5 format often used in the earth science community.
`FITS <http://heasarc.gsfc.nasa.gov/docs/software/fitsio/fitsio.html>`_
is the well-known astronomical FITS format
`miriad <http://www.atnf.csiro.au/computing/software/miriad>`_
is the format used by the radio-astronomical MIRIAD package.
The following functionality exists:
- get and put data (slices)
- get or put a mask
- get meta data like coordinates and history
- get, put, or search optional image attributes (as used for LOFAR)
- get statistics
- form a subimage
- form an image expression which is treated as an ordinary image
- regrid the image
- write the image to a FITS file
"""
# Make image interface available.
from .image import image
| casacore/python-casacore | casacore/images/__init__.py | Python | gpl-2.0 | 2,303 |
# sql/util.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""High level utilities which build upon other modules here.
"""
from .. import exc, util
from .base import _from_objects, ColumnSet
from . import operators, visitors
from itertools import chain
from collections import deque
from .elements import BindParameter, ColumnClause, ColumnElement, \
Null, UnaryExpression, literal_column, Label
from .selectable import ScalarSelect, Join, FromClause, FromGrouping
from .schema import Column
join_condition = util.langhelpers.public_factory(
Join._join_condition,
".sql.util.join_condition")
# names that are still being imported from the outside
from .annotation import _shallow_annotate, _deep_annotate, _deep_deannotate
from .elements import _find_columns
from .ddl import sort_tables
def find_join_source(clauses, join_to):
    """Given a list of FROM clauses and a selectable,
    return the first index and element from the list of
    clauses which can be joined against the selectable.

    Returns None, None if no match is found.

    e.g.::

        clause1 = table1.join(table2)
        clause2 = table4.join(table5)

        join_to = table2.join(table3)

        find_join_source([clause1, clause2], join_to) == clause1

    """
    candidates = list(_from_objects(join_to))
    for index, from_clause in enumerate(clauses):
        if any(from_clause.is_derived_from(sel) for sel in candidates):
            return index, from_clause
    return None, None
def visit_binary_product(fn, expr):
    """Produce a traversal of the given expression, delivering
    column comparisons to the given function.

    The function is of the form::

        def my_fn(binary, left, right)

    For each binary expression located which has a
    comparison operator, the product of "left" and
    "right" will be delivered to that function,
    in terms of that binary.

    Hence an expression like::

        and_(
            (a + b) == q + func.sum(e + f),
            j == r
        )

    would have the traversal::

        a <eq> q
        a <eq> e
        a <eq> f
        b <eq> q
        b <eq> e
        b <eq> f
        j <eq> r

    That is, every combination of "left" and
    "right" that doesn't further contain
    a binary comparison is passed as pairs.
    """
    # stack[0] holds the comparison binary currently being expanded so
    # nested visits can report against it
    stack = []

    def visit(element):
        if isinstance(element, ScalarSelect):
            # we don't want to dig into correlated subqueries,
            # those are just column elements by themselves
            yield element
        elif element.__visit_name__ == 'binary' and \
                operators.is_comparison(element.operator):
            stack.insert(0, element)
            for l in visit(element.left):
                for r in visit(element.right):
                    fn(stack[0], l, r)
            stack.pop(0)
            # NOTE(review): visit() is a generator, so this call creates a
            # generator that is never consumed -- the children of an
            # already-expanded comparison are not actually traversed here.
            # Looks like a latent quirk; left untouched.
            for elem in element.get_children():
                visit(elem)
        else:
            if isinstance(element, ColumnClause):
                yield element
            for elem in element.get_children():
                for e in visit(elem):
                    yield e
    # drain the top-level generator; fn() is invoked as a side effect
    list(visit(expr))
def find_tables(clause, check_columns=False,
                include_aliases=False, include_joins=False,
                include_selects=False, include_crud=False):
    """Locate Table objects within the given expression.

    The ``include_*`` flags widen the search to also collect selects,
    joins, aliases, insert/update/delete targets and column-referenced
    tables respectively.
    """
    tables = []
    handlers = {'table': tables.append}

    if include_selects:
        handlers['select'] = handlers['compound_select'] = tables.append
    if include_joins:
        handlers['join'] = tables.append
    if include_aliases:
        handlers['alias'] = tables.append
    if include_crud:
        def _append_stmt_table(stmt):
            tables.append(stmt.table)
        handlers['insert'] = handlers['update'] = handlers['delete'] = _append_stmt_table
    if check_columns:
        handlers['column'] = lambda column: tables.append(column.table)

    visitors.traverse(clause, {'column_collections': False}, handlers)
    return tables
def unwrap_order_by(clause):
    """Break up an 'order by' expression into individual column-expressions,
    without DESC/ASC/NULLS FIRST/NULLS LAST"""

    cols = util.column_set()
    queue = deque([clause])
    while queue:
        element = queue.popleft()
        is_plain_column = isinstance(element, ColumnElement) and (
            not isinstance(element, UnaryExpression)
            or not operators.is_ordering_modifier(element.modifier)
        )
        if is_plain_column:
            cols.add(element)
        else:
            queue.extend(element.get_children())
    return cols
def clause_is_present(clause, search):
    """Given a target clause and a second to search within, return True
    if the target is plainly present in the search without any
    subqueries or aliases involved.

    Basically descends through Joins.

    """

    for elem in surface_selectables(search):
        if clause == elem:  # use == here so that Annotated's compare
            return True
    else:
        # no break occurs in this loop, so the else clause always runs
        # once the iteration is exhausted without a match
        return False
def surface_selectables(clause):
    """Generate ``clause`` plus every selectable reachable from it by
    unwrapping Joins and FromGroupings (no descent into subqueries)."""
    pending = [clause]
    while pending:
        element = pending.pop()
        yield element
        if isinstance(element, Join):
            pending.append(element.left)
            pending.append(element.right)
        elif isinstance(element, FromGrouping):
            pending.append(element.element)
def selectables_overlap(left, right):
    """Return True if left/right have some overlapping selectable"""

    left_elements = set(surface_selectables(left))
    return any(elem in left_elements for elem in surface_selectables(right))
def bind_values(clause):
    """Return an ordered list of "bound" values in the given clause.

    E.g.::

        >>> expr = and_(
        ...    table.c.foo==5, table.c.foo==7
        ... )
        >>> bind_values(expr)
        [5, 7]
    """

    values = []

    def _collect(bind):
        values.append(bind.effective_value)

    visitors.traverse(clause, {}, {'bindparam': _collect})
    return values
def _quote_ddl_expr(element):
    """Render a Python value as a literal for DDL output; strings are
    single-quoted with embedded quotes doubled, everything else repr()'d."""
    if not isinstance(element, util.string_types):
        return repr(element)
    return "'%s'" % element.replace("'", "''")
class _repr_params(object):
"""A string view of bound parameters, truncating
display to the given number of 'multi' parameter sets.
"""
def __init__(self, params, batches):
self.params = params
self.batches = batches
def __repr__(self):
if isinstance(self.params, (list, tuple)) and \
len(self.params) > self.batches and \
isinstance(self.params[0], (list, dict, tuple)):
msg = " ... displaying %i of %i total bound parameter sets ... "
return ' '.join((
repr(self.params[:self.batches - 2])[0:-1],
msg % (self.batches, len(self.params)),
repr(self.params[-2:])[1:]
))
else:
return repr(self.params)
def adapt_criterion_to_null(crit, nulls):
    """given criterion containing bind params, convert selected elements
    to IS NULL.

    ``nulls`` is a set of bind-parameter identifying keys whose
    comparisons should be rewritten. The traversal works on clones
    (cloned_traverse), so ``crit`` itself is not modified.
    """

    def visit_binary(binary):
        if isinstance(binary.left, BindParameter) \
                and binary.left._identifying_key in nulls:
            # reverse order if the NULL is on the left side
            # (order matters: left must be overwritten with right BEFORE
            # right is replaced by Null())
            binary.left = binary.right
            binary.right = Null()
            binary.operator = operators.is_
            binary.negate = operators.isnot
        elif isinstance(binary.right, BindParameter) \
                and binary.right._identifying_key in nulls:
            binary.right = Null()
            binary.operator = operators.is_
            binary.negate = operators.isnot

    return visitors.cloned_traverse(crit, {}, {'binary': visit_binary})
def splice_joins(left, right, stop_on=None):
    """Splice the join chain ``right`` onto ``left``, cloning each Join
    and adapting its ON clause against ``left``.

    NOTE(review): descends down the ``.left`` side of each Join, cloning
    until ``stop_on`` (or a non-Join) is reached; returns the adapted
    top-most element, or ``right`` unchanged when ``left`` is None.
    """
    if left is None:
        return right

    # stack of (element to process, the cloned Join whose .left should
    # receive the processed result)
    stack = [(right, None)]

    adapter = ClauseAdapter(left)
    ret = None
    while stack:
        (right, prevright) = stack.pop()
        if isinstance(right, Join) and right is not stop_on:
            right = right._clone()
            right._reset_exported()
            right.onclause = adapter.traverse(right.onclause)
            stack.append((right.left, right))
        else:
            right = adapter.traverse(right)
        if prevright is not None:
            # hook the processed element back into its cloned parent
            prevright.left = right
        if ret is None:
            # first processed element is the new top of the join chain
            ret = right

    return ret
def reduce_columns(columns, *clauses, **kw):
    """given a list of columns, return a 'reduced' set based on natural
    equivalents.

    the set is reduced to the smallest list of columns which have no natural
    equivalent present in the list. A "natural equivalent" means that two
    columns will ultimately represent the same value because they are related
    by a foreign key.

    \*clauses is an optional list of join clauses which will be traversed
    to further identify columns that are "equivalent".

    \**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
    whose tables are not yet configured, or columns that aren't yet present.

    This function is primarily used to determine the most minimal "primary key"
    from a selectable, by reducing the set of primary key columns present
    in the selectable to just those that are not repeated.

    """
    ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
    only_synonyms = kw.pop('only_synonyms', False)

    columns = util.ordered_column_set(columns)

    # columns found to be redundant, dropped at the end
    omit = util.column_set()
    for col in columns:
        # any column which a foreign key in the set points at is
        # represented by the FK's source column and can be omitted
        for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
            for c in columns:
                if c is col:
                    continue
                try:
                    fk_col = fk.column
                except exc.NoReferencedColumnError:
                    # TODO: add specific coverage here
                    # to test/sql/test_selectable ReduceTest
                    if ignore_nonexistent_tables:
                        continue
                    else:
                        raise
                except exc.NoReferencedTableError:
                    # TODO: add specific coverage here
                    # to test/sql/test_selectable ReduceTest
                    if ignore_nonexistent_tables:
                        continue
                    else:
                        raise
                if fk_col.shares_lineage(c) and \
                        (not only_synonyms or \
                         c.name == col.name):
                    omit.add(col)
                    break

    if clauses:
        # equality criteria across the given join clauses also identify
        # pairs of equivalent columns; the later of each pair is omitted
        def visit_binary(binary):
            if binary.operator == operators.eq:
                cols = util.column_set(chain(*[c.proxy_set
                                               for c in columns.difference(omit)]))
                if binary.left in cols and binary.right in cols:
                    for c in reversed(columns):
                        if c.shares_lineage(binary.right) and \
                                (not only_synonyms or \
                                 c.name == binary.left.name):
                            omit.add(c)
                            break

        for clause in clauses:
            if clause is not None:
                visitors.traverse(clause, {}, {'binary': visit_binary})

    return ColumnSet(columns.difference(omit))
def criterion_as_pairs(expression, consider_as_foreign_keys=None,
                       consider_as_referenced_keys=None, any_operator=False):
    """traverse an expression and locate binary criterion pairs.

    Returns a list of (referenced, foreign) column tuples found in
    equality comparisons (or any binary comparison if ``any_operator``).
    ``consider_as_foreign_keys`` / ``consider_as_referenced_keys`` are
    mutually exclusive column sets that pin which side of each pair a
    matching column lands on; without either, actual ForeignKey
    relationships between Column objects decide orientation.
    """

    if consider_as_foreign_keys and consider_as_referenced_keys:
        raise exc.ArgumentError("Can only specify one of "
                                "'consider_as_foreign_keys' or "
                                "'consider_as_referenced_keys'")

    def col_is(a, b):
        #return a is b
        return a.compare(b)

    def visit_binary(binary):
        if not any_operator and binary.operator is not operators.eq:
            return
        if not isinstance(binary.left, ColumnElement) or \
                not isinstance(binary.right, ColumnElement):
            return

        if consider_as_foreign_keys:
            if binary.left in consider_as_foreign_keys and \
                    (col_is(binary.right, binary.left) or
                     binary.right not in consider_as_foreign_keys):
                pairs.append((binary.right, binary.left))
            elif binary.right in consider_as_foreign_keys and \
                    (col_is(binary.left, binary.right) or
                     binary.left not in consider_as_foreign_keys):
                pairs.append((binary.left, binary.right))
        elif consider_as_referenced_keys:
            if binary.left in consider_as_referenced_keys and \
                    (col_is(binary.right, binary.left) or
                     binary.right not in consider_as_referenced_keys):
                pairs.append((binary.left, binary.right))
            elif binary.right in consider_as_referenced_keys and \
                    (col_is(binary.left, binary.right) or
                     binary.left not in consider_as_referenced_keys):
                pairs.append((binary.right, binary.left))
        else:
            # no explicit sets given: use real ForeignKey relationships
            if isinstance(binary.left, Column) and \
                    isinstance(binary.right, Column):
                if binary.left.references(binary.right):
                    pairs.append((binary.right, binary.left))
                elif binary.right.references(binary.left):
                    pairs.append((binary.left, binary.right))

    pairs = []
    visitors.traverse(expression, {}, {'binary': visit_binary})
    return pairs
class AliasedRow(object):
    """Wrap a RowProxy with a translation map.

    This object allows a set of keys to be translated
    to those present in a RowProxy.

    """

    def __init__(self, row, map):
        # AliasedRow objects don't nest, so un-nest
        # if another AliasedRow was passed
        self.row = row.row if isinstance(row, AliasedRow) else row
        self.map = map

    def __contains__(self, key):
        return self.map[key] in self.row

    def has_key(self, key):
        return key in self

    def __getitem__(self, key):
        return self.row[self.map[key]]

    def keys(self):
        return self.row.keys()
class ClauseAdapter(visitors.ReplacingCloningVisitor):
    """Clones and modifies clauses based on column correspondence.

    E.g.::

      table1 = Table('sometable', metadata,
          Column('col1', Integer),
          Column('col2', Integer)
          )
      table2 = Table('someothertable', metadata,
          Column('col1', Integer),
          Column('col2', Integer)
          )

      condition = table1.c.col1 == table2.c.col1

    make an alias of table1::

      s = table1.alias('foo')

    calling ``ClauseAdapter(s).traverse(condition)`` converts
    condition to read::

      s.c.col1 == table2.c.col1

    """

    def __init__(self, selectable, equivalents=None,
                 include=None, exclude=None,
                 include_fn=None, exclude_fn=None,
                 adapt_on_names=False):
        # ``include``/``exclude`` are column sets; the *_fn variants are
        # predicates. A set and its predicate form are mutually exclusive.
        self.__traverse_options__ = {'stop_on': [selectable]}
        self.selectable = selectable
        if include:
            assert not include_fn
            self.include_fn = lambda e: e in include
        else:
            self.include_fn = include_fn
        if exclude:
            assert not exclude_fn
            self.exclude_fn = lambda e: e in exclude
        else:
            self.exclude_fn = exclude_fn
        self.equivalents = util.column_dict(equivalents or {})
        self.adapt_on_names = adapt_on_names

    def _corresponding_column(self, col, require_embedded,
                              _seen=util.EMPTY_SET):
        # direct correspondence first; fall back to the ``equivalents``
        # map (recursively, with ``_seen`` guarding against cycles) and
        # finally to a plain name lookup if ``adapt_on_names`` is set
        newcol = self.selectable.corresponding_column(
            col,
            require_embedded=require_embedded)
        if newcol is None and col in self.equivalents and col not in _seen:
            for equiv in self.equivalents[col]:
                newcol = self._corresponding_column(equiv,
                                require_embedded=require_embedded,
                                _seen=_seen.union([col]))
                if newcol is not None:
                    return newcol
        if self.adapt_on_names and newcol is None:
            newcol = self.selectable.c.get(col.name)
        return newcol

    # NOTE(review): magic_flag is only ever False in this class; a
    # subclass presumably flips it to skip the whole-FromClause swap in
    # replace() -- confirm against the subclasses elsewhere in the package.
    magic_flag = False

    def replace(self, col):
        if not self.magic_flag and isinstance(col, FromClause) and \
                self.selectable.is_derived_from(col):
            # swap out an entire FROM element for our selectable
            return self.selectable
        elif not isinstance(col, ColumnElement):
            return None
        elif self.include_fn and not self.include_fn(col):
            return None
        elif self.exclude_fn and self.exclude_fn(col):
            return None
        else:
            return self._corresponding_column(col, True)
class ColumnAdapter(ClauseAdapter):
    """Extends ClauseAdapter with extra utility functions.

    Provides the ability to "wrap" this ClauseAdapter
    around another, a columns dictionary which returns
    adapted elements given an original, and an
    adapted_row() factory.
    """
    def __init__(self, selectable, equivalents=None,
                 chain_to=None, include=None,
                 exclude=None, adapt_required=False):
        ClauseAdapter.__init__(self, selectable, equivalents, include, exclude)
        if chain_to:
            self.chain(chain_to)
        # Memoizing dict: original column -> adapted column.
        self.columns = util.populate_column_dict(self._locate_col)
        self.adapt_required = adapt_required
    def wrap(self, adapter):
        # Produce a copy of self whose lookup/adaptation methods first run
        # through self, then through the given `adapter`.
        ac = self.__class__.__new__(self.__class__)
        ac.__dict__ = self.__dict__.copy()
        ac._locate_col = ac._wrap(ac._locate_col, adapter._locate_col)
        ac.adapt_clause = ac._wrap(ac.adapt_clause, adapter.adapt_clause)
        ac.adapt_list = ac._wrap(ac.adapt_list, adapter.adapt_list)
        ac.columns = util.populate_column_dict(ac._locate_col)
        return ac
    adapt_clause = ClauseAdapter.traverse
    adapt_list = ClauseAdapter.copy_and_process
    def _wrap(self, local, wrapped):
        # Compose two single-argument callables: local first, then wrapped.
        def locate(col):
            col = local(col)
            return wrapped(col)
        return locate
    def _locate_col(self, col):
        c = self._corresponding_column(col, True)
        if c is None:
            c = self.adapt_clause(col)
            # anonymize labels in case they have a hardcoded name
            if isinstance(c, Label):
                c = c.label(None)
        # adapt_required used by eager loading to indicate that
        # we don't trust a result row column that is not translated.
        # this is to prevent a column from being interpreted as that
        # of the child row in a self-referential scenario, see
        # inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency
        if self.adapt_required and c is col:
            return None
        return c
    def adapted_row(self, row):
        return AliasedRow(row, self.columns)
    def __getstate__(self):
        # The memoizing `columns` dict holds a bound-method reference and
        # is rebuilt on unpickle, so it is dropped from the pickled state.
        d = self.__dict__.copy()
        del d['columns']
        return d
    def __setstate__(self, state):
        self.__dict__.update(state)
        # NOTE(review): uses util.PopulateDict here but
        # util.populate_column_dict in __init__/wrap — confirm the two are
        # intended to be interchangeable.
        self.columns = util.PopulateDict(self._locate_col)
| jessekl/flixr | venv/lib/python2.7/site-packages/sqlalchemy/sql/util.py | Python | mit | 19,769 |
import jwt
import re
from tornado.gen import coroutine
from tornado.httpclient import AsyncHTTPClient
from os import path, getcwd
from api.model.models import User, UserActivation
from api.Crypto import hash_password
import functools
def authenticated(method):
    """Decorator for handler methods that require a logged-in user.

    When ``self.current_user`` is falsy, responds with HTTP 401 and a JSON
    error body instead of invoking the wrapped method.
    """
    @functools.wraps(method)
    def guard(self, *args, **kwargs):
        if self.current_user:
            return method(self, *args, **kwargs)
        # No valid user on the request: reject with 401 and a JSON error.
        self.set_status(401, 'Error')
        self.set_header("Access-Control-Allow-Origin", "*")
        self.write({'Error': "Token is invalid."})
        return None
    return guard
def validate_token(jwt_token, secret, algorithm):
    """Decode *jwt_token* using *secret* and *algorithm*.

    Returns the decoded payload dict, or None when the token is missing,
    malformed, or expired.
    """
    if jwt_token is None:
        return None
    try:
        return jwt.decode(jwt_token, secret, algorithms=[algorithm])
    except (jwt.DecodeError, jwt.ExpiredSignatureError):
        return None
@coroutine
def fetch_coroutine(url):
    """Asynchronously GET *url* and return the raw response body (bytes)."""
    http_client = AsyncHTTPClient()
    response = yield http_client.fetch(url)
    return response.body
# TODO is the extension always jpg?
@coroutine
def download_avatar(url, username):
    """Fetch an avatar image from *url* and save it under static/avatars/.

    The file is written relative to the current working directory as
    ``static/avatars/<username>.jpg`` (extension hard-coded; see TODO).
    Returns the saved file name.
    """
    data = yield fetch_coroutine(url)
    current_dir = getcwd()
    output_file_name = path.join(current_dir, "static/avatars/") + username + ".jpg"
    save_file(output_file_name, data)
    return username + ".jpg"
def save_file(path, data):
    """Write binary *data* to the file at *path*, creating/overwriting it."""
    with open(path, "bw") as output:
        output.write(data)
def uglify_username(username):
    """Normalize a display name into a slug-like username.

    Drops punctuation (anything that is neither a word character nor
    whitespace), then collapses each whitespace run into a single dash.
    """
    stripped = re.sub(r"[^\w\s]", '', username)
    return re.sub(r"\s+", '-', stripped)
def get_oauth_settings(settings):
    """Group flat OAuth config keys into per-provider credential dicts.

    Reads the facebook/google key+secret entries from *settings* and
    returns ``{"facebook": {...}, "google": {...}}``.
    """
    return {
        "facebook": {
            "key": settings["facebook_api_key"],
            "secret": settings["facebook_api_secret"],
        },
        "google": {
            "key": settings["google_oauth_key"],
            "secret": settings["google_oauth_secret"],
        },
    }
def do_save_user(user_to_save, session):
    """Create and commit a User row from the given mapping.

    *user_to_save* must provide 'username', 'password' (plaintext — hashed
    here), 'name' and 'email'.  The account starts out invalid until the
    e-mail address has been verified, and gets the default avatar.
    Returns the persisted User instance.
    """
    user = User()
    user.username = user_to_save["username"]
    user.password = hash_password(user_to_save["password"])
    user.fullname = user_to_save["name"]
    user.email = user_to_save['email']
    user.valid = False  # A user is not valid until his/her email has been verified.
    user.avatar = "_default_avatar.png"
    session.add(user)
    session.commit()
    return user
def save_activation_info(activation_code, user, session):
    """Persist the e-mail activation code issued for *user*."""
    user_activation = UserActivation()
    user_activation.code = activation_code
    user_activation.user_id = user.id
    session.add(user_activation)
    session.commit()
| fdemian/Morpheus | api/Utils.py | Python | bsd-2-clause | 2,877 |
import sys
import optparse
import subprocess
import random
import pdb
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size': 40})
import math
import numpy as np
import scipy.io
# Load throughput (tau) counts and their timestamps for the 2-minute cycle,
# one pair of files per acceleration setting (1.0, 1.5, 2.6 m/s^2).
a2_10 = np.loadtxt('2min3RCT_taus_a1.0',dtype=int)
t2_10 = np.loadtxt('2min3RCT_taus_time_a1.0',dtype=int)
a2_15 = np.loadtxt('2min3RCT_taus_a1.5',dtype=int)
t2_15 = np.loadtxt('2min3RCT_taus_time_a1.5',dtype=int)
a2_26 = np.loadtxt('2min3RCT_taus_a2.6',dtype=int)
t2_26 = np.loadtxt('2min3RCT_taus_time_a2.6',dtype=int)
# Same data for the 1-minute cycle.
a1_10 = np.loadtxt('1min3RCT_taus_a1.0',dtype=int)
t1_10 = np.loadtxt('1min3RCT_taus_time_a1.0',dtype=int)
a1_15 = np.loadtxt('1min3RCT_taus_a1.5',dtype=int)
t1_15 = np.loadtxt('1min3RCT_taus_time_a1.5',dtype=int)
a1_26 = np.loadtxt('1min3RCT_taus_a2.6',dtype=int)
t1_26 = np.loadtxt('1min3RCT_taus_time_a2.6',dtype=int)
# NOTE(review): ss and ts are computed but never used below.
ss = [2400]*len(t2_10)
ts = np.subtract(t2_10,1200)
# Print per-configuration maxima (Python 2 print statements).
print '2 min cycle -----------'
print 'Max for a = 1.0: ' + str(max(a2_10))
print 'Max for a = 1.5: ' + str(max(a2_15))
print 'Max for a = 2.6: ' + str(max(a2_26))
print '1 min cycle -----------'
print 'Max for a = 1.0: ' + str(max(a1_10))
print 'Max for a = 1.5: ' + str(max(a1_15))
print 'Max for a = 2.6: ' + str(max(a1_26))
# Timestamps are shifted by 1200 s (warm-up offset, presumably — confirm)
# before plotting.
# compare all the 2min cycles with different accelerations
plt.figure(1)
m1, = plt.plot(np.subtract(t2_10,1200),a2_10,label=r'$a=1.0 \: m/s^2$',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t2_15,1200),a2_15,label=r'$a=1.5 \: m/s^2$',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
m3, = plt.plot(np.subtract(t2_26,1200),a2_26,label=r'$a=2.6 \: m/s^2$',linestyle='-',color='b',linewidth=3,marker='o',markersize=7)
plt.legend(handles=[m1,m2,m3],loc='best',fontsize=25)
# compare all the 1min cycles with different accelerations
plt.figure(2)
m1, = plt.plot(np.subtract(t1_10,1200),a1_10,label=r'$a=1.0 \: m/s^2$',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t1_15,1200),a1_15,label=r'$a=1.5 \: m/s^2$',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
m3, = plt.plot(np.subtract(t1_26,1200),a1_26,label=r'$a=2.6 \: m/s^2$',linestyle='-',color='b',linewidth=3,marker='o',markersize=7)
plt.legend(handles=[m1,m2,m3],loc='best',fontsize=25)
# compare the two diff cycles but same acceleration
plt.figure(3)
m1, = plt.plot(np.subtract(t1_10,1200),a1_10,label='1min cycle',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t2_10,1200),a2_10,label='2min cycle',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
plt.legend(handles=[m1,m2],loc='best',fontsize=25)
plt.figure(4)
m1, = plt.plot(np.subtract(t1_15,1200),a1_15,label='1min cycle',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t2_15,1200),a2_15,label='2min cycle',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
plt.legend(handles=[m1,m2],loc='best',fontsize=25)
plt.figure(5)
m1, = plt.plot(np.subtract(t1_26,1200),a1_26,label='1min cycle',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t2_26,1200),a2_26,label='2min cycle',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
plt.legend(handles=[m1,m2],loc='best',fontsize=25)
plt.show()
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListTags
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datacatalog
# [START datacatalog_generated_datacatalog_v1beta1_DataCatalog_ListTags_async]
from google.cloud import datacatalog_v1beta1
async def sample_list_tags():
    """Demonstrate paging through all tags on an entry via ListTags."""
    # Create a client
    client = datacatalog_v1beta1.DataCatalogAsyncClient()

    # Initialize request argument(s)
    request = datacatalog_v1beta1.ListTagsRequest(
        parent="parent_value",
    )

    # Make the request; the returned pager fetches pages lazily.
    page_result = client.list_tags(request=request)

    # Handle the response
    async for response in page_result:
        print(response)
# [END datacatalog_generated_datacatalog_v1beta1_DataCatalog_ListTags_async]
| googleapis/python-datacatalog | samples/generated_samples/datacatalog_generated_datacatalog_v1beta1_data_catalog_list_tags_async.py | Python | apache-2.0 | 1,540 |
from collections import deque
from lcdui import common
from lcdui.ui import widget
import array
import time
class Frame(object):
  """One paintable screen of widgets for a character-cell display.

  Widgets are registered at (row, col) positions with a maximum span and
  rendered into an off-screen ScreenBuffer on each Paint() call.
  """

  def __init__(self, ui):
    self._ui = ui
    self._widgets = {}    # name -> widget instance
    self._position = {}   # name -> (row, col)
    self._span = {}       # name -> max characters the widget may occupy
    self._screen_buffer = ScreenBuffer(self.rows(), self.cols())
    self.onInitialize()

  def BuildWidget(self, widget_cls, name=None, row=0, col=0, span=None, **kwargs):
    """Construct a widget of `widget_cls` and add it to this frame.

    If `name` is None the widget object itself serves as its key.
    Returns the new widget.
    """
    widget_obj = widget_cls(self, **kwargs)
    if name is None:
      name = widget_obj
    self.AddWidget(widget_obj, name, row, col, span)
    return widget_obj

  def rows(self):
    """Returns the number of rows in the frame."""
    return self._ui.rows()

  def cols(self):
    """Returns the number of columns in the frame."""
    return self._ui.cols()

  def onInitialize(self):
    # Hook for subclasses; called once at the end of __init__.
    pass

  def AddWidget(self, widget_obj, name, row=0, col=0, span=None):
    """Adds a widget to the current frame.

    Args:
      widget_obj: the widget to be added
      name: the name of the widget
      row: the row position of the widget
      col: the column position of the widget
      span: the character mask for the widget (or None if no mask)
    """
    self._widgets[name] = widget_obj
    self._position[name] = (row, col)
    # Default span: everything from `col` to the end of the row.
    self._span[name] = span or max(0, self.cols() - col)

  def GetWidget(self, name):
    """Returns the widget registered under `name`, or None."""
    return self._widgets.get(name)

  def RemoveWidget(self, name):
    """Removes the widget with the given name."""
    del self._widgets[name]
    del self._position[name]
    del self._span[name]

  def Paint(self):
    """Causes a repaint to happen, updating any internal buffers."""
    # Python 2 iteration; each widget paints into the shared buffer.
    for name, w in self._widgets.iteritems():
      outstr = w.Paint()
      row, col = self._position[name]
      span = self._span[name]
      self._screen_buffer.Write(array.array('c', outstr), row, col, span)
    return self._screen_buffer
class TextFrame(Frame):
  """A frame showing an optional underscore-padded title plus text lines."""

  def __init__(self, ui, title='', lines=None):
    Frame.__init__(self, ui)
    self._title = title
    if lines is None:
      lines = []
    self._lines = lines
    self._UpdateText()

  def _UpdateText(self):
    """Rebuild one LineWidget per display row from the title and lines."""
    lineno = 0
    if self._title:
      # Title occupies row 0, padded with underscores to full width.
      title_text = '_' + self._title
      lineno = 1
      if len(title_text) < (self.cols() - 1):
        title_text += '_'*(self.cols() - len(title_text) - 1)
      self.BuildWidget(widget.LineWidget, name='line0',
                       row=0, col=0, contents=title_text)
    idx = 0
    # Remaining rows show the stored lines, blank when exhausted.
    for lineno in xrange(lineno, self.rows()):
      if idx < len(self._lines):
        content = self._lines[idx]
      else:
        content = ''
      idx += 1
      # NOTE(review): line_name is computed but never used.
      line_name = 'line%i' % lineno
      self.BuildWidget(widget.LineWidget, 'line%i' % lineno,
                       row=lineno, col=0, contents=content)

  def SetTitle(self, title):
    """Replace the title and rebuild the widgets."""
    self._title = title
    self._UpdateText()

  def AddLine(self, line):
    """Append a line of text and rebuild the widgets."""
    self._lines.append(str(line))
    self._UpdateText()
class MultiFrame(Frame):
  """A frame that cycles through several inner frames on a timer.

  Direct widget management is disabled; content comes from the inner
  frames, each shown for its configured display time.
  """

  def __init__(self, ui):
    Frame.__init__(self, ui)
    self._inner_frames = deque()   # rotation order; index 0 is on screen
    self._display_time = {}        # frame -> seconds it stays visible
    self._last_rotate = None       # timestamp of the last rotation

  def AddWidget(self, widget_obj, name, row=0, col=0, span=None):
    # Widgets belong to the inner frames, not to this container.
    raise NotImplementedError

  def GetWidget(self, name):
    raise NotImplementedError

  def RemoveWidget(self, name):
    raise NotImplementedError

  def frames(self):
    """Return the deque of inner frames (index 0 is currently shown)."""
    return self._inner_frames

  def AddFrame(self, frame, display_time):
    """Append `frame` to the rotation, shown for `display_time` seconds."""
    self._inner_frames.append(frame)
    self._display_time[frame] = display_time

  def RemoveFrame(self, frame):
    """Drop `frame` from the rotation."""
    self._inner_frames.remove(frame)
    del self._display_time[frame]

  def Paint(self):
    """Paint the current inner frame, rotating first if its time is up."""
    if not self._inner_frames:
      return ''
    now = time.time()
    if self._last_rotate:
      active_time = now - self._last_rotate
    else:
      # First paint: start the clock without rotating.
      self._last_rotate = now
      active_time = 0
    curr = self._inner_frames[0]
    if len(self._inner_frames) > 1:
      max_time = self._display_time[curr]
      if active_time > max_time:
        self._inner_frames.rotate(-1)
        self._last_rotate = now
    return curr.Paint()
class MenuFrame(Frame):
  """A scrollable menu: a title row plus a window of selectable items."""

  def onInitialize(self):
    self._show_back = False
    self._items = []         # list of (key, value) entries
    self._cursor_pos = 0     # index into self._items
    self._window_pos = 0     # index of first visible item
    self._window_size = self.rows() - 1  # row 0 is the title
    self._title_widget = self.BuildWidget(widget.LineWidget, row=0, col=0)
    self.setTitle('')
    self._item_widgets = []
    for i in xrange(self._window_size):
      w = self.BuildWidget(widget.LineWidget, row=i+1, col=0)
      self._item_widgets.append(w)
    self._rebuildMenu()

  def showBack(self, enable):
    """Toggle the back symbol in the title bar."""
    self._show_back = enable
    self._rebuildMenu()

  def addItem(self, key, value):
    """Append a (key, value) menu entry; `value` is the display text."""
    self._items.append((key, value))
    self._rebuildMenu()

  def scrollUp(self):
    """Move the cursor up one item, scrolling the window if needed."""
    if self._cursor_pos == 0:
      return
    self._cursor_pos -= 1
    self._updateWindowPos()
    self._rebuildMenu()

  def scrollDown(self):
    """Move the cursor down one item, scrolling the window if needed."""
    if (self._cursor_pos + 1) == len(self._items):
      return
    self._cursor_pos += 1
    self._updateWindowPos()
    self._rebuildMenu()

  def _rebuildMenu(self):
    """Refresh item widgets: visible texts, cursor marker, scroll arrows."""
    items = self._items[self._window_pos:self._window_pos+self._window_size]
    # NOTE(review): num_blank is computed but never used.
    num_blank = self._window_size - len(items)
    symbol_up = self._ui.GetSymbol(common.SYMBOL.MENU_LIST_UP)
    symbol_down = self._ui.GetSymbol(common.SYMBOL.MENU_LIST_DOWN)
    symbol_cursor = self._ui.GetSymbol(common.SYMBOL.MENU_CURSOR)
    for item_pos in xrange(len(items)):
      item_id, item_value = items[item_pos]
      w = self._item_widgets[item_pos]
      w.set_contents(item_value)
    # Clear any rows past the last item.
    for blank_pos in xrange(len(items), self._window_size):
      w = self._item_widgets[blank_pos]
      w.set_contents('')
    # draw cursor
    for i in xrange(len(self._item_widgets)):
      w = self._item_widgets[i]
      if i == (self._cursor_pos % self._window_size):
        w.set_prefix(symbol_cursor + '|')
      else:
        w.set_prefix(' |')
      w.set_postfix('| ')
    # Scroll indicators when items exist above/below the window.
    if self._window_pos > 0:
      self._item_widgets[0].set_postfix('|' + symbol_up)
    if (self._window_pos + self._window_size) < len(self._items):
      self._item_widgets[-1].set_postfix('|' + symbol_down)

  def _updateWindowPos(self):
    # Snap the window to the page containing the cursor.
    self._window_pos = self._cursor_pos - (self._cursor_pos % self._window_size)

  def setTitle(self, title):
    """Set the underscore-padded title, with optional back symbol."""
    prefix = ''
    symbol_back = self._ui.GetSymbol(common.SYMBOL.FRAME_BACK)
    if self._show_back:
      postfix = '_' + symbol_back + '_'
    else:
      postfix = ''
    avail = self.cols()
    title_str = title
    if len(title_str) < avail:
      title_str += '_' * (avail - len(title_str))
    self._title_widget.set_contents(title_str)
    self._title_widget.set_prefix(prefix)
    self._title_widget.set_postfix(postfix)

  def onLoad(self, lcd):
    # Hook invoked when the frame becomes active; no-op by default.
    pass
class ScreenBuffer:
  """A rows x cols character matrix used as an off-screen paint target.

  Backed by a flat array of single characters in row-major order
  (Python 2 ``array('c')``).
  """

  def __init__(self, rows, cols):
    self._rows = rows
    self._cols = cols
    self._array = array.array('c', [' '] * (rows * cols))

  def __eq__(self, other):
    # Bug fix: previously compared against the undefined name
    # `ScreenMatrix`, which raised NameError on any equality check.
    if isinstance(other, ScreenBuffer):
      return self._array == other._array
    return False

  def array(self):
    """Return the underlying character array (row-major)."""
    return self._array

  def _AllocNewArray(self):
    # Fresh all-spaces backing array of the full screen size.
    return array.array('c', [' '] * (self._rows * self._cols))

  def _GetOffset(self, row, col):
    # Flat row-major index of (row, col).
    return row*self._cols + col

  def Clear(self):
    """Reset the buffer to all spaces."""
    self._array = self._AllocNewArray()

  def Write(self, data, row, col, span):
    """Replace up to `span` characters at (row, col) with `data`."""
    assert row in range(self._rows)
    assert col in range(self._cols)
    start = self._GetOffset(row, col)
    datalen = min(len(data), span)
    end = start + datalen
    self._array[start:end] = data[:datalen]

  def __str__(self):
    return self._array.tostring()
| RockingRolli/pylcdui | lcdui/ui/frame.py | Python | gpl-2.0 | 7,655 |
# -*- coding: utf-8 -*-
"""Calculates the current version number.
If possible, uses output of “git describe” modified to conform to the
versioning scheme that setuptools uses (see PEP 386). Releases must be
labelled with annotated tags (signed tags are annotated) of the following
format:
v<num>(.<num>)+ [ {a|b|c|rc} <num> (.<num>)* ]
If “git describe” returns an error (likely because we're in an unpacked copy
of a release tarball, rather than a git working copy), or returns a tag that
does not match the above format, version is read from RELEASE-VERSION file.
To use this script, simply import it your setup.py file, and use the results
of getVersion() as your package version:
import version
setup(
version=version.getVersion(),
.
.
.
)
This will automatically update the RELEASE-VERSION file. The RELEASE-VERSION
file should *not* be checked into git but it *should* be included in sdist
tarballs (as should version.py file). To do this, run:
echo include RELEASE-VERSION version.py >>MANIFEST.in
echo RELEASE-VERSION >>.gitignore
With that setup, a new release can be labelled by simply invoking:
git tag -s v1.0
Taken from: https://gist.github.com/mina86/8782771
"""
__author__ = ('Douglas Creager <dcreager@dcreager.net>',
'Michal Nazarewicz <mina86@mina86.com>')
__license__ = 'This file is placed into the public domain.'
__maintainer__ = 'Michal Nazarewicz'
__email__ = 'mina86@mina86.com'
__all__ = ('getVersion')
import re
import subprocess
import sys
RELEASE_VERSION_FILE = 'RELEASE-VERSION'
# http://www.python.org/dev/peps/pep-0386/
_PEP386_SHORT_VERSION_RE = r'\d+(?:\.\d+)+(?:(?:[abc]|rc)\d+(?:\.\d+)*)?'
_PEP386_VERSION_RE = r'^%s(?:\.post\d+)?(?:\.dev\d+)?$' % (_PEP386_SHORT_VERSION_RE)
_GIT_DESCRIPTION_RE = r'^v(?P<ver>%s)-(?P<commits>\d+)-g(?P<sha>[\da-f]+)$' % (_PEP386_SHORT_VERSION_RE)
def readGitVersion():
    """Derive a PEP 386 version string from ``git describe``, or None.

    Returns the tag's version for an exact tag match, otherwise
    ``<ver>.post<commits>.dev<sha-as-int>``.  Returns None when git is
    unavailable, exits non-zero, or the description does not match the
    expected ``v<ver>-<commits>-g<sha>`` format.
    """
    try:
        proc = subprocess.Popen(('git', 'describe', '--long',
                                 '--match', 'v[0-9]*.*'),
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        data, _ = proc.communicate()
        if proc.returncode:
            return None
        ver = data.splitlines()[0].strip()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # narrowing to (OSError, IndexError) would be safer.
        return None

    if not ver:
        return None
    m = re.search(_GIT_DESCRIPTION_RE, ver)
    if not m:
        sys.stderr.write('version: git description (%s) is invalid, ignoring\n' % ver)
        return None

    commits = int(m.group('commits'))
    if not commits:
        # Exactly on a tag: use the tag version verbatim.
        return m.group('ver')
    else:
        # Past a tag: encode commit count and abbreviated sha (as int).
        return '%s.post%d.dev%d' % (m.group('ver'), commits, int(m.group('sha'), 16))
def readReleaseVersion():
    """Return the version recorded in the RELEASE-VERSION file, or None.

    A version that does not match PEP 386 is still returned, but a
    warning is written to stderr first.  Returns None only when the file
    cannot be read.
    """
    try:
        # `with` guarantees the handle is closed even if readline fails;
        # the old code used a manual try/finally and a bare `except:`
        # that also swallowed KeyboardInterrupt/SystemExit.
        with open(RELEASE_VERSION_FILE) as fd:
            ver = fd.readline().strip()
    except EnvironmentError:
        # File missing or unreadable: no release version recorded.
        return None
    if not re.search(_PEP386_VERSION_RE, ver):
        sys.stderr.write('version: release version (%s) is invalid, will use it anyway\n' % ver)
    return ver
def writeReleaseVersion(version):
    """Persist *version* (plus a trailing newline) to RELEASE-VERSION.

    Uses a context manager so the file is closed even if the write
    fails; the previous open/write/close sequence leaked the handle on
    error.
    """
    with open(RELEASE_VERSION_FILE, 'w') as fd:
        fd.write('%s\n' % version)
def getVersion():
    """Return the current version, preferring git metadata over the file.

    Falls back to the RELEASE-VERSION file when git information is
    unavailable, and re-records the file whenever git disagrees with it.
    Raises ValueError if no version can be determined at all.
    """
    recorded = readReleaseVersion()
    current = readGitVersion() or recorded
    if not current:
        raise ValueError('Cannot find the version number')
    if current != recorded:
        writeReleaseVersion(current)
    return current
if __name__ == '__main__':
    # Command-line use: print the computed version (Python 2 print statement).
    print getVersion()
| jcollie/otx | version.py | Python | gpl-3.0 | 3,527 |
# © 2008-2020 Dorin Hongu <dhongu(@)gmail(.)com
# See README.rst file on addons root folder for license details
{
    "name": "Romania - Invoice Report ",
    "summary": "Localizare Terrabit",
    "version": "14.0.3.0.3",
    "author": "Dorin Hongu," "Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/l10n-romania",
    "license": "AGPL-3",
    "category": "Generic Modules",
    # Modules that must be installed before this one.
    "depends": [
        "base",
        "account",
        "l10n_ro_config",
        "purchase",
        # "deltatech_watermark"
    ],
    # Views and report templates loaded on install/upgrade.
    "data": [
        "views/invoice_report.xml",
        "views/voucher_report.xml",
        "views/payment_report.xml",
        # 'views/account_invoice_view.xml',
        "views/account_voucher_report.xml",
        "views/account_bank_statement_view.xml",
        "views/statement_report.xml",
        # 'views/res_partner_view.xml',
    ],
}
| dhongu/l10n-romania | l10n_ro_invoice_report/__manifest__.py | Python | agpl-3.0 | 887 |
# -*- coding: utf-8 -*-
from setuptools import setup
# Package version, also embedded in the download URL below.
VERSION = '0.1.3'

setup(
    name='django-compoundqueryset',
    packages=[
        'djcompoundqueryset',
    ],
    version=VERSION,
    description='Allows for creation of compound querysets in Django.',
    url='https://github.com/brianwawok/django-compoundqueryset',
    download_url='https://github.com/brianwawok/django-compoundqueryset/' + VERSION,
    maintainer='Brian Wawok',
    maintainer_email='bwawok@gmail.com',
    install_requires=[
        'setuptools',
        'Django >= 1.8',
    ],
    platforms=['Any'],
    keywords=['django', 'compound', 'queryset'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Framework :: Django',
    ],
)
| brianwawok/django-compoundqueryset | setup.py | Python | mit | 1,249 |
import codecs
import os
from setuptools import setup
def read(*parts):
    """Return the UTF-8 text of a file located relative to this script.

    *parts* are path components joined under this module's directory.
    """
    target = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(target, encoding='utf-8') as handle:
        return handle.read()
setup(
    name="django-queued-storage",
    # Version is derived from git tags at build time via setuptools_scm.
    use_scm_version=True,
    setup_requires=['setuptools_scm'],
    url='https://github.com/jazzband/django-queued-storage',
    license='BSD',
    description="Queued remote storage for Django.",
    long_description=read('README.rst'),
    author='Jannis Leidel',
    author_email='jannis@leidel.info',
    packages=['queued_storage'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Utilities',
    ],
    install_requires=[
        'six>=1.10.0',
        'django-appconf >= 0.4',
        'packaging==16.8',
    ],
    zip_safe=False,
)
| jezdez/django-queued-storage | setup.py | Python | bsd-3-clause | 1,190 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the StaffWorkflow table for tracking staff grading state."""

    dependencies = [
        ('assessment', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='StaffWorkflow',
            # One row per submission; timestamps record the grading lifecycle
            # (started, completed, cancelled), all indexed for queue queries.
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('scorer_id', models.CharField(max_length=40, db_index=True)),
                ('course_id', models.CharField(max_length=40, db_index=True)),
                ('item_id', models.CharField(max_length=128, db_index=True)),
                ('submission_uuid', models.CharField(unique=True, max_length=128, db_index=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
                ('grading_completed_at', models.DateTimeField(null=True, db_index=True)),
                ('grading_started_at', models.DateTimeField(null=True, db_index=True)),
                ('cancelled_at', models.DateTimeField(null=True, db_index=True)),
                ('assessment', models.CharField(max_length=128, null=True, db_index=True)),
            ],
            options={
                'ordering': ['created_at', 'id'],
            },
        ),
    ]
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/openassessment/assessment/migrations/0002_staffworkflow.py | Python | agpl-3.0 | 1,376 |
from phystricks import *
def UneCellule():
    """Draw the 'UneCellule' figure: a partition grid with one shaded cell.

    Builds x- and y-subdivisions from cumulative interval lengths, labels
    each subdivision point, and shades one rectangular cell of the grid.
    """
    def Sigma(dep,leng):
        # Cumulative sums: Sigma(a, [l1, l2, ...]) -> [a, a+l1, a+l1+l2, ...]
        sigma=[dep]
        d=dep
        for l in leng :
            d=d+l
            sigma.append(d)
        return sigma
    pspict,fig = SinglePicture("UneCellule")
    # x-axis partition: starts at a1, interval lengths l1, ends at b1.
    a1=1
    l1=[1.2,1.5,0.5,1,1]
    sigma1=Sigma(a1,l1)
    b1=a1+sum(l1)
    # y-axis partition: starts at a2, interval lengths l2, ends at b2.
    a2=2
    l2=[0.5,1.5,1]
    sigma2=Sigma(a2,l2)
    b2=a2+sum(l2)
    # Vertical grid lines and labels y_{1i} along the x-axis; label
    # distance alternates with parity of i to avoid overlap.
    for i in range(len(sigma1)):
        dist=0.3+(1-(-1)**i)*0.15
        x=sigma1[i]
        P=Point(x,0)
        if i == 0 :
            P.put_mark(dist,-90,"$a_1=y_{10}$",automatic_place=pspict)
        elif i == len(sigma1)-1:
            P.put_mark(dist,-90,"$b_1=y_{1%s}$"%str(i),automatic_place=pspict)
        else:
            P.put_mark(dist,-90,"$y_{1%s}$"%str(i),automatic_place=pspict)
        seg1=Segment(P,Point(x,a2))
        seg1.parameters.style="dotted"
        seg2=Segment(Point(x,a2),Point(x,b2))
        pspict.DrawGraphs(P,seg1,seg2)
    # Horizontal grid lines and labels y_{2i} along the y-axis.
    for i in range(len(sigma2)):
        y=sigma2[i]
        P=Point(0,y)
        if i == 0 :
            P.put_mark(0.9,180,"$a_2=y_{20}$",automatic_place=pspict)
        elif i == len(sigma2)-1:
            P.put_mark(0.9,180,"$b_2=y_{2%s}$"%str(i),automatic_place=pspict)
        else:
            P.put_mark(0.4,180,"$y_{2%s}$"%str(i),automatic_place=pspict)
        seg1=Segment(P,Point(a1,y))
        seg1.parameters.style="dotted"
        seg2=Segment(Point(a1,y),Point(b1,y))
        pspict.DrawGraphs(P,seg1,seg2)
    # Shade one cell of the grid in light gray.
    cellule=Rectangle( Point(sigma1[3],sigma2[1]),Point(sigma1[4],sigma2[2]) )
    cellule.parameters.filled()
    cellule.parameters.fill.color="lightgray"
    pspict.DrawGraphs(cellule)
    pspict.axes.no_graduation()
    pspict.DrawDefaultAxes()
    pspict.dilatation(1)
    fig.conclude()
    fig.write_the_file()
| Naereen/mazhe | phystricksUneCellule.py | Python | gpl-3.0 | 1,522 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Euclidean embedding models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from kg_hyp_emb.models.base import KGModel
from kg_hyp_emb.utils import euclidean as euc_utils
import numpy as np
import tensorflow as tf
class BaseE(KGModel):
  """Base model class for Euclidean embeddings."""

  def get_rhs(self, input_tensor):
    """Embed the tail entities (column 2 of the (h, r, t) index triples)."""
    rhs = self.entity(input_tensor[:, 2])
    return rhs

  def get_candidates(self,):
    """Return the full entity embedding matrix, used as ranking candidates."""
    cands = self.entity.embeddings
    return cands

  def similarity_score(self, lhs, rhs, eval_mode):
    """Score query/candidate embedding pairs.

    sim == 'dot': inner product; sim == 'dist': negative squared
    Euclidean distance.  In eval_mode every lhs is scored against every
    rhs (full matrix); otherwise pairs are scored row-wise.
    Raises AttributeError for an unknown similarity setting.
    """
    if self.sim == 'dot':
      if eval_mode:
        score = tf.matmul(lhs, tf.transpose(rhs))
      else:
        score = tf.reduce_sum(lhs * rhs, axis=-1, keepdims=True)
    elif self.sim == 'dist':
      score = -euc_utils.euc_sq_distance(lhs, rhs, eval_mode)
    else:
      raise AttributeError('Similarity function {} not recognized'.format(
          self.sim))
    return score
class CTDecomp(BaseE):
  """Canonical (CP) tensor decomposition: score = <head * rel, tail>."""

  def __init__(self, sizes, args):
    super(CTDecomp, self).__init__(sizes, args)
    # CP scores with a dot product between query and candidate.
    self.sim = 'dot'

  def get_queries(self, input_tensor):
    head = self.entity(input_tensor[:, 0])
    relation = self.rel(input_tensor[:, 1])
    return tf.multiply(head, relation)
class TransE(BaseE):
  """Translation model (TransE): tail is approximated by head + relation."""

  def __init__(self, sizes, args):
    super(TransE, self).__init__(sizes, args)
    # Translations are scored by (negative squared) Euclidean distance.
    self.sim = 'dist'

  def get_queries(self, input_tensor):
    head = self.entity(input_tensor[:, 0])
    translation = self.rel(input_tensor[:, 1])
    return head + translation
class RotE(BaseE):
  """Rotation model: relations act as 2x2 Givens rotations + translation."""

  def __init__(self, sizes, args):
    super(RotE, self).__init__(sizes, args)
    self.sim = 'dist'  # score by (negative squared) Euclidean distance
    # Per-relation rotation parameters, one per embedding dimension.
    self.rel_diag = tf.keras.layers.Embedding(
        input_dim=sizes[1],
        output_dim=self.rank,
        embeddings_initializer=self.initializer,
        embeddings_regularizer=self.rel_regularizer,
        name='rotation_weights')

  def get_queries(self, input_tensor):
    head = self.entity(input_tensor[:, 0])
    translation = self.rel(input_tensor[:, 1])
    rotation = self.rel_diag(input_tensor[:, 1])
    # Rotate the head entity, then translate.
    return euc_utils.givens_rotations(rotation, head) + translation
class RefE(BaseE):
  """Reflection model: relations act as 2x2 Givens reflections + translation."""

  def __init__(self, sizes, args):
    super(RefE, self).__init__(sizes, args)
    self.sim = 'dist'  # score by (negative squared) Euclidean distance
    # Per-relation reflection parameters, one per embedding dimension.
    self.rel_diag = tf.keras.layers.Embedding(
        input_dim=sizes[1],
        output_dim=self.rank,
        embeddings_initializer=self.initializer,
        embeddings_regularizer=self.rel_regularizer,
        name='reflection_weights')

  def get_queries(self, input_tensor):
    head = self.entity(input_tensor[:, 0])
    translation = self.rel(input_tensor[:, 1])
    reflection = self.rel_diag(input_tensor[:, 1])
    # Reflect the head entity, then translate.
    return euc_utils.givens_reflection(reflection, head) + translation
class MurE(BaseE):
  """Diagonal-scaling model (MuRE): scale the head element-wise, then translate."""

  def __init__(self, sizes, args):
    super(MurE, self).__init__(sizes, args)
    self.sim = 'dist'  # score by (negative squared) Euclidean distance
    # Per-relation diagonal scaling parameters.
    self.rel_diag = tf.keras.layers.Embedding(
        input_dim=sizes[1],
        output_dim=self.rank,
        embeddings_initializer=self.initializer,
        embeddings_regularizer=self.rel_regularizer,
        name='scaling_weights')

  def get_queries(self, input_tensor):
    head = self.entity(input_tensor[:, 0])
    translation = self.rel(input_tensor[:, 1])
    scaling = self.rel_diag(input_tensor[:, 1])
    return scaling * head + translation
class AttE(BaseE):
  """Euclidean attention model that combines reflections and rotations."""

  def __init__(self, sizes, args):
    super(AttE, self).__init__(sizes, args)
    self.sim = 'dist'
    # reflection
    self.ref = tf.keras.layers.Embedding(
        input_dim=sizes[1],
        output_dim=self.rank,
        embeddings_initializer=self.initializer,
        embeddings_regularizer=self.rel_regularizer,
        name='reflection_weights')
    # rotation
    self.rot = tf.keras.layers.Embedding(
        input_dim=sizes[1],
        output_dim=self.rank,
        embeddings_initializer=self.initializer,
        embeddings_regularizer=self.rel_regularizer,
        name='rotation_weights')
    # attention
    self.context_vec = tf.keras.layers.Embedding(
        input_dim=sizes[1],
        output_dim=self.rank,
        embeddings_initializer=self.initializer,
        embeddings_regularizer=self.rel_regularizer,
        name='context_embeddings')
    # Scaled dot-product attention temperature (1/sqrt(rank)).
    self.scale = tf.keras.backend.ones(1) / np.sqrt(self.rank)

  def get_reflection_queries(self, entity, ref):
    """Reflect `entity` by per-relation params; shape (batch, 1, rank)."""
    queries = euc_utils.givens_reflection(ref, entity)
    return tf.reshape(queries, (-1, 1, self.rank))

  def get_rotation_queries(self, entity, rot):
    """Rotate `entity` by per-relation params; shape (batch, 1, rank)."""
    queries = euc_utils.givens_rotations(rot, entity)
    return tf.reshape(queries, (-1, 1, self.rank))

  def get_queries(self, input_tensor):
    """Attention-weighted mix of reflected and rotated heads, translated."""
    entity = self.entity(input_tensor[:, 0])
    rel = self.rel(input_tensor[:, 1])
    rot = self.rot(input_tensor[:, 1])
    ref = self.ref(input_tensor[:, 1])
    context_vec = self.context_vec(input_tensor[:, 1])
    ref_q = self.get_reflection_queries(entity, ref)
    rot_q = self.get_rotation_queries(entity, rot)

    # self-attention mechanism: weight the two candidate transforms by
    # their scaled similarity to the per-relation context vector.
    cands = tf.concat([ref_q, rot_q], axis=1)
    context_vec = tf.reshape(context_vec, (-1, 1, self.rank))
    att_weights = tf.reduce_sum(
        context_vec * cands * self.scale, axis=-1, keepdims=True)
    att_weights = tf.nn.softmax(att_weights, axis=-1)
    res = tf.reduce_sum(att_weights * cands, axis=1) + rel
    return res
| tensorflow/neural-structured-learning | research/kg_hyp_emb/models/euclidean.py | Python | apache-2.0 | 6,107 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple GPU's with synchronous updates.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from datetime import datetime
import os.path
import re
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
from inception.slim import slim
# Command-line configuration for multi-GPU ImageNet training.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_string('subset', 'train',
                           """Either 'train' or 'validation'.""")
# Flags governing the hardware employed for running TensorFlow.
tf.app.flags.DEFINE_integer('num_gpus', 1,
                            """How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
# Flags governing the type of training.
tf.app.flags.DEFINE_boolean('fine_tune', False,
                            """If set, randomly initialize the final layer """
                            """of weights in order to train the network on a """
                            """new task.""")
tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',
                           """If specified, restore this pretrained model """
                           """before beginning any training.""")
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# for more guidance and discussion.
#
# With 8 Tesla K40's and a batch size = 256, the following setup achieves
# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).
# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.
tf.app.flags.DEFINE_float('initial_learning_rate', 0.1,
                          """Initial learning rate.""")
tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,
                          """Epochs after which learning rate decays.""")
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,
                          """Learning rate decay factor.""")
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
def _tower_loss(images, labels, num_classes, scope):
  """Calculate the total loss on a single tower running the ImageNet model.
  We perform 'batch splitting'. This means that we cut up a batch across
  multiple GPU's. For instance, if the batch size = 32 and num_gpus = 2,
  then each tower will operate on an batch of 16 images.
  Args:
    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
      FLAGS.image_size, 3].
    labels: 1-D integer Tensor of [batch_size].
    num_classes: number of classes
    scope: unique prefix string identifying the ImageNet tower, e.g.
      'tower_0'.
  Returns:
    Tensor of shape [] containing the total loss for a batch of data
  """
  # When fine-tuning a model, we do not restore the logits but instead we
  # randomly initialize the logits. The number of classes in the output of the
  # logit is the number of classes in specified Dataset.
  restore_logits = not FLAGS.fine_tune
  # Build inference Graph.
  logits = inception.inference(images, num_classes, for_training=True,
                               restore_logits=restore_logits,
                               scope=scope)
  # Build the portion of the Graph calculating the losses. Note that we will
  # assemble the total_loss using a custom function below.
  # inception.loss() adds its losses to the slim LOSSES_COLLECTION rather
  # than returning them, which is why its return value is unused here.
  split_batch_size = images.get_shape().as_list()[0]
  inception.loss(logits, labels, batch_size=split_batch_size)
  # Assemble all of the losses for the current tower only (filtered by scope).
  losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
  # Calculate the total loss for the current tower.
  regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
  total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  loss_averages_op = loss_averages.apply(losses + [total_loss])
  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.scalar_summary(loss_name +' (raw)', l)
    tf.scalar_summary(loss_name, loss_averages.average(l))
  # The control dependency forces the moving-average update to run every time
  # total_loss is evaluated.
  with tf.control_dependencies([loss_averages_op]):
    total_loss = tf.identity(total_loss)
  return total_loss
def _average_gradients(tower_grads):
  """Average the gradient of each shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over individual gradients. The inner list is over the gradient
      calculation for each tower.
  Returns:
    List of pairs of (gradient, variable) where the gradient has been averaged
    across all towers.
  """
  averaged = []
  # zip(*tower_grads) regroups the per-tower lists so each iteration sees
  # every tower's (grad, var) pair for one shared variable.
  for per_variable in zip(*tower_grads):
    # Stack the per-tower gradients along a new leading 'tower' dimension,
    # then average over that dimension. (This file uses the old TF
    # tf.concat(dim, values) argument order.)
    stacked = tf.concat(0, [tf.expand_dims(grad, 0)
                            for grad, _ in per_variable])
    mean_grad = tf.reduce_mean(stacked, 0)
    # The variables are shared across towers, so the first tower's pointer
    # to the variable suffices.
    shared_var = per_variable[0][1]
    averaged.append((mean_grad, shared_var))
  return averaged
def train(dataset):
  """Train on dataset for a number of steps.

  Args:
    dataset: ImageNet Dataset object providing the training examples; its
      num_examples_per_epoch() and num_classes() drive the learning-rate
      schedule and the logits size.
  """
  with tf.Graph().as_default(), tf.device('/cpu:0'):
    # Create a variable to count the number of train() calls. This equals the
    # number of batches processed * FLAGS.num_gpus.
    global_step = tf.get_variable(
        'global_step', [],
        initializer=tf.constant_initializer(0), trainable=False)
    # Calculate the learning rate schedule.
    num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                             FLAGS.batch_size)
    decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                    global_step,
                                    decay_steps,
                                    FLAGS.learning_rate_decay_factor,
                                    staircase=True)
    # Create an optimizer that performs gradient descent.
    opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
                                    momentum=RMSPROP_MOMENTUM,
                                    epsilon=RMSPROP_EPSILON)
    # Get images and labels for ImageNet and split the batch across GPUs.
    assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
        'Batch size must be divisible by number of GPUs')
    split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)
    # Override the number of preprocessing threads to account for the increased
    # number of GPU towers.
    num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
    images, labels = image_processing.distorted_inputs(
        dataset,
        num_preprocess_threads=num_preprocess_threads)
    input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1
    # Split the batch of images and labels for towers.
    images_splits = tf.split(0, FLAGS.num_gpus, images)
    labels_splits = tf.split(0, FLAGS.num_gpus, labels)
    # Calculate the gradients for each model tower.
    tower_grads = []
    for i in xrange(FLAGS.num_gpus):
      with tf.device('/gpu:%d' % i):
        with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
          # Force all Variables to reside on the CPU.
          with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
            # Calculate the loss for one tower of the ImageNet model. This
            # function constructs the entire ImageNet model but shares the
            # variables across all towers.
            loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,
                               scope)
          # Reuse variables for the next tower.
          tf.get_variable_scope().reuse_variables()
          # Retain the summaries from the final tower.
          summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
          # Retain the Batch Normalization updates operations only from the
          # final tower. Ideally, we should grab the updates from all towers
          # but these stats accumulate extremely fast so we can ignore the
          # other stats from the other towers without significant detriment.
          batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
                                                scope)
          # Calculate the gradients for the batch of data on this ImageNet
          # tower.
          grads = opt.compute_gradients(loss)
          # Keep track of the gradients across all towers.
          tower_grads.append(grads)
    # We must calculate the mean of each gradient. Note that this is the
    # synchronization point across all towers.
    grads = _average_gradients(tower_grads)
    # Add a summaries for the input processing and global_step.
    summaries.extend(input_summaries)
    # Add a summary to track the learning rate.
    summaries.append(tf.scalar_summary('learning_rate', lr))
    # Add histograms for gradients.
    for grad, var in grads:
      if grad is not None:
        summaries.append(
            tf.histogram_summary(var.op.name + '/gradients', grad))
    # Apply the gradients to adjust the shared variables.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
      summaries.append(tf.histogram_summary(var.op.name, var))
    # Track the moving averages of all trainable variables.
    # Note that we maintain a "double-average" of the BatchNormalization
    # global statistics. This is more complicated then need be but we employ
    # this for backward-compatibility with our previous models.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY, global_step)
    # Another possiblility is to use tf.slim.get_variables().
    variables_to_average = (tf.trainable_variables() +
                            tf.moving_average_variables())
    variables_averages_op = variable_averages.apply(variables_to_average)
    # Group all updates to into a single train op.
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, variables_averages_op,
                        batchnorm_updates_op)
    # Create a saver.
    saver = tf.train.Saver(tf.all_variables())
    # Build the summary operation from the last tower summaries.
    summary_op = tf.merge_summary(summaries)
    # Build an initialization operation to run below.
    init = tf.initialize_all_variables()
    # Start running operations on the Graph. allow_soft_placement must be set to
    # True to build towers on GPU, as some of the ops do not have GPU
    # implementations.
    sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)
    if FLAGS.pretrained_model_checkpoint_path:
      assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
      variables_to_restore = tf.get_collection(
          slim.variables.VARIABLES_TO_RESTORE)
      restorer = tf.train.Saver(variables_to_restore)
      restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
      print('%s: Pre-trained model restored from %s' %
            (datetime.now(), FLAGS.pretrained_model_checkpoint_path))
    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)
    summary_writer = tf.train.SummaryWriter(
        FLAGS.train_dir,
        graph_def=sess.graph.as_graph_def(add_shapes=True))
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # `loss` here is the last tower's total loss; all towers share the same
      # variables, so it is representative of training progress.
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time
      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
      if step % 10 == 0:
        examples_per_sec = FLAGS.batch_size / float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print(format_str % (datetime.now(), step, loss_value,
                            examples_per_sec, duration))
      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)
      # Save the model checkpoint periodically.
      if step % 500 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
| Cyber-Neuron/inception_v3 | inception/inception/inception_train.py | Python | apache-2.0 | 15,221 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pifpaf import drivers
class EtcdDriver(drivers.Driver):
    """Pifpaf driver that runs etcd, either standalone or as a 3-node cluster."""

    DEFAULT_PORT = 2379       # default client port
    DEFAULT_PEER_PORT = 2380  # default peer (cluster traffic) port
    DEFAULT_CLUSTER = False   # single node unless --cluster is given

    def __init__(self, port=DEFAULT_PORT,
                 peer_port=DEFAULT_PEER_PORT,
                 cluster=DEFAULT_CLUSTER,
                 **kwargs):
        """Create a new etcd server.

        :param port: client port for the (first) etcd node
        :param peer_port: peer port for the (first) etcd node
        :param cluster: if True, start a three-node cluster on consecutive
            ports instead of a single node
        """
        super(EtcdDriver, self).__init__(**kwargs)
        self.port = port
        self.peer_port = peer_port
        self.cluster = cluster

    @classmethod
    def get_options(cls):
        """Return the command-line options understood by this driver."""
        return [
            {"param_decls": ["--port"],
             "type": int,
             "default": cls.DEFAULT_PORT,
             "help": "port to use for etcd"},
            {"param_decls": ["--peer-port"],
             "type": int,
             "default": cls.DEFAULT_PEER_PORT,
             # BUG FIX: the user-visible help text previously read "peeres".
             "help": "port to use for etcd peers"},
            {"param_decls": ["--cluster"],
             "is_flag": True,
             "default": cls.DEFAULT_CLUSTER,
             "help": "activate etcd cluster"},
        ]

    def _setUp(self):
        """Start the etcd process(es) and export connection information."""
        super(EtcdDriver, self)._setUp()
        if self.cluster:
            # Three members on consecutive port pairs: client ports at
            # port, port+2, port+4; each member's peer port is its client
            # port + 1.
            http_urls = [("http://localhost:%d" % (p + 1),
                          "http://localhost:%d" % p)
                         for p in (self.port, self.port + 2, self.port + 4)]
            # The static bootstrap configuration is identical for every
            # member, so build it once outside the loop.
            initial_cluster = ",".join(
                "pifpaf%d=%s" % (member, peer)
                for member, (peer, _) in enumerate(http_urls))
            for i, (peer_url, client_url) in enumerate(http_urls):
                tempdir = os.path.join(self.tempdir, str(i))
                c, _ = self._exec([
                    "etcd",
                    "--data-dir", tempdir,
                    "--name", "pifpaf%d" % i,
                    "--listen-client-urls", client_url,
                    "--advertise-client-urls", client_url,
                    "--listen-peer-urls", peer_url,
                    "--initial-advertise-peer-urls", peer_url,
                    "--initial-cluster-token", "etcd-cluster-pifpaf",
                    "--initial-cluster", initial_cluster,
                    "--initial-cluster-state", "new",
                ], wait_for_line="listening for client requests on")
            endpoints = ",".join(client_url
                                 for peer_url, client_url in http_urls)
        else:
            client_url = "http://localhost:%d" % self.port
            peer_url = "http://localhost:%d" % self.peer_port
            c, _ = self._exec(["etcd",
                               "--data-dir", self.tempdir,
                               "--listen-peer-urls", peer_url,
                               "--listen-client-urls", client_url,
                               "--advertise-client-urls", client_url],
                              wait_for_line="listening for client requests on")
            endpoints = client_url
        # Export connection details for consumers of the driver.
        self.putenv("ETCD_PORT", str(self.port))
        self.putenv("ETCD_PEER_PORT", str(self.peer_port))
        self.putenv("HTTP_URL", "http://localhost:%d" % self.port)
        self.putenv("URL", "etcd://localhost:%d" % self.port)
        self.putenv("ETCDCTL_ENDPOINTS", endpoints, True)
| sileht/pifpaf | pifpaf/drivers/etcd.py | Python | apache-2.0 | 3,783 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is a wrapper around the GN binary that is pulled from Google
Cloud Storage when you sync Chrome. The binaries go into platform-specific
subdirectories in the source tree.
This script makes there be one place for forwarding to the correct platform's
binary. It will also automatically try to find the gn binary when run inside
the chrome source tree, so users can just type "gn" on the command line
(normally depot_tools is on the path)."""
import gclient_utils
import os
import subprocess
import sys
def main(args):
  """Locate the platform-specific gn binary and run it with our arguments."""
  bin_path = gclient_utils.GetBuildtoolsPlatformBinaryPath()
  if not bin_path:
    sys.stderr.write('gn.py: Could not find checkout in any parent of '
                     'the current path.\nThis must be run inside a '
                     'checkout.\n')
    return 1
  gn_path = os.path.join(bin_path, 'gn' + gclient_utils.GetExeSuffix())
  if not os.path.exists(gn_path):
    sys.stderr.write('gn.py: Could not find gn executable at: %s\n' % gn_path)
    return 2
  # Note: forwards sys.argv directly rather than the `args` parameter,
  # matching the historical behavior of this wrapper.
  return subprocess.call([gn_path] + sys.argv[1:])
# Script entry point: forward argv to main() and propagate its return code
# as the process exit status.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| michalliu/chromium-depot_tools | gn.py | Python | bsd-3-clause | 1,319 |
## {{{ http://code.activestate.com/recipes/492223/ (r1)
def protect_utf8(wrapped_function, encoding='UTF-8'):
    """Decorator shielding a byte-string-naive function from breaking UTF-8.

    If the wrapped function's first argument is a byte string that decodes
    cleanly as ``encoding``, decode it to unicode before the call; if the
    call then returns a unicode object, encode it back so the caller still
    sees a byte string. Arguments that are not strings, or that do not
    decode, are passed through untouched.

    The wrapped function should take a string as its first parameter and may
    return an object of the same type. Pass ``encoding`` to protect
    something other than UTF-8.
    """
    def proxy_function(s, *args, **kwargs):
        decoded = False
        if isinstance(s, str):
            try:
                s = s.decode(encoding)
            except UnicodeDecodeError:
                pass
            else:
                decoded = True
        result = wrapped_function(s, *args, **kwargs)
        # Only re-encode when we decoded on the way in and got unicode back.
        if decoded and isinstance(result, unicode):
            result = result.encode(encoding)
        return result
    return proxy_function
def truncate(s, length=1, etc="..."):
    """Truncate ``s`` to ``length``, appending ``etc`` when truncation occurs.

    Strings already shorter than ``length`` are returned unchanged. This is
    really just a silly demonstration function: the slice reserves room for
    ``etc`` by cutting at ``length - len(etc)``, which can be negative.
    """
    if len(s) >= length:
        return s[:length - len(etc)] + etc
    return s
# Wrap truncate so byte strings containing valid UTF-8 are handled as
# characters rather than raw bytes (decorator syntax needs Python 2.4+).
truncate = protect_utf8(truncate) # I'm stuck on Python 2.3.
if __name__ == '__main__':
    # Self-test: the UTF-8 byte string below is three Katakana characters
    # (3 bytes each); truncating to one character keeps the first 3 bytes.
    assert (truncate('\xe3\x82\xa6\xe3\x82\xb6\xe3\x83\x86', etc="") ==
            '\xe3\x82\xa6')
    assert truncate('abc') == 'a...'
    # Unicode input stays unicode and is truncated per character.
    assert truncate(u'\u30a0\u30b1\u30c3', etc="") == u'\u30a0'
| ParsonsAMT/Myne | datamining/libs/truncate.py | Python | agpl-3.0 | 1,967 |
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for movielens_dataset.py."""
import collections
import numpy as np
import pandas as pd
import tensorflow as tf
from reconstruction.movielens import movielens_dataset
class MovielensDatasetTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.ratings_df = pd.DataFrame(
{
'UserID': [0, 0, 1, 1, 1, 1],
'MovieID': [0, 1, 2, 3, 2, 2],
'Rating': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
'Timestamp': [1, 2, 1, 4, 5, 3]
},
columns=['UserID', 'MovieID', 'Rating', 'Timestamp'])
def _verify_server_data_arrays(self, users, movies, ratings):
"""Ensures produced data arrays have expected dtype."""
self.assertIsInstance(users, np.ndarray)
self.assertEqual(users.dtype, np.int64)
self.assertIsInstance(movies, np.ndarray)
self.assertEqual(movies.dtype, np.int64)
self.assertIsInstance(ratings, np.ndarray)
self.assertEqual(ratings.dtype, np.float32)
  def test_split_ratings_df(self):
    """Per-user train/val split with the remainder going to test."""
    train_ratings_df, val_ratings_df, test_ratings_df = movielens_dataset.split_ratings_df(
        self.ratings_df, train_fraction=0.5, val_fraction=0.25)
    self.assertLen(train_ratings_df, 3)
    # Consistent with per-user floored fractions: user 0 (2 rows) gives
    # 1 train, floor(2 * 0.25) = 0 val, 1 test; user 1 (4 rows) gives
    # 2 train, floor(4 * 0.25) = 1 val, 1 test. Remaining examples always
    # go to the test set.
    self.assertLen(val_ratings_df, 1)
    self.assertLen(test_ratings_df, 2)
    self.assertListEqual(list(train_ratings_df['UserID']), [0, 1, 1])
    self.assertListEqual(list(train_ratings_df['MovieID']), [0, 2, 2])
    self.assertListEqual(list(train_ratings_df['Rating']), [1.0, 3.0, 6.0])
    self.assertListEqual(list(train_ratings_df['Timestamp']), [1, 1, 3])
    self.assertListEqual(list(val_ratings_df['UserID']), [1])
    self.assertListEqual(list(val_ratings_df['MovieID']), [3])
    self.assertListEqual(list(val_ratings_df['Rating']), [4.0])
    self.assertListEqual(list(val_ratings_df['Timestamp']), [4])
    self.assertListEqual(list(test_ratings_df['UserID']), [0, 1])
    self.assertListEqual(list(test_ratings_df['MovieID']), [1, 2])
    self.assertListEqual(list(test_ratings_df['Rating']), [2.0, 5.0])
    self.assertListEqual(list(test_ratings_df['Timestamp']), [2, 5])
def test_split_ratings_df_raises_error(self):
with self.assertRaises(ValueError):
movielens_dataset.split_ratings_df(
self.ratings_df, train_fraction=0.5, val_fraction=1.25)
  def test_split_ratings_df_no_val(self):
    """With val_fraction=0 every non-train example lands in the test set."""
    train_ratings_df, _, test_ratings_df = movielens_dataset.split_ratings_df(
        self.ratings_df, train_fraction=0.5, val_fraction=0.0)
    self.assertLen(train_ratings_df, 3)
    self.assertLen(test_ratings_df, 3)
    self.assertListEqual(list(train_ratings_df['UserID']), [0, 1, 1])
    self.assertListEqual(list(train_ratings_df['MovieID']), [0, 2, 2])
    self.assertListEqual(list(train_ratings_df['Rating']), [1.0, 3.0, 6.0])
    self.assertListEqual(list(train_ratings_df['Timestamp']), [1, 1, 3])
    self.assertListEqual(list(test_ratings_df['UserID']), [0, 1, 1])
    self.assertListEqual(list(test_ratings_df['MovieID']), [1, 3, 2])
    self.assertListEqual(list(test_ratings_df['Rating']), [2.0, 4.0, 5.0])
    self.assertListEqual(list(test_ratings_df['Timestamp']), [2, 4, 5])
  def test_split_ratings_df_fraction_floor(self):
    """Ensures edge-case behavior is as expected."""
    train_ratings_df, val_ratings_df, test_ratings_df = movielens_dataset.split_ratings_df(
        self.ratings_df, train_fraction=0.5, val_fraction=0.49)
    self.assertLen(train_ratings_df, 3)
    # Per-user val counts are floored: user 0 contributes floor(2 * 0.49) = 0
    # val examples while user 1 contributes floor(4 * 0.49) = 1, and the
    # remaining examples (one per user) go to the test set.
    self.assertLen(val_ratings_df, 1)
    self.assertLen(test_ratings_df, 2)
    self.assertListEqual(list(train_ratings_df['UserID']), [0, 1, 1])
    self.assertListEqual(list(train_ratings_df['MovieID']), [0, 2, 2])
    self.assertListEqual(list(train_ratings_df['Rating']), [1.0, 3.0, 6.0])
    self.assertListEqual(list(train_ratings_df['Timestamp']), [1, 1, 3])
    self.assertListEqual(list(val_ratings_df['UserID']), [1])
    self.assertListEqual(list(val_ratings_df['MovieID']), [3])
    self.assertListEqual(list(val_ratings_df['Rating']), [4.0])
    self.assertListEqual(list(val_ratings_df['Timestamp']), [4])
    self.assertListEqual(list(test_ratings_df['UserID']), [0, 1])
    self.assertListEqual(list(test_ratings_df['MovieID']), [1, 2])
    self.assertListEqual(list(test_ratings_df['Rating']), [2.0, 5.0])
    self.assertListEqual(list(test_ratings_df['Timestamp']), [2, 5])
def test_get_user_examples(self):
user_examples = movielens_dataset.get_user_examples(self.ratings_df, 0)
self.assertCountEqual(user_examples, [(0, 0, 1.0), (0, 1, 2.0)])
def test_get_user_examples_max_examples(self):
user_examples = movielens_dataset.get_user_examples(
self.ratings_df, 0, max_examples_per_user=1)
# Ensure number of examples is now 1. The exact example may vary due to
# shuffling.
self.assertLen(user_examples, 1)
def test_create_tf_dataset_for_user(self):
tf_dataset = movielens_dataset.create_tf_dataset_for_user(
self.ratings_df, 0, personal_model=True, batch_size=1)
dataset_elements = list(tf_dataset.as_numpy_iterator())
expected_elements = [
collections.OrderedDict(
x=np.array([0], dtype=np.int64),
y=np.array([1.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([1], dtype=np.int64),
y=np.array([2.0], dtype=np.float32))
]
self.assertLen(dataset_elements, 2)
self.assertCountEqual(dataset_elements, expected_elements)
def test_create_tf_dataset_for_user_local_epochs(self):
tf_dataset = movielens_dataset.create_tf_dataset_for_user(
self.ratings_df,
0,
personal_model=True,
batch_size=1,
num_local_epochs=3)
dataset_elements = list(tf_dataset.as_numpy_iterator())
expected_elements = [
collections.OrderedDict(
x=np.array([0], dtype=np.int64),
y=np.array([1.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([1], dtype=np.int64),
y=np.array([2.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([0], dtype=np.int64),
y=np.array([1.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([1], dtype=np.int64),
y=np.array([2.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([0], dtype=np.int64),
y=np.array([1.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([1], dtype=np.int64),
y=np.array([2.0], dtype=np.float32)),
]
self.assertLen(dataset_elements, 6)
self.assertCountEqual(dataset_elements, expected_elements)
def test_create_tf_dataset_for_user_batch_size(self):
tf_dataset = movielens_dataset.create_tf_dataset_for_user(
self.ratings_df, 0, personal_model=True, batch_size=3)
dataset_elements = list(tf_dataset.as_numpy_iterator())
expected_elements = [
collections.OrderedDict(
x=np.array([0, 1], dtype=np.int64),
y=np.array([1.0, 2.0], dtype=np.float32)),
]
self.assertLen(dataset_elements, 1)
self.assertCountEqual(dataset_elements[0], expected_elements[0])
  def test_create_tf_dataset_for_user_non_personal_model(self):
    """With personal_model=False, features are (user_id, movie_id) pairs."""
    tf_dataset = movielens_dataset.create_tf_dataset_for_user(
        self.ratings_df, 0, personal_model=False, batch_size=1)
    dataset_elements = list(tf_dataset.as_numpy_iterator())
    expected_elements = [
        collections.OrderedDict(
            x=(np.array([0], dtype=np.int64), np.array([0], dtype=np.int64)),
            y=np.array([1.0], dtype=np.float32)),
        collections.OrderedDict(
            x=(np.array([0], dtype=np.int64), np.array([1], dtype=np.int64)),
            y=np.array([2.0], dtype=np.float32))
    ]
    self.assertLen(dataset_elements, 2)
    self.assertCountEqual(dataset_elements, expected_elements)
def test_create_tf_dataset_for_user_max_examples_epochs(self):
tf_dataset = movielens_dataset.create_tf_dataset_for_user(
self.ratings_df,
0,
personal_model=True,
batch_size=1,
max_examples_per_user=1,
num_local_epochs=2)
dataset_elements = list(tf_dataset.as_numpy_iterator())
# Ensure each epoch has 1 element. Exact element is random.
self.assertLen(dataset_elements, 2)
def test_create_tf_datasets(self):
tf_datasets = movielens_dataset.create_tf_datasets(
self.ratings_df,
personal_model=True,
batch_size=1,
max_examples_per_user=None,
num_local_epochs=2)
user1_elements = list(tf_datasets[0].as_numpy_iterator())
user2_elements = list(tf_datasets[1].as_numpy_iterator())
expected_user1_elements = [
collections.OrderedDict(
x=np.array([0], dtype=np.int64),
y=np.array([1.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([1], dtype=np.int64),
y=np.array([2.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([0], dtype=np.int64),
y=np.array([1.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([1], dtype=np.int64),
y=np.array([2.0], dtype=np.float32)),
]
expected_user2_elements = [
collections.OrderedDict(
x=np.array([2], dtype=np.int64),
y=np.array([3.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([3], dtype=np.int64),
y=np.array([4.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([2], dtype=np.int64),
y=np.array([5.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([2], dtype=np.int64),
y=np.array([6.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([2], dtype=np.int64),
y=np.array([3.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([3], dtype=np.int64),
y=np.array([4.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([2], dtype=np.int64),
y=np.array([5.0], dtype=np.float32)),
collections.OrderedDict(
x=np.array([2], dtype=np.int64),
y=np.array([6.0], dtype=np.float32)),
]
self.assertLen(user1_elements, 4)
self.assertLen(user2_elements, 8)
self.assertCountEqual(user1_elements, expected_user1_elements)
self.assertCountEqual(user2_elements, expected_user2_elements)
def test_create_tf_datasets_batch_size_num_examples(self):
tf_datasets = movielens_dataset.create_tf_datasets(
self.ratings_df,
personal_model=True,
batch_size=2,
max_examples_per_user=1)
user1_elements = list(tf_datasets[0].as_numpy_iterator())
user2_elements = list(tf_datasets[1].as_numpy_iterator())
expected_user1_element = collections.OrderedDict(
x=np.array([0, 1], dtype=np.int64),
y=np.array([1.0, 2.0], dtype=np.float32))
expected_user2_element = collections.OrderedDict(
x=np.array([2, 3], dtype=np.int64),
y=np.array([3.0, 4.0], dtype=np.float32))
self.assertLen(user1_elements, 1)
self.assertLen(user1_elements, 1)
self.assertCountEqual(user1_elements[0], expected_user1_element)
self.assertCountEqual(user2_elements[0], expected_user2_element)
def test_create_tf_datasets_non_personal_model(self):
tf_datasets = movielens_dataset.create_tf_datasets(
self.ratings_df,
personal_model=False,
batch_size=2,
max_examples_per_user=1,
num_local_epochs=1)
user1_elements = list(tf_datasets[0].as_numpy_iterator())
user2_elements = list(tf_datasets[1].as_numpy_iterator())
expected_user1_element = collections.OrderedDict(
x=(np.array([0, 0], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
y=np.array([1.0, 2.0], dtype=np.float32))
expected_user2_element = collections.OrderedDict(
x=(np.array([1, 1], dtype=np.int64), np.array([2, 3], dtype=np.int64)),
y=np.array([3.0, 4.0], dtype=np.float32))
self.assertLen(user1_elements, 1)
self.assertLen(user1_elements, 1)
self.assertCountEqual(user1_elements[0], expected_user1_element)
self.assertCountEqual(user2_elements[0], expected_user2_element)
def test_split_tf_datasets(self):
tf_datasets = [
tf.data.Dataset.range(10),
tf.data.Dataset.range(9),
tf.data.Dataset.range(8),
tf.data.Dataset.range(7),
]
train_datasets, val_datasets, test_datasets = movielens_dataset.split_tf_datasets(
tf_datasets, train_fraction=.5, val_fraction=.25)
self.assertLen(train_datasets, 2)
self.assertLen(val_datasets, 1)
self.assertLen(test_datasets, 1)
def test_split_tf_datasets_empty_val(self):
tf_datasets = [
tf.data.Dataset.range(10),
tf.data.Dataset.range(9),
tf.data.Dataset.range(8),
tf.data.Dataset.range(7),
]
train_datasets, val_datasets, test_datasets = movielens_dataset.split_tf_datasets(
tf_datasets, train_fraction=.5, val_fraction=0.0)
self.assertLen(train_datasets, 2)
self.assertEmpty(val_datasets)
self.assertLen(test_datasets, 2)
def test_create_merged_np_arrays(self):
users, movies, ratings = movielens_dataset.create_merged_np_arrays(
self.ratings_df, max_examples_per_user=None, shuffle_across_users=False)
self._verify_server_data_arrays(users, movies, ratings)
self.assertAllEqual(np.shape(users), [6, 1])
self.assertAllEqual(np.shape(movies), [6, 1])
self.assertAllEqual(np.shape(ratings), [6, 1])
zipped_merged_data = zip(users, movies, ratings)
self.assertCountEqual(zipped_merged_data, [(0, 0, 1.0), (0, 1, 2.0),
(1, 2, 3.0), (1, 3, 4.0),
(1, 2, 5.0), (1, 2, 6.0)])
def test_create_merged_np_arrays_max_examples_shuffle(self):
users, movies, ratings = movielens_dataset.create_merged_np_arrays(
self.ratings_df, max_examples_per_user=2, shuffle_across_users=True)
self._verify_server_data_arrays(users, movies, ratings)
self.assertAllEqual(np.shape(users), [4, 1])
self.assertAllEqual(np.shape(movies), [4, 1])
self.assertAllEqual(np.shape(ratings), [4, 1])
  def test_create_user_split_np_arrays(self):
    """Fractions .5/.25 over the users: one user in train, one in test, val empty."""
    train_arrays, val_arrays, test_arrays = movielens_dataset.create_user_split_np_arrays(
        self.ratings_df,
        max_examples_per_user=None,
        train_fraction=0.5,
        val_fraction=0.25)
    for arrays in [train_arrays, val_arrays, test_arrays]:
      users, movies, ratings = arrays
      self._verify_server_data_arrays(users, movies, ratings)
    zipped_train_data = list(zip(*train_arrays))
    zipped_val_data = list(zip(*val_arrays))
    zipped_test_data = list(zip(*test_arrays))
    # Which user appears in train/test will depend on the random seed used for
    # user shuffling, but we fix that here.
    self.assertCountEqual(zipped_train_data, [(1, 2, 3.0), (1, 3, 4.0),
                                              (1, 2, 5.0), (1, 2, 6.0)])
    self.assertEmpty(zipped_val_data)
    self.assertCountEqual(zipped_test_data, [(0, 0, 1.0), (0, 1, 2.0)])
  def test_create_user_split_np_arrays_val_data(self):
    """Fractions .5/.5: one user in train, one in val, test split empty."""
    train_arrays, val_arrays, test_arrays = movielens_dataset.create_user_split_np_arrays(
        self.ratings_df,
        max_examples_per_user=None,
        train_fraction=0.5,
        val_fraction=0.5)
    for arrays in [train_arrays, val_arrays, test_arrays]:
      users, movies, ratings = arrays
      self._verify_server_data_arrays(users, movies, ratings)
    zipped_train_data = list(zip(*train_arrays))
    zipped_val_data = list(zip(*val_arrays))
    zipped_test_data = list(zip(*test_arrays))
    # Which user appears in train/test will depend on the random seed used for
    # user shuffling, but we fix that here.
    self.assertCountEqual(zipped_train_data, [(1, 2, 3.0), (1, 3, 4.0),
                                              (1, 2, 5.0), (1, 2, 6.0)])
    self.assertCountEqual(zipped_val_data, [(0, 0, 1.0), (0, 1, 2.0)])
    self.assertEmpty(list(zipped_test_data))
  def test_create_user_split_np_arrays_max_examples_per_user(self):
    """With a 2-example cap and a .5/.5 split, train and val get 2 rows each."""
    train_arrays, val_arrays, test_arrays = movielens_dataset.create_user_split_np_arrays(
        self.ratings_df,
        max_examples_per_user=2,
        train_fraction=0.5,
        val_fraction=0.5)
    for arrays in [train_arrays, val_arrays, test_arrays]:
      users, movies, ratings = arrays
      self._verify_server_data_arrays(users, movies, ratings)
    zipped_train_data = list(zip(*train_arrays))
    zipped_val_data = list(zip(*val_arrays))
    zipped_test_data = list(zip(*test_arrays))
    self.assertLen(zipped_train_data, 2)
    self.assertLen(zipped_val_data, 2)
    self.assertEmpty(zipped_test_data)
# Run under TensorFlow's test runner when executed directly.
if __name__ == '__main__':
  tf.test.main()
| google-research/federated | reconstruction/movielens/movielens_dataset_test.py | Python | apache-2.0 | 17,985 |
import os
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed deterministically
    (the previous version leaked the handle).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Distribution metadata; the long description is read from README.rst at
# build time via read() defined above.
setup(
    name="energenie",
    version="1.0.1",
    author="Ben Nuttall",
    author_email="ben@raspberrypi.org",
    description="Remotely control power sockets from the Raspberry Pi",
    license="BSD",
    keywords=[
        "energenie",
        "raspberrypi",
    ],
    url="https://github.com/bennuttall/energenie",
    packages=[
        "energenie",
    ],
    install_requires=[
        "RPi.GPIO",
    ],
    long_description=read('README.rst'),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Home Automation",
        "License :: OSI Approved :: BSD License",
    ],
)
| rjw57/energenie | setup.py | Python | bsd-3-clause | 756 |
from typing import Optional, Union
import osgeo
import numpy as np
import pandas as pd
try:
import xarray as xr
except ImportError:
pass
from .landslide.common import (
static_factor_of_safety,
rock_slope_static_factor_of_safety,
)
from .landslide.newmark import (
newmark_critical_accel,
newmark_displ_from_pga_M,
prob_failure_given_displacement,
)
from .landslide.rotational import rotational_critical_accel
def calc_newmark_soil_slide_single_event(
    pga: Union[float, np.ndarray],
    M: float,
    slope: Union[float, np.ndarray],
    cohesion: Union[float, np.ndarray],
    friction_angle: Union[float, np.ndarray],
    saturation_coeff: Union[float, np.ndarray] = 0.1,
    slab_thickness: Union[float, np.ndarray] = 2.5,
    soil_dry_density: Union[float, np.ndarray] = 1500.0,
    water_density: float = 1000.0,
    out_name=None,
) -> Union[float, np.ndarray]:
    """Calculate Newmark displacements for soil slides from a single event.

    A static factor of safety and the corresponding critical acceleration are
    derived from the slope/soil parameters, then combined with the event's
    peak ground acceleration and magnitude.

    :param pga: Peak ground acceleration per site.
    :param M: Earthquake magnitude of the event.
    :param slope: Slope per site (units as expected by
        `static_factor_of_safety` -- confirm in `landslide.common`).
    :param cohesion: Soil cohesion, forwarded to `static_factor_of_safety`.
    :param friction_angle: Soil friction angle, forwarded likewise.
    :param saturation_coeff: Saturation coefficient (default 0.1).
    :param slab_thickness: Slab thickness (default 2.5).
    :param soil_dry_density: Dry density of the soil (default 1500.0).
    :param water_density: Density of water (default 1000.0).
    :param out_name: Optional name to assign when the result is an
        `xarray.DataArray`.
    :returns: Newmark displacement, same container type as the inputs.
    """
    fs = static_factor_of_safety(
        slope,
        cohesion,
        friction_angle,
        saturation_coeff,
        slab_thickness,
        soil_dry_density,
        water_density,
    )
    ca = newmark_critical_accel(fs, slope)
    Dn = newmark_displ_from_pga_M(pga, ca, M)
    # `xr` is only bound if the optional xarray import at module load
    # succeeded; guard so a missing xarray does not raise NameError here.
    try:
        if isinstance(Dn, xr.DataArray):
            Dn.name = out_name
    except NameError:  # xarray not installed
        pass
    return Dn
def calc_newmark_soil_slide_event_set(
    pga: Union[float, np.ndarray],
    M: Union[float, np.ndarray],
    slope: Union[float, np.ndarray],
    cohesion: Union[float, np.ndarray],
    friction_angle: Union[float, np.ndarray],
    saturation_coeff: Union[float, np.ndarray] = 0.1,
    slab_thickness: Union[float, np.ndarray] = 2.5,
    soil_dry_density: Union[float, np.ndarray] = 1500.0,
    water_density=1000.0,
) -> Union[float, np.ndarray]:
    """Calculate Newmark displacements for soil slides from an event set.

    Currently only implemented for `pga` given as an `xarray.Dataset` whose
    data variables are per-event PGA arrays; each event's magnitude is taken
    from its ``attrs["mag"]`` (the `M` parameter is not used on this path).

    :param pga: Per-event peak ground accelerations (`xarray.Dataset`).
    :param M: Earthquake magnitude(s); unused for Dataset input.
    :param slope: Slope per site, forwarded to `static_factor_of_safety`.
    :returns: `xarray.Dataset` of Newmark displacements, one variable per
        event.
    :raises TypeError: if `pga` is not an `xarray.Dataset` (previously this
        path crashed with an opaque ``NameError`` on the unbound result).
    """
    fs = static_factor_of_safety(
        slope,
        cohesion,
        friction_angle,
        saturation_coeff,
        slab_thickness,
        soil_dry_density,
        water_density,
    )
    ca = newmark_critical_accel(fs, slope)
    # `xr` is only bound when the optional xarray import succeeded.
    try:
        is_dataset = isinstance(pga, xr.Dataset)
    except NameError:  # xarray not installed
        is_dataset = False
    if not is_dataset:
        raise TypeError(
            "pga must be an xarray.Dataset of per-event PGA arrays; got "
            + type(pga).__name__
        )
    Dn = xr.Dataset(
        {
            k: newmark_displ_from_pga_M(da, ca, da.attrs["mag"])
            for k, da in pga.data_vars.items()
        }
    )
    return Dn
def calc_rock_slope_failures():
    """Placeholder: rock-slope failure calculation is not implemented yet."""
    pass
def calc_rotational_failures():
    """Placeholder: rotational failure calculation is not implemented yet."""
    pass
def calculate_lateral_spreading():
    """Placeholder: lateral spreading calculation is not implemented yet."""
    pass
| gem/oq-engine | openquake/sep/calculators.py | Python | agpl-3.0 | 2,395 |
"""Test for Roles CLI
:Requirement: Role
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UsersRoles
:Assignee: dsynk
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import re
from math import ceil
from random import choice
import pytest
from fauxfactory import gen_string
from robottelo.cli.base import CLIDataBaseError
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.factory import make_filter
from robottelo.cli.factory import make_location
from robottelo.cli.factory import make_org
from robottelo.cli.factory import make_role
from robottelo.cli.factory import make_user
from robottelo.cli.filter import Filter
from robottelo.cli.role import Role
from robottelo.cli.settings import Settings
from robottelo.cli.user import User
from robottelo.constants import PERMISSIONS
from robottelo.constants import ROLES
from robottelo.datafactory import generate_strings_list
from robottelo.datafactory import parametrized
class TestRole:
    """Test class for Roles CLI"""

    @pytest.mark.tier1
    @pytest.mark.parametrize(
        'name, new_name',
        **parametrized(
            list(zip(generate_strings_list(length=10), generate_strings_list(length=10)))
        ),
    )
    def test_positive_crud_with_name(self, name, new_name):
        """Create new role with provided name, update name and delete role by ID

        :id: f77b8e84-e964-4007-b12b-142949134d8b

        :parametrized: yes

        :expectedresults: Role is created and has correct name, its name is updated
            and then deleted by ID

        :BZ: 1138553

        :CaseImportance: Critical
        """
        role = make_role({'name': name})
        assert role['name'] == name
        Role.update({'id': role['id'], 'new-name': new_name})
        role = Role.info({'id': role['id']})
        assert role['name'] == new_name
        Role.delete({'id': role['id']})
        # The deleted role must no longer be readable.
        with pytest.raises(CLIReturnCodeError):
            Role.info({'id': role['id']})

    @pytest.mark.tier1
    @pytest.mark.upgrade
    @pytest.mark.build_sanity
    def test_positive_create_with_permission(self):
        """Create new role with a set of permission

        :id: 7cb2b2e2-ad4d-41e9-b6b2-c0366eb09b9a

        :expectedresults: Role is created and has correct set of permissions

        :CaseImportance: Critical
        """
        role = make_role()
        # Pick permissions by its resource type
        permissions = [
            permission['name']
            for permission in Filter.available_permissions({"search": "resource_type=Organization"})
        ]
        # Assign filter to created role
        make_filter({'role-id': role['id'], 'permissions': permissions})
        assert set(Role.filters({'id': role['id']})[0]['permissions']) == set(permissions)

    @pytest.mark.tier1
    def test_positive_list_filters_by_id(self):
        """Create new role with a filter and list it by role id

        :id: 6979ad8d-629b-481e-9d3a-8f3b3bca53f9

        :expectedresults: Filter is listed for specified role

        :CaseImportance: Critical
        """
        role = make_role()
        # Pick permissions by its resource type
        permissions = [
            permission['name']
            for permission in Filter.available_permissions({"search": "resource_type=Organization"})
        ]
        # Assign filter to created role
        filter_ = make_filter({'role-id': role['id'], 'permissions': permissions})
        assert role['name'] == filter_['role']
        assert Role.filters({'id': role['id']})[0]['id'] == filter_['id']

    @pytest.mark.tier1
    def test_positive_list_filters_by_name(self):
        """Create new role with a filter and list it by role name

        :id: bbcb3982-f484-4dde-a3ea-7145fd28ab1f

        :expectedresults: Filter is listed for specified role

        :CaseImportance: Critical
        """
        role = make_role()
        # Pick permissions by its resource type
        permissions = [
            permission['name']
            for permission in Filter.available_permissions({"search": "resource_type=Organization"})
        ]
        # Assign filter to created role
        filter_ = make_filter({'role': role['name'], 'permissions': permissions})
        assert role['name'] == filter_['role']
        assert Role.filters({'name': role['name']})[0]['id'] == filter_['id']

    @pytest.mark.tier1
    def test_negative_list_filters_without_parameters(self):
        """Try to list filter without specifying role id or name

        :id: 56cafbe0-d1cb-413e-8eac-0e01a3590fd2

        :expectedresults: Proper error message is shown instead of SQL error

        :CaseImportance: Critical

        :BZ: 1296782
        """
        with pytest.raises(CLIReturnCodeError) as err:
            try:
                Role.filters()
            except CLIDataBaseError as db_err:
                # A DB error means an SQL failure leaked through instead of a
                # proper usage error. Bind to a distinct name so the
                # `pytest.raises` capture `err` is not shadowed (and left
                # unbound after the except clause under Python 3).
                pytest.fail(str(db_err))
        assert re.search('At least one of options .* is required', err.value.msg)

    @pytest.fixture()
    def make_role_with_permissions(self):
        """Create new role with a filter"""
        role = make_role()
        res_types = iter(PERMISSIONS.keys())
        permissions = []
        # Collect more than 20 different permissions
        while len(permissions) <= 20:
            permissions += [
                permission['name']
                for permission in Filter.available_permissions(
                    {"search": f"resource_type={next(res_types)}"}
                )
            ]
        # Create a filter for each permission
        for perm in permissions:
            make_filter({'role': role['name'], 'permissions': perm})
        return {
            'role': role,
            'permissions': permissions,
        }

    @pytest.mark.tier1
    @pytest.mark.upgrade
    @pytest.mark.parametrize('per_page', [1, 5, 20])
    def test_positive_list_filters_with_pagination(self, make_role_with_permissions, per_page):
        """Make sure filters list can be displayed with different items per
        page value

        :id: b9c7c6c1-70c2-4d7f-8d36-fa8613acc865

        :BZ: 1428516

        :expectedresults: `per-page` correctly sets amount of items displayed
            per page, different `per-page` values divide a list into correct
            number of pages

        :CaseImportance: Critical

        :parametrized: yes
        """
        # Verify the first page contains exactly the same items count
        # as `per-page` value
        filters = Role.filters(
            {'name': make_role_with_permissions['role']['name'], 'per-page': per_page}
        )
        assert len(filters) == per_page
        # Verify pagination and total amount of pages by checking the
        # items count on the last page
        last_page = ceil(len(make_role_with_permissions['permissions']) / per_page)
        filters = Role.filters(
            {
                'name': make_role_with_permissions['role']['name'],
                'page': last_page,
                'per-page': per_page,
            }
        )
        assert len(filters) == (
            len(make_role_with_permissions['permissions']) % per_page or per_page
        )

    @pytest.mark.tier1
    @pytest.mark.upgrade
    def test_positive_delete_cloned_builtin(self):
        """Clone a builtin role and attempt to delete it

        :id: 1fd9c636-596a-4cb2-b100-de19238042cc

        :BZ: 1426672

        :expectedresults: role was successfully deleted

        :CaseImportance: Critical
        """
        role_list = Role.list({'search': f'name=\\"{choice(ROLES)}\\"'})
        assert len(role_list) == 1
        cloned_role = Role.clone({'id': role_list[0]['id'], 'new-name': gen_string('alphanumeric')})
        Role.delete({'id': cloned_role['id']})
        # The clone must be gone after deletion.
        with pytest.raises(CLIReturnCodeError):
            Role.info({'id': cloned_role['id']})
class TestSystemAdmin:
    """Test class for System Admin role end to end CLI"""

    @pytest.fixture(scope='class', autouse=True)
    def tearDown(self):
        """Will reset the changed value of settings"""
        yield
        # Restore the default sync interval changed by the test below.
        Settings.set({'name': "outofsync_interval", 'value': "30"})

    @pytest.mark.upgrade
    @pytest.mark.tier3
    def test_system_admin_role_end_to_end(self):
        """Test System admin role with a end to end workflow

        :id: da6b3549-d1cf-44fc-869f-08d15d407fa2

        :steps:

            1. Create a System admin role user1
            2. Login with the user1 and change global settings
               "Out of sync interval" to 32
            3. Create user2 with system admin role
            4. Login with user2 to create a Organization
            5. Clone a Org-admin role
            6. Edit the Architecture Filter and search name = x86_64
            7. Create a User with Cloned Org admin
            8. Login with user.

        :expectedresults:

            1. User should be assigned with System Admin role.
            2. User with sys admin role should be able to update settings
            3. User with sys admin role should be able to create users and
               assign Organizations to them.
            4. System Admin role should be able to create Organization admins
            5. User with sys admin role should be able to edit filters on roles

        :CaseLevel: System
        """
        org = make_org()
        location = make_location()
        common_pass = gen_string('alpha')
        role = Role.info({'name': 'System admin'})
        system_admin_1 = make_user(
            {
                'password': common_pass,
                'organization-ids': org['id'],
                'location-ids': location['id'],
            }
        )
        User.add_role({'id': system_admin_1['id'], 'role-id': role['id']})
        Settings.with_user(username=system_admin_1['login'], password=common_pass).set(
            {'name': "outofsync_interval", 'value': "32"}
        )
        sync_time = Settings.list({'search': 'name=outofsync_interval'})[0]
        # Asserts if the setting was updated successfully
        assert '32' == sync_time['value']
        # Create another System Admin user using the first one
        system_admin = User.with_user(
            username=system_admin_1['login'], password=common_pass
        ).create(
            {
                'auth-source-id': 1,
                'firstname': gen_string('alpha'),
                'lastname': gen_string('alpha'),
                'login': gen_string('alpha'),
                'mail': '{}@example.com'.format(gen_string('alpha')),
                'password': common_pass,
                'organizations': org['name'],
                'role-ids': role['id'],
                'locations': location['name'],
            }
        )
        # Create the Org Admin user
        org_role = Role.with_user(username=system_admin['login'], password=common_pass).clone(
            {
                'name': 'Organization admin',
                'new-name': gen_string('alpha'),
                'organization-ids': org['id'],
                'location-ids': location['id'],
            }
        )
        org_admin = User.with_user(username=system_admin['login'], password=common_pass).create(
            {
                'auth-source-id': 1,
                'firstname': gen_string('alpha'),
                'lastname': gen_string('alpha'),
                'login': gen_string('alpha'),
                'mail': '{}@example.com'.format(gen_string('alpha')),
                'password': common_pass,
                'organizations': org['name'],
                'role-ids': org_role['id'],
                'location-ids': location['id'],
            }
        )
        # Assert if the cloning was successful
        assert org_role['id'] is not None
        org_role_filters = Role.filters({'id': org_role['id']})
        search_filter = None
        for arch_filter in org_role_filters:
            if arch_filter['resource-type'] == 'Architecture':
                search_filter = arch_filter
                break
        # Fail fast if no Architecture filter was found; previously the loop
        # variable was reused below, which could silently update whichever
        # filter happened to be listed last.
        assert search_filter is not None, 'No Architecture filter in cloned role'
        Filter.with_user(username=system_admin['login'], password=common_pass).update(
            {'role-id': org_role['id'], 'id': search_filter['id'], 'search': 'name=x86_64'}
        )
        # Asserts if the filter is updated
        assert 'name=x86_64' in Filter.info({'id': search_filter['id']}).values()
        org_admin = User.with_user(username=system_admin['login'], password=common_pass).info(
            {'id': org_admin['id']}
        )
        # Asserts Created Org Admin
        assert org_role['name'] in org_admin['roles']
        assert org['name'] in org_admin['organizations']
| JacobCallahan/robottelo | tests/foreman/cli/test_role.py | Python | gpl-3.0 | 12,631 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
import inspect
import os
import sys
import re
import traceback
from contextlib import contextmanager
from warnings import warn
import signal
from copy import deepcopy
import pytest
from _pytest.runner import pytest_runtest_makereport as orig_pytest_runtest_makereport
from _pytest.terminal import TerminalReporter
from six import StringIO
import jsonpatch
from functools import wraps, partial
from invenio_search.api import Query
from .models import CheckerRule
from .worker import (
RedisWorker,
StatusWorker,
make_fullpatch,
get_workers_with_unprocessed_results,
)
from eliot import (
Action,
Message,
start_action,
to_file,
Logger,
)
from .supervisor import (
_are_compatible
)
from .config import get_eliot_log_path
from .registry import reporters_files
eliot_log_path = get_eliot_log_path()
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# TERMINATION
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
def die(rcv_signal, frame):
    """Signal handler: abort the worker by raising SystemExit."""
    raise SystemExit
# Translate termination signals into SystemExit so cleanup code can run.
signal.signal(signal.SIGINT, die)
signal.signal(signal.SIGTERM, die)
def pytest_exception_interact(node, call, report):
    """Terminate execution on SystemExit.

    This is a workaround for the fact that pytest/billiard interpret SIGTERM
    sent to a celery thread to have come from the test function itself. We ask
    pytest to handle this gracefully by raising Interrupted.

    Not calling os._exit() here is important so that we don't break eventlet,
    if in use.

    :type node: :py:class:_pytest.main.Node
    :type call: :py:class:_pytest.runner.CallInfo
    :type report: :py:class:_pytest.runner.TestReport
    """
    if isinstance(call.excinfo.value, SystemExit):
        redis_worker = node.config.option.redis_worker
        warn('Ending worker' + str(redis_worker.task_id))
        # redis_worker._cleanup()
        # Actually interrupt the session, as promised in the docstring; the
        # `raise` had been accidentally folded into the comment line above.
        raise node.session.Interrupted(True)
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ELIOT
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
def start_action_dec(action_type, **dec_kwargs):
    """Decorator factory: run the wrapped function inside an eliot action.

    Continues the worker's serialized eliot task (so log entries from separate
    processes chain together) and persists the new continuation id back onto
    the worker afterwards.
    """
    def real_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            redis_worker = Session.session.config.option.redis_worker
            eliot_task_id = redis_worker.eliot_task_id
            # print "~{} {}".format(eliot_task_id, action_type)
            # Reset eliot destinations so each call appends to this worker's
            # own log file only.
            del Logger._destinations._destinations[:]
            to_file(open(os.path.join(eliot_log_path, redis_worker.master.uuid + '.' + redis_worker.task_id), "ab"))
            eliot_task = Action.continue_task(task_id=eliot_task_id)
            with eliot_task:
                with start_action(action_type=action_type,
                                  worker_id=redis_worker.task_id,
                                  **dec_kwargs):
                    func(*args, **kwargs)
            # Persist the continuation point for the next decorated call.
            redis_worker.eliot_task_id = eliot_task.serialize_task_id()
        return wrapper
    return real_decorator
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# COMMUNICATE WITH MASTER
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
def pytest_collection_modifyitems(session, config, items):
    """Call when pytest has finished collecting items."""
    # Thin hook: delegated so the helper can carry the eliot logging decorator.
    _pytest_collection_modifyitems(session, config, items)
@start_action_dec(action_type='invenio_checker.conftest2.pytest_collection_modifyitems')
def _pytest_collection_modifyitems(session, config, items):
    """Report allowed recids and jsonpaths to master and await start.

    :type session: :py:class:_pytest.main.Session
    :type config: :py:class:_pytest.config.Config
    :type items: list
    """
    redis_worker = config.option.redis_worker
    # Exactly one check function per file is supported.
    unique_functions_found = set((item.function for item in items))
    if not unique_functions_found:
        raise AssertionError(
            "No check functions were found."
            " Scroll up for exceptions that may have prevented collection!"
        )
    elif not len(unique_functions_found) == 1:
        raise AssertionError(
            "We support one check function per file. Found {0} instead."
            .format(len(unique_functions_found))
        )
    item = items[0]
    # Set allowed_paths and allowed_recids
    # NOTE(review): if `item` has no `cls`, allowed_paths/allowed_recids are
    # never bound and the code below raises NameError -- confirm that items
    # always come from class-based checks.
    if hasattr(item, 'cls'):
        if hasattr(item.cls, 'allowed_paths'):
            allowed_paths = item.cls.allowed_paths(
                config.option.invenio_rule.arguments
            )
        else:
            allowed_paths = set()
        if hasattr(item.cls, 'allowed_recids'):
            allowed_recids = item.cls.allowed_recids(
                config.option.invenio_rule.arguments,
                batch_recids(session),
                all_recids(session),
                search(session)
            )
        else:
            allowed_recids = batch_recids(session)
    if allowed_recids - all_recids(session):
        raise AssertionError('Check requested recids that are not in the'
                             ' database!')
    # TODO `allowed_paths` must return jsonpointers (IETF RFC 6901)
    redis_worker.allowed_paths = allowed_paths
    redis_worker.allowed_recids = allowed_recids
    def worker_conflicts_with_currently_running(worker):
        # A foreign worker blocks us if its unprocessed results are not
        # compatible with what we intend to touch.
        foreign_running_workers = get_workers_with_unprocessed_results()
        blockers = set()
        for foreign in foreign_running_workers:
            if not _are_compatible(worker, foreign):
                blockers.add(foreign)
        return blockers
    redis_worker.status = StatusWorker.ready  # XXX unused?
    with start_action(action_type='checking for conflicting running workers'):
        # Serialize the conflict check across workers via the shared lock.
        redis_worker.lock.get()
        try:
            blockers = worker_conflicts_with_currently_running(redis_worker)
            if blockers:
                Message.log(message_type='found conflicting workers', value=str(blockers))
                print 'CONFLICT {} {}'.format(redis_worker.task_id, blockers)
                redis_worker.retry_after_ids = {bl.task_id for bl in blockers}
                # Dropping all items makes this run a no-op; we retry later.
                del items[:]
            else:
                print 'RESUMING ' + str(redis_worker.task_id)
                redis_worker.status = StatusWorker.running
        finally:
            redis_worker.lock.release()
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# FIXTURES
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
def _warn_if_empty(func):
"""Print a warning if the given functions returns no results.
..note:: pytest relies on the signature function to set fixtures, in this
case `request`.
:type func: callable
"""
@wraps(func)
def _warn_if_empty(request):
"""
:type request: :py:class:_pytest.python.SubRequest
"""
ret = func(request)
if not ret:
warn(func.__name__ + " returned an empty set!")
return ret
return _warn_if_empty
def _request_to_config(request_or_config):
"""Resolve pytest config.
This is useful to make a function that, due to pytest, expects `request`,
work when called called from pytest itself or from a function that only had
access to `config`.
"""
try:
return request_or_config.config
except AttributeError:
return request_or_config
@pytest.fixture(scope="session")
def search(request):
    """Wrap `Query(request).search()`.

    :type request: :py:class:_pytest.python.SubRequest
    """
    def _query(query):
        """Run an invenio search and attach lazy access to full records.

        :type query: str
        """
        ret = Query(query).search()
        # Expose a generator of record objects alongside the raw recids.
        ret.records = (get_record(request)(recid) for recid in ret.recids)
        return ret
    return _query
@pytest.fixture(scope="session")
def arguments(request):
    """Get the user-set arguments from the database."""
    # NOTE(review): session-scoped twin of the function-scoped `cfg_args`
    # fixture below -- confirm both are still needed.
    return request.config.option.invenio_rule.arguments
@pytest.fixture(scope="session")
def get_record(request):
    """Wrap `get_record` for record patch generation.

    This function ensures that we
    1) hit the database once per record,
    2) maintain the latest, valid, modified version of the records,
    3) return the same 'temporary' object reference per check.

    :type request: :py:class:`_pytest.python.SubRequest`
    """
    def _get_record(recid):
        redis_worker = request.session.config.option.redis_worker
        invenio_records = request.session.invenio_records
        # Cache tiers: 'original' is the pristine copy, 'modified' the last
        # committed edit, 'temporary' the in-flight working copy.
        if recid not in invenio_records['original']:
            invenio_records['original'][recid] = redis_worker.get_record_orig_or_mem(recid)
        if recid not in invenio_records['modified']:
            invenio_records['modified'][recid] = deepcopy(invenio_records['original'][recid])
        # NOTE(review): 'temporary' aliases the 'modified' object (no copy),
        # so edits mutate both until teardown pops the key -- appears
        # intentional; confirm.
        if recid not in invenio_records['temporary']:
            invenio_records['temporary'][recid] = invenio_records['modified'][recid]
        return invenio_records['temporary'][recid]
    return _get_record
@pytest.fixture(scope="session")
@_warn_if_empty
def all_recids(request):
    """Return all the recids this run is ever allowed to change.

    :type request: :py:class:_pytest.python.SubRequest
    """
    # Accepts either a `request` or a bare `config` object.
    config = _request_to_config(request)
    return config.option.redis_worker.master.all_recids
@pytest.fixture(scope="session")
@_warn_if_empty
def batch_recids(request):
    """Return the recids that were assigned to this worker.

    :type request: :py:class:_pytest.python.SubRequest

    :rtype: intbitset
    """
    # Accepts either a `request` or a bare `config` object.
    config = _request_to_config(request)
    return config.option.redis_worker.bundle_requested_recids
@pytest.fixture(scope="function")
def log(request):
    """Wrap a logging function that informs the enabled reporters.

    :type request: :py:class:_pytest.python.SubRequest
    """
    def _log(user_readable_msg):
        # current_function = request.node #<class '_pytest.python.Function'>
        # Attribute the message to the caller's source location.
        location_tuple = LocationTuple.from_stack(inspect.stack()[1])
        for reporter in request.config.option.invenio_reporters:
            reporter.report(user_readable_msg, location_tuple)
    return _log
@pytest.fixture(scope="function")
def cfg_args(request):
    """Return arguments given to the task from the database configuration.

    :type request: :py:class:_pytest.python.SubRequest
    """
    # NOTE(review): function-scoped twin of the session-scoped `arguments`
    # fixture above.
    return request.config.option.invenio_rule.arguments
@pytest.fixture(scope="function")
def record(request):
    """Return a single record from this batch.

    `request.param` is a recid injected by `pytest_generate_tests` below.

    :type request: :py:class:_pytest.python.SubRequest
    """
    record_id = request.param
    return get_record(request)(record_id)
def pytest_generate_tests(metafunc):
    """Parametrize the check function with `record`.

    :type metafunc: :py:class:_pytest.python.Metafunc
    """
    if 'record' in metafunc.fixturenames:
        # indirect=True routes each recid through the `record` fixture.
        metafunc.parametrize("record", batch_recids(metafunc.config),
                             indirect=True)
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# RESULT HANDLING
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
def pytest_sessionstart(session):
    """Initialize session-wide variables for record management and caching.

    :type session: :py:class:`_pytest.main.Session`
    """
    # Delegated so the helper can be reused without the hook name.
    return _pytest_sessionstart(session)
def _pytest_sessionstart(session):
    """Attach empty record caches to `session` and publish it globally."""
    assert not hasattr(session, 'invenio_records')
    session.invenio_records = {'original': {}, 'modified': {}, 'temporary': {}}
    # Modified actually means "pull out"
    Session.session = session
class Session(object):
    # Module-level holder for the current pytest session; set by
    # _pytest_sessionstart so helpers without a `session` argument can reach
    # it.
    session = None
def _patches_of_last_execution():
    """Get the full_patches generated during the last check.

    Yields one full patch per record whose 'temporary' version differs from
    'original', removing each yielded record from the 'temporary' cache.

    ..note::
        `invenio_records` is populated by the `get_record` function.
    """
    session = Session.session
    invenio_records = session.invenio_records
    redis_worker = session.config.option.redis_worker
    def get_full_patches():
        """Return all the record patches resulting from the last run."""
        # Safe to delete from 'temporary' in the outer loop: under Python 2
        # `.items()` returns a list snapshot.
        for recid, modified_record in invenio_records['temporary'].items():
            original_record = invenio_records['original'][recid]
            patch = jsonpatch.make_patch(original_record, modified_record)
            if patch:
                yield make_fullpatch(recid, hash(original_record), patch, redis_worker.task_id)
    for full_patch in get_full_patches():
        del invenio_records['temporary'][full_patch['recid']]
        yield full_patch
# Runs after exception has been reported to the reporter, after every single fine-grained step
def pytest_runtest_logreport(report):
    """Promote per-test record edits after each test phase.

    Runs for every phase (setup/call/teardown) of every test, after the
    report has been dispatched to the reporters.
    """
    return _pytest_runtest_logreport(report)
# @start_action_dec(action_type='invenio_checker:conftest2:pytest_runtest_logreport')
def _pytest_runtest_logreport(report):
    """Commit 'temporary' record versions to 'modified' once a test passes."""
    session = Session.session
    invenio_records = session.invenio_records
    if report.when == 'teardown' and report.outcome == 'passed':
        # `.keys()` returns a list snapshot under Python 2, so popping while
        # iterating below is safe.
        temp_keys = invenio_records['temporary'].keys()
        for recid in temp_keys:
            invenio_records['modified'][recid] = invenio_records['temporary'].pop(recid)
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# OPTIONS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
@lru_cache(maxsize=2)
def _load_rule_from_db(rule_name):
    """Translate the name of the rule set to this task to a database object.

    Result is memoized. NOTE(review): confirm why maxsize is 2.

    :type rule_name: str
    """
    return CheckerRule.query.get(rule_name)
def pytest_addoption(parser):
    """Parse arguments given to the command line of this batch.

    :type parser: :py:class:`_pytest.config.Parser`
    """
    # `type=RedisWorker` makes pytest build the worker object from the raw id.
    parser.addoption("--invenio-task-id", action="store", type=RedisWorker,
                     help="get task id", dest='redis_worker')
    parser.addoption("--invenio-master-id", action="store", type=str,
                     help="get master id", dest='invenio_master_id')
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# REPORTER CALLER
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
def pytest_sessionfinish(session, exitstatus):
    """Called once the whole test run finishes; flush record patches.

    TODO: Upload
    """
    return _pytest_sessionfinish(session, exitstatus)
@start_action_dec(action_type='invenio_checker:conftest2:_pytest_sessionfinish')
def _pytest_sessionfinish(session, exitstatus):
    """Push every committed ('modified') record diff to redis as a patch."""
    with start_action(action_type='moving added patches to redis'):
        invenio_records = session.invenio_records
        redis_worker = session.config.option.redis_worker
        for recid, modified_record in invenio_records['modified'].items():
            original_record = invenio_records['original'][recid]
            patch = jsonpatch.make_patch(original_record, modified_record)
            if patch:
                # FIXME: Hash is wrong
                redis_worker.patch_to_redis(
                    make_fullpatch(
                        recid, hash(original_record),
                        patch, redis_worker.task_id)
                )
class LocationTuple(object):
    """Builders for (filename, line_number, domain) location triples."""

    @staticmethod
    def from_report_location(report_location):
        """Convert a pytest `report_location` to a `LocationTuple`.

        :type report_location: tuple
        """
        fspath, lineno, domain = report_location
        return os.path.abspath(fspath), lineno, domain

    @staticmethod
    def from_stack(stack):
        """Convert an `inspect.stack()` entry to a `LocationTuple`.

        :type stack: tuple
        """
        frame, filename, line_number, _, _, _ = stack
        function_name = frame.f_code.co_name  # 'check_fail'
        try:
            argvalues = inspect.getargvalues(frame)
            owner = argvalues.locals[argvalues.args[0]]
            class_name = owner.__class__.__name__  # CheckWhatever
        except IndexError:
            # Plain function: no first argument to derive a class name from.
            domain = function_name
        else:
            domain = '{0}.{1}'.format(class_name, function_name)
        return filename, line_number, domain
class InvenioReporter(TerminalReporter):
ansi_escape = re.compile(r'\x1b[^m]*m')
    def __init__(self, reporter):
        """Initialize TerminalReporter without features we don't need.

        :type reporter: :py:class:`_pytest.terminal.TerminalReporter`
        """
        # Reuse only the wrapped reporter's config; the rest of its state is
        # deliberately not copied.
        TerminalReporter.__init__(self, reporter.config)
    @contextmanager
    def new_tw(self):
        """Scoped terminal writer to get output of designated functions.

        ..note:: Will catch any exceptions raised while in the scope and append
            them to the stream. This way one can call deprecated functions and
            actually get a report about it.
        """
        class StrippedStringIO(StringIO):
            """StringIO that strips ansi characters."""
            def write(self, message):
                """Escape all ansi characters from input."""
                message = InvenioReporter.ansi_escape.sub('', message)
                StringIO.write(self, message)  # StringIO is old-style
        tmp_stream = StrippedStringIO()
        # Temporarily redirect the terminal writer into our buffer.
        old_file = self._tw._file  # pylint: disable=no-member
        self._tw._file = tmp_stream  # pylint: disable=no-member
        def getvalue():
            """Return everything that is in the stream."""
            tmp_stream.seek(0)
            return tmp_stream.getvalue()
        exc_info = None
        try:
            yield getvalue
        except Exception:
            exc_info = sys.exc_info()
        finally:
            if exc_info:
                formatted_exception = ''.join(traceback.format_exception(*exc_info))
                tmp_stream.write('\nException raised while collecting description:\n')
                tmp_stream.write(formatted_exception)
            # Always restore the real output file.
            self._tw._file = old_file  # pylint: disable=no-member
def pytest_collectreport(self, report):
"""Report failure during colltion.
:type report: :py:class:_pytest.runner.CollectReport
"""
TerminalReporter.pytest_collectreport(self, report)
if report.failed:
self.report_failure(report, when='collect')
def pytest_runtest_logreport(self, report):
"""Report failure during check run.
:type report: :py:class:_pytest.runner.TestReport
"""
if hasattr(report, 'wasxfail'):
return
if report.failed:
self.report_failure(report)
else:
pass
# TODO: record checked records to DB. No, don't do this before commit.
def pytest_runtest_logstart(self, nodeid, location):
"""No-op terminal-specific prints."""
pass
def summary_failures(self):
"""No-op terminal-specific prints."""
pass
def summary_errors(self):
"""No-op terminal-specific prints."""
pass
def report_failure(self, report, when=None):
"""Dispatch all possible types of failures to enabled reporters.
:type when: None or str
:type report: :py:class:_pytest.runner.BaseReport
"""
when = when or report.when
assert when in ('collect', 'setup', 'call', 'teardown')
with self.new_tw() as getvalue:
self._outrep_summary(report) # pylint: disable=no-member
outrep_summary = getvalue()
# Output, should use celery?
location_tuple = LocationTuple.from_report_location(report.location)
try:
exc_info = (
report.excinfo.type,
report.excinfo.value,
report.excinfo.traceback[0]._rawentry
)
except AttributeError:
exc_info = sys.exc_info()
formatted_exception = ''.join(traceback.format_exception(*exc_info))
# Inform all enabled reporters
patches = tuple(_patches_of_last_execution())
for reporter in pytest.config.option.invenio_reporters: # pylint: disable=no-member
report_exception = partial(reporter.report_exception, when, outrep_summary,
location_tuple, formatted_exception=formatted_exception)
if patches:
report_exception(patches=patches)
else:
report_exception()
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# INTIALIZE, REGISTER REPORTERS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
@pytest.mark.trylast
def pytest_configure(config):
    """Register our report handlers' handler.
    Resolves the current execution, its rule and the rule's reporters onto
    ``config.option``, then swaps pytest's terminal reporter for an
    :class:`InvenioReporter` gateway.
    :type config: :py:class:`_pytest.config.Config`
    """
    def get_reporters(invenio_rule):
        # Fix: use the `invenio_rule` parameter instead of reaching back into
        # `config.option.invenio_rule`, which only worked because of the
        # assignment ordering below.
        return [reporter.module.get_reporter(invenio_rule.name)
                for reporter in invenio_rule.reporters]
    config.option.invenio_execution = \
        config.option.redis_worker.master.get_execution()
    config.option.invenio_rule = config.option.invenio_execution.rule
    config.option.invenio_reporters = get_reporters(config.option.invenio_rule)
    # Get the current terminal reporter
    terminalreporter = config.pluginmanager.getplugin('terminalreporter')
    # Unregister it
    config.pluginmanager.unregister(terminalreporter)
    # Add our own to act as a gateway
    invenioreporter = InvenioReporter(terminalreporter)
    config.pluginmanager.register(invenioreporter, 'invenioreporter')
def pytest_runtest_makereport(item, call):
    """Override in order to inject `excinfo` for internalerror.
    :type item: :py:class:`_pytest.python.Function`
    :type call: :py:class:`_pytest.runner.CallInfo`
    """
    excinfo = call.excinfo
    # Fix: the previous try/finally referenced `result` inside `finally`, so
    # if orig_pytest_runtest_makereport() raised, the unbound name caused a
    # NameError that masked the real exception. Annotate only a report we
    # actually received.
    result = orig_pytest_runtest_makereport(item, call)
    result.excinfo = excinfo
    return result
# Namespace manipulation
# class InvenioStorage(object):
# def __init__(self):
# self.records = None
# self.reporters = None
# @property
# def records(self):
# return pytest.config.
# def pytest_namespace():
# pass
# # pytest has special handling for dicts, so we use a custom class instead
# # invenio_storage = InvenioStorage()
# # return {'invenio_storage': invenio_storage}
# @pytest.mark.trylast
# def pytest_cmdline_main(config):
# # Get the marker
# import ipdb; ipdb.set_trace()
# pytest.invenio_storage.records = config.option.records.split(',')
# pytest.invenio_storage.reporters = config.option.reporters.split(',')
| dset0x/invenio-checker-old | invenio_checker/conftest2.py | Python | gpl-2.0 | 24,901 |
# -*- coding: utf-8 -*-
from local_settings import *
# Application definition
# Installed applications: Django contrib apps plus the project apps
# (lugar, encuestas, clima) and third-party UI/utility packages.
INSTALLED_APPS = (
    'wpadmin',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'lugar',
    'encuestas',
    'clima',
    'smart_selects',
    'multiselectfield',
    'selectable',
    'sorl.thumbnail',
    'el_pagination',
    #'debug_toolbar',
    'import_export',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    # remove once it has been used (original note: "borrar una ves usado")
    #'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mapafinca.urls'
# Debug-toolbar knobs, kept for when the toolbar is re-enabled above.
#SHOW_TOOLBAR_CALLBACK = True
#INTERNAL_IPS = '127.0.0.1'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mapafinca.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'es-ni'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static_media"),
)
# wpadmin (WordPress-style admin skin) configuration.
WPADMIN = {
    'admin': {
        #'admin_site': 'mapafinca.admin',
        'title': 'Django panel de administración',
        'menu': {
            'top': 'wpadmin.menu.menus.BasicTopMenu',
            'left': 'wpadmin.menu.menus.BasicLeftMenu',
        },
        'dashboard': {
            'breadcrumbs': True,
        },
        'custom_style': STATIC_URL + 'wpadmin/css/themes/light.css',
    }
}
CKEDITOR_JQUERY_URL = 'https://code.jquery.com/jquery-2.1.3.min.js'
# NOTE(review): PickleSerializer unpickles session data; if the session
# store is ever shared or signed cookies are used, this is an arbitrary
# code-execution risk. Consider JSONSerializer — confirm nothing relies on
# pickling non-JSON types into the session.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
#         'LOCATION': 'my_cache_table',
#     }
# }
ENDLESS_PAGINATION_PER_PAGE = 18
| CARocha/mapafinca | mapafinca/settings.py | Python | mit | 3,040 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._isprivate import IsPrivate
#-------------------------------------------------------------------------
# "Media marked private"
#-------------------------------------------------------------------------
class MediaPrivate(IsPrivate):
    """Filter rule matching media objects whose privacy flag is set."""
    # Localized, user-visible rule metadata shown in the filter editor.
    name = _('Media objects marked private')
    description = _("Matches Media objects that are indicated as private")
| SNoiraud/gramps | gramps/gen/filters/rules/media/_mediaprivate.py | Python | gpl-2.0 | 1,633 |
#!/usr/bin/env python3
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# EGSE interfaces - Unit Tests *
#******************************************************************************
import sys
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import EGSE.EDEN
import UTIL.SYS, UTIL.TASK
import UnitTest.testData
####################
# global variables #
####################
# EDEN client is a singleton
s_client = None
###########
# classes #
###########
# =============================================================================
class ModelTask(UTIL.TASK.ProcessingTask):
  """Console-driven model task: decodes key commands, drives the EDEN client."""
  # ---------------------------------------------------------------------------
  def __init__(self):
    """Initialise attributes only"""
    # isParent=True: this task owns the console and dispatches commands.
    UTIL.TASK.ProcessingTask.__init__(self, isParent=True)
  # ---------------------------------------------------------------------------
  def notifyCommand(self, argv, extraData):
    """Callback for processing the input arguments"""
    if len(argv) > 0:
      # decode the command
      cmd = argv[0].upper()
      if cmd == "H" or cmd == "HELP":
        self.helpCmd(argv)
      elif cmd == "Q" or cmd == "QUIT":
        self.quitCmd(argv)
      elif cmd == "1" or cmd == "TC_SPACE":
        self.tcSpaceCmd(argv)
      elif cmd == "2" or cmd == "TC_SCOE":
        self.tcScoeCmd(argv)
      elif cmd == "3" or cmd == "CMD_EXEC":
        self.cmdExecCmd(argv)
      else:
        LOG_WARNING("Invalid command " + argv[0])
    return 0
  # ---------------------------------------------------------------------------
  def helpCmd(self, argv):
    """Decoded help command"""
    LOG("Available commands:")
    LOG("-------------------")
    LOG("")
    LOG("h | help .......provides this information")
    LOG("q | quit .......terminates the application")
    LOG("1 | tc_space ...send TC via EDEN (TC,SPACE)")
    LOG("2 | tc_scoe ....send TC via EDEN (TC,SCOE)")
    LOG("3 | cmd_exec ...send message via EDEN (CMD,EXEC)")
    LOG("")
  # ---------------------------------------------------------------------------
  def quitCmd(self, argv):
    """Decoded quit command"""
    UTIL.TASK.s_parentTask.stop()
  # ---------------------------------------------------------------------------
  def tcSpaceCmd(self, argv):
    """Decoded (TC,SPACE) command: send a test TC packet to the space link."""
    global s_client
    if len(argv) != 1:
      LOG_WARNING("Invalid command argument(s)")
      LOG("usage: tc_space")
      LOG("or: 1")
      return
    s_client.sendTcSpace(UnitTest.testData.TC_PACKET_01)
  # ---------------------------------------------------------------------------
  def tcScoeCmd(self, argv):
    """Decoded (TC,SCOE) command: send a test TC packet to the SCOE."""
    global s_client
    if len(argv) != 1:
      LOG_WARNING("Invalid command argument(s)")
      LOG("usage: tc_scoe")
      LOG("or: 2")
      return
    s_client.sendTcScoe(UnitTest.testData.TC_PACKET_01)
  # ---------------------------------------------------------------------------
  def cmdExecCmd(self, argv):
    """Decoded (CMD,EXEC) command: send a free-text message via EDEN."""
    global s_client
    if len(argv) != 2:
      LOG_WARNING("Invalid command argument(s)")
      LOG("usage: cmd_exec <message>")
      LOG("or: 3 <message>")
      return
    message = argv[1]
    s_client.sendCmdExec(message)
# =============================================================================
class Client(EGSE.EDEN.Client):
  """Concrete EDEN client used by this unit test (no behaviour added)."""
  def __init__(self):
    """Initialise attributes only"""
    EGSE.EDEN.Client.__init__(self)
#############
# functions #
#############
# -----------------------------------------------------------------------------
def initConfiguration():
  """initialise the system configuration with defaults for this test"""
  # Defaults: colored logging on, EDEN server on localhost:48569.
  UTIL.SYS.s_configuration.setDefaults([
    ["SYS_COLOR_LOG", "1"],
    ["HOST", "127.0.0.1"],
    ["CCS_SERVER_PORT", "48569"]])
# -----------------------------------------------------------------------------
def createClient():
  """create the EDEN client singleton and connect it to the server"""
  global s_client
  s_client = Client()
  # Terminate the process if the server cannot be reached.
  if not s_client.connectToServer(
    serverHost=UTIL.SYS.s_configuration.HOST,
    serverPort=int(UTIL.SYS.s_configuration.CCS_SERVER_PORT)):
    sys.exit(-1)
########
# main #
########
if __name__ == "__main__":
  # Script entry point: wire configuration, console handling, the model
  # task and the EDEN client, then enter the command loop.
  # initialise the system configuration
  initConfiguration()
  # initialise the console handler
  consoleHandler = UTIL.TASK.ConsoleHandler()
  # initialise the model
  modelTask = ModelTask()
  # register the console handler
  modelTask.registerConsoleHandler(consoleHandler)
  # create the EDEN client
  LOG("Open the EDEN client")
  createClient()
  # start the tasks
  LOG("start modelTask...")
  modelTask.start()
| Stefan-Korner/SpacePyLibrary | AppTest/testEDENclient.py | Python | mit | 5,625 |
from django.contrib import admin
from layerdefinitions.models import LayerDefinition, Review
class LayerDefinitionInline(admin.TabularInline):
    """Inline editor showing a layer definition's reviews on its admin page."""
    model = Review
    # NOTE(review): `list_display` is not a documented InlineModelAdmin
    # option (it belongs to ModelAdmin), so Django ignores it here. This was
    # probably meant to be `fields` — confirm before changing the rendered form.
    list_display = ('review_date', 'comment', 'reviewer')
@admin.register(LayerDefinition)
class LayerDefinitionAdmin(admin.ModelAdmin):
    """Admin for LayerDefinition; reviews are edited inline on the same page."""
    inlines = [LayerDefinitionInline, ]
    list_display = ('name', 'description', 'creator', 'upload_date',)
    search_fields = ('name', 'description', 'provider')
search_fields = ('name', 'description', 'provider')
@admin.register(Review)
class LayerDefinitionReviewAdmin(admin.ModelAdmin):
    """Flat admin list of all layer-definition reviews."""
    list_display = ('resource', 'reviewer', 'comment', 'review_date',)
| qgis/QGIS-Django | qgis-app/layerdefinitions/admin.py | Python | gpl-2.0 | 618 |
#!/usr/bin/python
#
# Copyright (c) 2011-2013 Jason Dobies
#
# This file is part of Okaara.
#
# Okaara is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# Okaara is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with Okaara.
# If not, see <http://www.gnu.org/licenses/>.
import okaara.prompt
from okaara.table import Table, WRAP_POLICY_WRAP, ALIGN_RIGHT, ALIGN_LEFT, ALIGN_CENTER
# -----------------------------------------------------------------------------
# Rows of demo data: (id, title, description), with descriptions of varying
# length to exercise the truncate/wrap policies.
TEST_DATA = [
    ['1', 'Entry 1', 'Lorem ipsum dolor sit amet,'],
    ['2', 'Entry 2', 'consectetur adipisicing'],
    ['3', 'Entry 3', 'elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'],
    ['4', 'Entry 4', 'Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.'],
    ['5', 'Entry 5', 'Duis aute irure dolor'],
]
TEST_HEADERS = ['ID', 'Title', 'Description']
# Number of columns in every demo table.
NUM_COLS = len(TEST_HEADERS)
# Shared prompt used for all terminal output in this sample.
PROMPT = okaara.prompt.Prompt()
# -----------------------------------------------------------------------------
def main():
    """Show a menu of table demos and run the one the user selects."""
    # (menu label, demo function) pairs, in display order.
    mappings = [
        ['Table defaults: no column separator, truncate wrap policy, table the width of the terminal', basic],
        ['Smaller table width with cell wrap policy set to wrap', wrapped],
        ['Custom formatting for header divider and column separator', custom_formatting],
        ['Alternating colored rows and header row colors', colored],
        ['Table defaults, no header data specified', no_headers],
        ['First column is right aligned, second center aligned, custom column widths', alignments],
        ['Header columns centered', header_alignments],
    ]
    menu_values = [m[0] for m in mappings]
    selected = PROMPT.prompt_menu('Select the table to demonstrate:', menu_values=menu_values)
    PROMPT.write('')
    # prompt_menu returns the selected index, or ABORT if the user bailed out.
    if selected is okaara.prompt.ABORT:
        return
    func = mappings[selected][1]
    func()
# -- examples -----------------------------------------------------------------
def basic():
    """Demo: table rendered entirely with Table defaults."""
    for line in ('Table rendered using the defaults.', ''):
        PROMPT.write(line)
    Table(PROMPT, NUM_COLS).render(TEST_DATA, headers=TEST_HEADERS)
def wrapped():
    """Demo: narrow table whose cells wrap instead of truncating."""
    for line in ('Smaller table width with cell wrap policy set to wrap.', ''):
        PROMPT.write(line)
    Table(PROMPT, NUM_COLS, table_width=60,
          wrap_policy=WRAP_POLICY_WRAP).render(TEST_DATA, headers=TEST_HEADERS)
def custom_formatting():
    """Demo: custom header divider tick and column separator strings."""
    PROMPT.write('Custom formatting for header divider and column separator')
    PROMPT.write('')
    demo_table = Table(PROMPT, NUM_COLS, table_width=60,
                       wrap_policy=WRAP_POLICY_WRAP,
                       header_divider_tick='*', col_separator=' | ')
    demo_table.render(TEST_DATA, headers=TEST_HEADERS)
def colored():
    """Demo: colored header row plus alternating row colors."""
    for line in ('Alternating colored rows and header row colors', ''):
        PROMPT.write(line)
    demo_table = Table(PROMPT, NUM_COLS, table_width=60, wrap_policy=WRAP_POLICY_WRAP)
    demo_table.header_color = okaara.prompt.COLOR_BG_BLUE
    # Row colors cycle through this list as rows are rendered.
    demo_table.row_colors = [okaara.prompt.COLOR_LIGHT_BLUE,
                             okaara.prompt.COLOR_LIGHT_PURPLE,
                             okaara.prompt.COLOR_CYAN]
    demo_table.render(TEST_DATA, headers=TEST_HEADERS)
def no_headers():
    """Demo: default table rendered without a header row."""
    for line in ('Table defaults, no header data specified', ''):
        PROMPT.write(line)
    Table(PROMPT, NUM_COLS).render(TEST_DATA)
def alignments():
    """Demo: per-column alignment plus custom column widths."""
    PROMPT.write('First column is right aligned, second center aligned, custom column widths')
    PROMPT.write('')
    # Column 0 right-aligned and narrow; column 1 centered; the rest default.
    col_aligns = [ALIGN_RIGHT, ALIGN_CENTER] + [ALIGN_LEFT] * (NUM_COLS - 2)
    col_widths = [5] + [20] * (NUM_COLS - 1)
    Table(PROMPT, NUM_COLS, col_alignments=col_aligns, col_widths=col_widths,
          wrap_policy=WRAP_POLICY_WRAP).render(TEST_DATA, headers=TEST_HEADERS)
def header_alignments():
    """Demo: center every header cell."""
    PROMPT.write('Header columns centered')
    PROMPT.write('')
    centered = [ALIGN_CENTER] * NUM_COLS
    Table(PROMPT, NUM_COLS, table_width=60, wrap_policy=WRAP_POLICY_WRAP,
          header_col_alignments=centered).render(TEST_DATA, headers=TEST_HEADERS)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Script entry point.
    main()
| jdob/okaara | samples/sample_table.py | Python | gpl-2.0 | 4,650 |
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2014 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to templates as 'h'.
"""
import re
import simplejson
import time
import warnings
from datetime import datetime
from urllib import quote, unquote, urlencode
from urlparse import urlparse
from genshi.core import Stream
from paste.util import mimeparse
from pylons import app_globals, config, request, response, translator
from webhelpers import date, feedgenerator, html, number, misc, text, paginate, containers
from webhelpers.html import tags
from webhelpers.html.builder import literal
from webhelpers.html.converters import format_paragraphs
from mediadrop.lib.auth import viewable_media
from mediadrop.lib.compat import any, md5
from mediadrop.lib.filesize import format_filesize
from mediadrop.lib.i18n import (N_, _, format_date, format_datetime,
format_decimal, format_time)
from mediadrop.lib.players import (embed_player, embed_iframe, media_player,
pick_any_media_file, pick_podcast_media_file)
from mediadrop.lib.thumbnails import thumb, thumb_url
from mediadrop.lib.uri import (best_link_uri, download_uri, file_path,
pick_uri, pick_uris, web_uri)
from mediadrop.lib.util import (current_url, delete_files, merge_dicts,
redirect, url, url_for, url_for_media)
from mediadrop.lib.xhtml import (clean_xhtml, decode_entities, encode_entities,
excerpt_xhtml, line_break_xhtml, list_acceptable_xhtml, strip_xhtml,
truncate_xhtml)
from mediadrop.plugin.events import (meta_description, meta_keywords,
meta_robots_noindex, observes, page_title)
# Public API of this template-helpers module ('h' in templates); sorted
# below so the export list stays alphabetized however entries are added.
__all__ = [
    # Imports that should be exported:
    'any',
    'clean_xhtml',
    'current_url',
    'config', # is this appropriate to export here?
    'containers',
    'content_type_for_response',
    'date',
    'decode_entities',
    'encode_entities',
    'excerpt_xhtml',
    'feedgenerator',
    'format_date',
    'format_datetime',
    'format_decimal',
    'format_paragraphs',
    'format_time',
    'html',
    'line_break_xhtml',
    'list_acceptable_xhtml',
    'literal',
    'meta_description',
    'meta_keywords', # XXX: imported from mediadrop.plugin.events
    'meta_robots_noindex',
    'misc',
    'number',
    'page_title', # XXX: imported from mediadrop.plugin.events
    'paginate',
    'quote',
    'strip_xhtml',
    'tags',
    'text',
    'thumb', # XXX: imported from mediadrop.lib.thumbnails, for template use.
    'thumb_url', # XXX: imported from mediadrop.lib.thumbnails, for template use.
    'truncate_xhtml',
    'unquote',
    'url',
    'url_for',
    'url_for_media',
    'urlencode',
    'urlparse',
    'viewable_media',
    # Locally defined functions that should be exported:
    'append_class_attr',
    'best_translation',
    'can_edit',
    'delete_files',
    'doc_link',
    'duration_from_seconds',
    'duration_to_seconds',
    'filter_library_controls',
    'filter_vulgarity',
    'get_featured_category',
    'gravatar_from_email',
    'is_admin',
    'js',
    'mediadrop_version',
    'pick_any_media_file',
    'pick_podcast_media_file',
    'pretty_file_size',
    'redirect',
    'store_transient_message',
    'truncate',
    'wrap_long_words',
]
__all__.sort()
# Script aliases mapped to their minified (production) sources.
js_sources = {
    'mootools_more': '/scripts/third-party/mootools-1.2.4.4-more-yui-compressed.js',
    'mootools_core': '/scripts/third-party/mootools-1.2.6-core-2013-01-16.min.js',
}
# Unminified variants served instead when debug mode is on (see js()).
js_sources_debug = {
    'mootools_more': '/scripts/third-party/mootools-1.2.4.4-more.js',
    'mootools_core': '/scripts/third-party/mootools-1.2.6-core-2013-01-16.js',
}
def js(source):
    """Return the URL for the named script bundle.
    In debug mode the unminified variant is served when one exists.
    :param source: Key into ``js_sources`` (e.g. 'mootools_core').
    :rtype: str
    """
    if config['debug'] and source in js_sources_debug:
        return url_for(js_sources_debug[source])
    return url_for(js_sources[source])
def mediadrop_version():
    """Return the version string of the installed MediaDrop package."""
    # Imported locally; presumably to avoid an import cycle at module
    # load time — confirm before hoisting to the top of the file.
    import mediadrop
    return mediadrop.__version__
def duration_from_seconds(total_sec, shortest=True):
    """Return the HH:MM:SS duration for a given number of seconds.
    Does not support durations longer than 24 hours.
    :param total_sec: Number of seconds to convert into hours, mins, sec
    :type total_sec: int
    :param shortest: If True, return the shortest possible timestamp.
        Defaults to True.
    :rtype: unicode
    :returns: String HH:MM:SS, omitting the hours if less than one.
    """
    if not total_sec:
        return u''
    hours, mins, secs = time.gmtime(total_sec)[3:6]
    if not shortest:
        return u'%02d:%02d:%02d' % (hours, mins, secs)
    if hours:
        return u'%d:%02d:%02d' % (hours, mins, secs)
    return u'%d:%02d' % (mins, secs)
def duration_to_seconds(duration):
    """Return the number of seconds in a given HH:MM:SS.
    Does not support durations longer than 24 hours.
    :param duration: A HH:MM:SS or MM:SS formatted string
    :type duration: unicode
    :rtype: int
    :returns: seconds
    :raises ValueError: If the input doesn't matched the accepted formats
    """
    if not duration:
        return 0
    try:
        parsed = time.strptime(duration, '%H:%M:%S')
    except ValueError:
        # Fall back to MM:SS; a second ValueError propagates to the caller.
        parsed = time.strptime(duration, '%M:%S')
    return (parsed.tm_hour * 60 + parsed.tm_min) * 60 + parsed.tm_sec
def content_type_for_response(available_formats):
    """Pick the best response content type for the current request.
    Matches the request's Accept header against `available_formats` and
    falls back to the first format when the user agent accepts none of them.
    :param available_formats: mime types in order of server preference
    :type available_formats: list
    :rtype: str
    """
    content_type = mimeparse.best_match(
        available_formats,
        request.environ.get('HTTP_ACCEPT', '*/*')
    )
    # force a content-type: if the user agent did not specify any acceptable
    # content types (e.g. just 'text/html' like some bots) we still need to
    # set a content type, otherwise the WebOb will generate an exception
    # AttributeError: You cannot access Response.unicode_body unless charset
    # the only alternative to forcing a "bad" content type would be not to
    # deliver any content at all - however most bots are just faulty and they
    # requested something like 'sitemap.xml'.
    return content_type or available_formats[0]
def truncate(string, size, whole_word=True):
    """Truncate a plaintext string to roughly a given size (full words).
    Thin wrapper around ``webhelpers.text.truncate``.
    :param string: plaintext
    :type string: unicode
    :param size: Max length
    :param whole_word: Whether to prefer truncating at the end of a word.
        Defaults to True.
    :rtype: unicode
    """
    return text.truncate(string, size, whole_word=whole_word)
# Matches an HTML character reference such as &amp; or &#x2014;.
html_entities = re.compile(r'&(\#x?[0-9a-f]{2,6}|[a-z]{2,10});')
# Matches 5 word-ish characters (entities count as one) plus the following
# character. NOTE(review): `[^\b]` is "any char except backspace", not a
# word-boundary — possibly intended as \b; confirm before changing.
long_words = re.compile(r'((\w|' + html_entities.pattern + '){5})([^\b])')
def wrap_long_words(string, _encode_entities=True):
    """Inject <wbr> periodically to let the browser wrap the string.
    The <wbr /> tag is widely deployed and included in HTML5,
    but it isn't XHTML-compliant. See this for more info:
    http://dev.w3.org/html5/spec/text-level-semantics.html#the-wbr-element
    :type string: unicode
    :rtype: literal
    """
    if _encode_entities:
        string = encode_entities(string)
    def inject_wbr(match):
        groups = match.groups()
        return u'%s<wbr />%s' % (groups[0], groups[-1])
    string = long_words.sub(inject_wbr, string)
    # Also allow wrapping after every period.
    string = u'.<wbr />'.join(string.split('.'))
    return literal(string)
def attrs_to_dict(attrs):
    """Return a dict for any input that Genshi's py:attrs understands.
    For example::
        <link py:match="link" py:if="h.attrs_to_dict(select('@*'))['rel'] == 'alternate'">
    XXX: There is an edge case where a function may be passed in as a result of using a lambda in a
    Tosca Widgets form definition to generate a dynamic container_attr value.
    In this rare case we are checking for a callable, and using that value.
    :param attrs: A collection of attrs
    :type attrs: :class:`genshi.core.Stream`, :class:`genshi.core.Attrs`, :function:
        ``list`` of 2-tuples, ``dict``
    :returns: All attrs
    :rtype: ``dict``
    """
    if isinstance(attrs, Stream):
        # A Genshi selection stream: take its first (and only) event's attrs.
        attrs = list(attrs)
        attrs = attrs and attrs[0] or []
    if callable(attrs):
        # Lazily-evaluated attrs (e.g. a lambda from a ToscaWidgets form).
        attrs = attrs()
    if not isinstance(attrs, dict):
        attrs = dict(attrs or ())
    return attrs
def append_class_attr(attrs, class_name):
    """Append to the class for any input that Genshi's py:attrs understands.
    This is useful when using XIncludes and you want to append a class
    to the body tag, while still allowing all other tags to remain
    unchanged.
    For example::
        <body py:match="body" py:attrs="h.append_class_attr(select('@*'), 'extra_special')">
    :param attrs: A collection of attrs
    :type attrs: :class:`genshi.core.Stream`, :class:`genshi.core.Attrs`,
        ``list`` of 2-tuples, ``dict``
    :param class_name: The class name to append
    :type class_name: unicode
    :returns: All attrs
    :rtype: ``dict``
    """
    attrs = attrs_to_dict(attrs)
    existing = attrs.get('class', None)
    if not existing:
        # No class attribute yet (or it is empty): start with ours.
        attrs['class'] = class_name
        return attrs
    class_names = existing.split(' ')
    if class_name not in class_names:
        class_names.append(class_name)
        attrs['class'] = ' '.join(class_names)
    return attrs
# Matches whitespace runs between adjacent tags; NOTE(review): appears unused
# in this module — confirm external usage before removing.
spaces_between_tags = re.compile('>\s+<', re.M)
def get_featured_category():
    """Return the Category configured as 'featured', or None when unset."""
    from mediadrop.model import Category
    feat_id = request.settings['featured_category']
    if not feat_id:
        return None
    feat_id = int(feat_id)
    return Category.query.get(feat_id)
def filter_library_controls(query, show='latest'):
    """Refine a Media query according to the library's 'show' control.
    :param query: a Media query to refine
    :param show: one of 'latest', 'popular' or 'featured'; any other value
        leaves the query unchanged.
    :returns: (refined query, show) tuple
    """
    from mediadrop.model import Media
    if show == 'latest':
        query = query.order_by(Media.publish_on.desc())
    elif show == 'popular':
        query = query.order_by(Media.popularity_points.desc())
    elif show == 'featured':
        # Only applicable when a featured category is configured.
        featured_cat = get_featured_category()
        if featured_cat:
            query = query.in_category(featured_cat)
    return query, show
def has_permission(permission_name):
    """Return True if the logged in user has the given permission.
    This always returns false if the given user is not logged in."""
    # NOTE(review): `request.perm` is presumably attached by the auth layer
    # (see mediadrop.lib.auth) — confirm when touching auth setup.
    return request.perm.contains_permission(permission_name)
def is_admin():
    """Return True if the logged in user has the "admin" permission.
    For a default install a user has the "admin" permission if he is a member
    of the "admins" group.
    :returns: Whether or not the current user has "admin" permission.
    :rtype: bool
    """
    return has_permission(u'admin')
def can_edit(item=None):
    """Return True if the logged in user has the "edit" permission.
    For a default install this is true for all members of the "admins" group.
    :param item: unused parameter (deprecated)
    :type item: unimplemented
    :returns: Whether or not the current user has "edit" permission.
    :rtype: bool
    """
    if item is not None:
        # Warn callers still passing the obsolete argument.
        warnings.warn(u'"item" parameter for can_edit() is deprecated',
            DeprecationWarning, stacklevel=2)
    return has_permission(u'edit')
def gravatar_from_email(email, size):
    """Return the URL for a gravatar image matching the provided email address.
    :param email: the email address
    :type email: string or unicode or None
    :param size: the width (or height) of the desired image
    :type size: int
    """
    if email is None:
        email = ''
    # Gravatar identifies accounts by the MD5 digest of the email address.
    digest = md5(email).hexdigest()
    return "http://www.gravatar.com/avatar/%s?size=%d" % (digest, size)
def pretty_file_size(size):
    """Return the given file size in the largest possible unit of bytes.
    Falls back to a dash for zero/None sizes; the result is localized
    for the current request's locale.
    :rtype: unicode
    """
    if not size:
        return u'-'
    return format_filesize(size, locale=translator.locale)
def store_transient_message(cookie_name, text, time=None, path='/', **kwargs):
    """Store a JSON message dict in the named cookie.
    The cookie will expire at the end of the session, but should be
    explicitly deleted by whoever reads it.
    :param cookie_name: The cookie name for this message.
    :param text: Message text
    :param time: Optional time to report. Defaults to now.
    :param path: Optional cookie path
    :param kwargs: Passed into the JSON dict
    :returns: The message python dict
    :rtype: dict
    """
    msg = kwargs
    msg['text'] = text
    # BUG FIX: the previous version unconditionally overwrote the `time`
    # argument with the current time before using it, so a caller-supplied
    # value was always ignored. Honor it and only default to "now".
    msg['time'] = time or datetime.now().strftime('%H:%M, %B %d, %Y')
    new_data = quote(simplejson.dumps(msg))
    response.set_cookie(cookie_name, new_data, path=path)
    return msg
def doc_link(page=None, anchor='', text=N_('Help'), **kwargs):
    """Return a link (anchor element) to the documentation on the project site.
    XXX: Target attribute is not XHTML compliant.
    """
    attrs = {
        'href': 'http://mediadrop.net/docs/user/%s.html#%s' % (page, anchor),
        'target': '_blank',
    }
    attrs.update(kwargs)
    # Serialize attributes as name="value" pairs in insertion order.
    pairs = ['%s="%s"' % (name, value) for name, value in attrs.items()]
    return literal('<a %s>%s</a>' % (' '.join(pairs), _(text)))
@observes(page_title)
def default_page_title(default=None, **kwargs):
    """Build the <title> from the site name and the page's default title.
    With no configured display order, or with 'append', the page title comes
    first ("page | site"); any other configured value prepends the site name.
    """
    settings = request.settings
    title_order = settings.get('general_site_title_display_order', None)
    site_name = settings.get('general_site_name', default)
    if not default:
        return site_name
    if not title_order:
        return '%s | %s' % (default, site_name)
    elif title_order.lower() == 'append':
        return '%s | %s' % (default, site_name)
    else:
        return '%s | %s' % (site_name, default)
@observes(meta_description)
def default_media_meta_description(default=None, media=None, **kwargs):
    """Meta description for media pages: first 249 chars of the plain text."""
    if media and media != 'all' and media.description_plain:
        return truncate(media.description_plain, 249)
    return None
@observes(meta_keywords)
def default_media_meta_keywords(default=None, media=None, **kwargs):
    """Meta keywords for media pages: the media's first 15 tag names."""
    if media and media != 'all' and media.tags:
        return ', '.join(tag.name for tag in media.tags[:15])
    return None
def filter_vulgarity(text):
    """Return a sanitized version of the given string.

    Words are defined in the Comments settings and are replaced with
    asterisks matching the length of the filtered word.

    :param text: The string to be filtered.
    :type text: str
    :returns: The filtered string.
    :rtype: str
    """
    vulgar_words = request.settings.get('vulgarity_filtered_words', None)
    if not vulgar_words:
        return text
    # The setting is a comma-separated word list; build one case-insensitive
    # alternation pattern out of the non-empty entries.
    entries = (entry.strip() for entry in vulgar_words.split(','))
    pattern = '|'.join(re.escape(entry) for entry in entries if entry)
    censor = re.compile(pattern, re.IGNORECASE)
    # Replace each hit with asterisks of the same length.
    return censor.sub(lambda match: '*' * len(match.group(0)), text)
def best_translation(a, b):
    """Return the best translation given a preferred and a fallback string.

    If we have a translation for our preferred string 'a' or if we are using
    English, return 'a'. Otherwise, return a translation for the fallback
    string 'b'.

    :param a: The preferred string to translate.
    :param b: The fallback string to translate.
    :returns: The best translation
    :rtype: string
    """
    preferred = _(a)
    # A changed string means the catalog had a translation for 'a';
    # English needs no translation at all.
    if preferred != a or translator.locale.language == 'en':
        return preferred
    return _(b)
| timohtey/mediadrop_copy | mediadrop/lib/helpers.py | Python | gpl-3.0 | 15,572 |
import pprint
import re
import sys
import unittest
sys.path.insert(0, '..')
import pycparser.c_ast as c_ast
class Test_c_ast(unittest.TestCase):
    """Sanity checks for constructing pycparser AST nodes directly."""

    def test_BinaryOp(self):
        # Build the expression ``6 + joe`` and verify both children
        # retained their constructor attributes.
        b1 = c_ast.BinaryOp(
            op='+',
            left=c_ast.Constant(type='int', value='6'),
            right=c_ast.ID(name='joe'))

        # ``failUnless`` is a long-deprecated alias (removed in
        # Python 3.12); use the modern assertion methods instead.
        self.assertIsInstance(b1.left, c_ast.Constant)
        self.assertEqual(b1.left.type, 'int')
        self.assertEqual(b1.left.value, '6')
        self.assertIsInstance(b1.right, c_ast.ID)
        self.assertEqual(b1.right.name, 'joe')
class TestNodeVisitor(unittest.TestCase):
    # Exercises generic NodeVisitor dispatch over scalar and list children.

    class ConstantVisitor(c_ast.NodeVisitor):
        # Collects the value of every Constant node, in visit order.
        def __init__(self):
            self.values = []

        def visit_Constant(self, node):
            self.values.append(node.value)

    def test_scalar_children(self):
        # 6 + joe: only the left child is a Constant.
        b1 = c_ast.BinaryOp(
            op='+',
            left=c_ast.Constant(type='int', value='6'),
            right=c_ast.ID(name='joe'))

        cv = self.ConstantVisitor()
        cv.visit(b1)

        self.assertEqual(cv.values, ['6'])

        # Nest b1 under two further BinaryOps; the shared subtree b1 is
        # reachable twice from b3 (via b2 and directly), so its constant
        # is expected once per occurrence.
        b2 = c_ast.BinaryOp(
            op='*',
            left=c_ast.Constant(type='int', value='111'),
            right=b1)

        b3 = c_ast.BinaryOp(
            op='^',
            left=b2,
            right=b1)

        cv = self.ConstantVisitor()
        cv.visit(b3)

        self.assertEqual(cv.values, ['111', '6', '6'])

    def tests_list_children(self):
        # Constants reached through a Compound's block_items list; shared
        # nodes (c1, c2) are visited once per occurrence in the tree.
        c1 = c_ast.Constant(type='float', value='5.6')
        c2 = c_ast.Constant(type='char', value='t')

        b1 = c_ast.BinaryOp(
            op='+',
            left=c1,
            right=c2)

        b2 = c_ast.BinaryOp(
            op='-',
            left=b1,
            right=c2)

        comp = c_ast.Compound(
            block_items=[b1, b2, c1, c2])

        cv = self.ConstantVisitor()
        cv.visit(comp)

        self.assertEqual(cv.values,
                         ['5.6', 't', '5.6', 't', 't', '5.6', 't'])
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| songjiguo/interface-code-generator | tests/test_c_ast.py | Python | bsd-3-clause | 2,224 |
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from ..view.Ui_DamageForm import *
from ..model.DatabaseHelper import *
from ..model.core_classes import CoDamage
from ..utils.PluginUtils import *
from ..utils.LayerUtils import *
from DamageDialog import *
class DamageForm(QWidget, Ui_DamageForm, DatabaseHelper):
    """Form widget listing the damage records of a single asset.

    Shows all ``co_damage`` rows belonging to the asset identified by
    (asset_type, asset_id) in a table widget and lets the user edit a
    damage record or open the linked Maxcom complaint entry.
    """

    def __init__(self, damaged_asset_type, damaged_asset_id, parent=None):
        super(DamageForm, self).__init__(parent)
        DatabaseHelper.__init__(self)
        self.setupUi(self)

        self.__damaged_asset_id = damaged_asset_id
        self.__damaged_asset_type = damaged_asset_type

        try:
            # run_query wraps the callable with the plugin's DB error handling.
            PluginUtils.run_query(self.__populate_damage_twidget)
        except (WntException, SQLAlchemyError) as e:
            PluginUtils.show_error(self, self.tr('Database Error'), e.args[0])
            # Nothing could be loaded, so the action buttons are useless.
            self.edit_damage_button.setEnabled(False)
            self.view_maxcom_button.setEnabled(False)

    def __populate_damage_twidget(self):
        # (Re)fill the table widget with the asset's damage records,
        # ordered by occurrence timestamp.
        self.damage_twidget.clearContents()
        self.damage_twidget.setRowCount(0)

        count = self.session.query(CoDamage).filter(CoDamage.asset_type == self.__damaged_asset_type).\
            filter(CoDamage.asset_id == self.__damaged_asset_id).count()
        self.damage_twidget.setRowCount(count)

        i = 0
        for co_damage in self.session.query(CoDamage).filter(CoDamage.asset_type == self.__damaged_asset_type).\
                filter(CoDamage.asset_id == self.__damaged_asset_id). \
                order_by(CoDamage.occurrence_timestamp):
            # Column 0 shows the Maxcom control number and carries the
            # primary key as item data for later lookups.
            control_no = '' if co_damage.control_no is None else co_damage.control_no
            item = QTableWidgetItem('{}'.format(control_no))
            item.setData(Qt.UserRole, co_damage.id)
            self.damage_twidget.setItem(i, 0, item)

            if co_damage.received_from is not None:
                item = QTableWidgetItem(co_damage.received_from)
                self.damage_twidget.setItem(i, 1, item)
            if co_damage.occurrence_timestamp is not None:
                item = QTableWidgetItem(co_damage.occurrence_timestamp.date().isoformat())
                self.damage_twidget.setItem(i, 2, item)
            if co_damage.registration_timestamp is not None:
                item = QTableWidgetItem(co_damage.registration_timestamp.date().isoformat())
                self.damage_twidget.setItem(i, 3, item)
            if co_damage.repair_timestamp is not None:
                item = QTableWidgetItem(co_damage.repair_timestamp.date().isoformat())
                self.damage_twidget.setItem(i, 4, item)
            if co_damage.repaired_by is not None:
                item = QTableWidgetItem(co_damage.repaired_by)
                self.damage_twidget.setItem(i, 5, item)
            if co_damage.repair_task is not None:
                item = QTableWidgetItem(co_damage.repair_task)
                self.damage_twidget.setItem(i, 6, item)
            # Classifier relations: display the human-readable description.
            if co_damage.cl_damage_type is not None:
                description = co_damage.cl_damage_type.description
                item = QTableWidgetItem(description)
                self.damage_twidget.setItem(i, 7, item)
            if co_damage.cl_damage_cause is not None:
                description = co_damage.cl_damage_cause.description
                item = QTableWidgetItem(description)
                self.damage_twidget.setItem(i, 8, item)
            if co_damage.cl_damage_status is not None:
                description = co_damage.cl_damage_status.description
                item = QTableWidgetItem(description)
                self.damage_twidget.setItem(i, 9, item)
            item = QTableWidgetItem(co_damage.note)
            self.damage_twidget.setItem(i, 10, item)
            i += 1

        self.damage_twidget.resizeColumnsToContents()
        self.damage_twidget.horizontalHeader().setStretchLastSection(True)

    @pyqtSlot()
    def on_view_maxcom_button_clicked(self):
        # Open the Maxcom complaint for every selected row that has a
        # non-empty control number in column 0.
        if len(self.damage_twidget.selectionModel().selectedRows()) == 0:
            return
        for index in self.damage_twidget.selectionModel().selectedRows():
            row = index.row()
            control_no = self.damage_twidget.item(row, 0).text()
            if len(control_no) > 0:
                PluginUtils.open_maxcom(int(control_no), 'COMPLAINT_DAMAGE')

    @pyqtSlot()
    def on_edit_damage_button_clicked(self):
        # Open the edit dialog for each selected damage record; on accept,
        # reload the table and restore the selection.
        if len(self.damage_twidget.selectionModel().selectedRows()) == 0:
            return
        for index in self.damage_twidget.selectionModel().selectedRows():
            damage_id = self.damage_twidget.item(index.row(), 0).data(Qt.UserRole)
            layer = LayerUtils.layer_by_data_source('core', 'co_damage')
            dlg = DamageDialog(layer, damage_id, True, self)
            if dlg.exec_() == QDialog.Accepted:
                self.__reload_and_select_damage(damage_id)

    def __reload_and_select_damage(self, damage_id):
        # Re-query the damage list; on DB failure show the error and keep
        # the stale table contents rather than crashing.
        try:
            PluginUtils.run_query(self.__populate_damage_twidget)
        except (WntException, SQLAlchemyError) as e:
            PluginUtils.show_error(self, self.tr('Database Error'), e.args[0])
            return
        self.__select_damage(damage_id)
        self.damage_twidget.setFocus()

    def __select_damage(self, damage_id):
        # Select the table row whose column-0 item data matches damage_id.
        for row in range(self.damage_twidget.rowCount()):
            damage_id_2 = self.damage_twidget.item(row, 0).data(Qt.UserRole)
            if damage_id_2 == damage_id:
                self.damage_twidget.selectRow(row)

    def keyPressEvent(self, e):
        # F1 opens the context-help page for this form.
        if e.key() == Qt.Key_F1:
            PluginUtils.show_help("Add_Edit_Damage.htm")
| gc-i/wntOS | controller/DamageForm.py | Python | gpl-3.0 | 5,612 |
#!/usr/bin/env python
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import urllib2
import unittest
# Base URL of the webserver under test; setUp accepts an override.
BASEURL = "http://127.0.0.1:8080"
class TestYourWebserver(unittest.TestCase):
    """Black-box tests run against a webserver listening at BASEURL."""

    def setUp(self, baseurl=BASEURL):
        """Remember the base URL the tests should hit."""
        self.baseurl = baseurl

    def test_css(self):
        # A stylesheet must be served with status 200 and a CSS mimetype.
        url = self.baseurl + "/base.css"
        req = urllib2.urlopen(url, None, 3)
        self.assertTrue(req.getcode() == 200, "200 OK Not FOUND!")
        self.assertTrue(req.info().gettype() == "text/css",
                        ("Bad mimetype for css! %s" % req.info().gettype()))

    def test_get_root(self):
        # GET / should serve the index page with status 200.
        url = self.baseurl + "/"
        req = urllib2.urlopen(url, None, 3)
        self.assertTrue(req.getcode() == 200, "200 OK Not FOUND!")

    def test_get_indexhtml(self):
        # GET /index.html should also serve the index page directly.
        url = self.baseurl + "/index.html"
        req = urllib2.urlopen(url, None, 3)
        self.assertTrue(req.getcode() == 200, "200 OK Not FOUND!")

    def test_get_404(self):
        # BUG FIX: the original asserted False *inside* the try block, so the
        # `else` clause could never run (the assert itself raised), leaving
        # dead code.  Keep the try body down to the one call that can raise
        # and use `else` for the no-exception (= server bug) path.
        url = self.baseurl + "/do-not-implement-this-page-it-is-not-found"
        try:
            urllib2.urlopen(url, None, 3)
        except urllib2.HTTPError as e:
            self.assertTrue(e.getcode() == 404,
                            ("404 Not FOUND! %d" % e.getcode()))
        else:
            self.fail("Should have thrown an HTTP Error!")
if __name__ == '__main__':
    # Run the test suite when the script is executed directly.
    unittest.main()
| tbrockman/CMPUT404-assignment-webserver | freetests.py | Python | apache-2.0 | 1,988 |
import tempfile
import sys
import os
import warnings
import numpy as np
from nose import SkipTest
from numpy.core import *
from numpy.compat import asbytes
from numpy.testing.utils import WarningManager
from numpy.compat import asbytes, getexception, strchar
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = () if sys.version_info[:2] > (3, 2) else None
class TestFlags(TestCase):
    # Checks the ndarray ``.flags`` attribute: toggling writeability and the
    # derived flag combinations for a fresh 1-D C-contiguous array.

    def setUp(self):
        self.a = arange(10)

    def test_writeable(self):
        mydict = locals()
        self.a.flags.writeable = False
        # Both item assignment and itemset must refuse to write while the
        # array is read-only.  runstring (numpy.testing) executes the code
        # string so the failing statement happens inside assertRaises.
        self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
        self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
        self.a.flags.writeable = True
        # Re-enabling writeability makes plain assignment work again.
        self.a[0] = 5
        self.a[0] = 0

    def test_otherflags(self):
        # A fresh arange() result is C- but not F-contiguous, well behaved,
        # owns its data and is writable/aligned.
        assert_equal(self.a.flags.carray, True)
        assert_equal(self.a.flags.farray, False)
        assert_equal(self.a.flags.behaved, True)
        assert_equal(self.a.flags.fnc, False)
        assert_equal(self.a.flags.forc, True)
        assert_equal(self.a.flags.owndata, True)
        assert_equal(self.a.flags.writeable, True)
        assert_equal(self.a.flags.aligned, True)
        assert_equal(self.a.flags.updateifcopy, False)
class TestHash(TestCase):
    # see #3793
    def test_int(self):
        # For every signed/unsigned pair, hashing a numpy integer must agree
        # with hashing the equivalent Python int at the type's boundary
        # values (-2**i, 2**(i-1), 2**i - 1 for each bit count i below s).
        for st, ut, s in [(np.int8, np.uint8, 8),
                          (np.int16, np.uint16, 16),
                          (np.int32, np.uint32, 32),
                          (np.int64, np.uint64, 64)]:
            for i in range(1, s):
                assert_equal(hash(st(-2**i)), hash(-2**i),
                             err_msg="%r: -2**%d" % (st, i))
                assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (st, i - 1))
                assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (st, i))

                # NOTE(review): `i` is deliberately shrunk for the unsigned
                # checks below (range() resets it next iteration) —
                # presumably to keep the tested values in range; confirm.
                i = max(i - 1, 1)
                assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (ut, i - 1))
                assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
    # Basic ndarray attributes (shape, strides, dtype, size, base) and the
    # strides-based construction/validation paths.

    def setUp(self):
        self.one = arange(10)
        self.two = arange(20).reshape(4,5)
        self.three = arange(60,dtype=float64).reshape(2,5,6)

    def test_attributes(self):
        assert_equal(self.one.shape, (10,))
        assert_equal(self.two.shape, (4,5))
        assert_equal(self.three.shape, (2,5,6))
        # shape is assignable in place and must round-trip.
        self.three.shape = (10,3,2)
        assert_equal(self.three.shape, (10,3,2))
        self.three.shape = (2,5,6)
        # C-contiguous strides: each stride is itemsize times the product
        # of the trailing extents.
        assert_equal(self.one.strides, (self.one.itemsize,))
        num = self.two.itemsize
        assert_equal(self.two.strides, (5*num, num))
        num = self.three.itemsize
        assert_equal(self.three.strides, (30*num, 6*num, num))
        assert_equal(self.one.ndim, 1)
        assert_equal(self.two.ndim, 2)
        assert_equal(self.three.ndim, 3)
        num = self.two.itemsize
        assert_equal(self.two.size, 20)
        assert_equal(self.two.nbytes, 20*num)
        assert_equal(self.two.itemsize, self.two.dtype.itemsize)
        # .two is a reshaped view, so its base compares equal to arange(20).
        assert_equal(self.two.base, arange(20))

    def test_dtypeattr(self):
        assert_equal(self.one.dtype, dtype(int_))
        assert_equal(self.three.dtype, dtype(float_))
        assert_equal(self.one.dtype.char, 'l')
        assert_equal(self.three.dtype.char, 'd')
        # The first character of dtype.str is the byte-order flag.
        self.assertTrue(self.three.dtype.str[0] in '<>')
        assert_equal(self.one.dtype.str[1], 'i')
        assert_equal(self.three.dtype.str[1], 'f')

    def test_stridesattr(self):
        x = self.one

        def make_array(size, offset, strides):
            # Build an ndarray over x's buffer with explicit strides; the
            # constructor must reject combinations that walk out of bounds.
            return ndarray([size], buffer=x, dtype=int,
                           offset=offset*x.itemsize,
                           strides=strides*x.itemsize)

        assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(ValueError, make_array, 8, 3, 1)
        #self.assertRaises(ValueError, make_array, 8, 3, 0)
        #self.assertRaises(ValueError, lambda: ndarray([1], strides=4))

    def test_set_stridesattr(self):
        x = self.one

        def make_array(size, offset, strides):
            try:
                r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
            except:
                # Construction failures are remapped to RuntimeError so the
                # assertions below can tell them apart from strides errors.
                raise RuntimeError(getexception())
            # Chained assignment (r.strides = strides = ...); assigning
            # .strides after construction runs the same bounds validation
            # as the constructor.
            r.strides = strides=strides*x.itemsize
            return r

        assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
        assert_equal(make_array(7,3,1), array([3, 4, 5, 6, 7, 8, 9]))
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(RuntimeError, make_array, 8, 3, 1)
        #self.assertRaises(ValueError, make_array, 8, 3, 0)

    def test_fill(self):
        # fill() must agree with broadcast assignment for every scalar kind.
        for t in "?bhilqpBHILQPfdgFDGO":
            x = empty((3,2,1), t)
            y = empty((3,2,1), t)
            x.fill(1)
            y[...] = 1
            assert_equal(x,y)

    def test_fill_struct_array(self):
        # Filling from a scalar
        x = array([(0,0.0), (1,1.0)], dtype='i4,f8')
        x.fill(x[0])
        assert_equal(x['f1'][1], x['f1'][0])
        # Filling from a tuple that can be converted
        # to a scalar
        x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
        x.fill((3.5, -2))
        assert_array_equal(x['a'], [3.5, 3.5])
        assert_array_equal(x['b'], [-2, -2])
class TestAssignment(TestCase):
    # Broadcasting rules for in-place assignment, including the legacy
    # output-side broadcasting kept for numpy <= 1.5 compatibility.

    def test_assignment_broadcasting(self):
        a = np.arange(6).reshape(2,3)

        # Broadcasting the input to the output
        a[...] = np.arange(3)
        assert_equal(a, [[0,1,2],[0,1,2]])
        a[...] = np.arange(2).reshape(2,1)
        assert_equal(a, [[0,0,0],[1,1,1]])

        # For compatibility with <= 1.5, a limited version of broadcasting
        # the output to the input.
        #
        # This behavior is inconsistent with NumPy broadcasting
        # in general, because it only uses one of the two broadcasting
        # rules (adding a new "1" dimension to the left of the shape),
        # applied to the output instead of an input. In NumPy 2.0, this kind
        # of broadcasting assignment will likely be disallowed.
        a[...] = np.arange(6)[::-1].reshape(1,2,3)
        assert_equal(a, [[5,4,3],[2,1,0]])
        # The other type of broadcasting would require a reduction operation.

        def assign(a,b):
            a[...] = b
        # A 3-D value that cannot be collapsed onto the 2-D target must fail.
        assert_raises(ValueError, assign, a, np.arange(12).reshape(2,2,3))
class TestDtypedescr(TestCase):
    """Construction of dtype descriptors from type-string codes."""

    def test_construction(self):
        # Each character code must produce the same descriptor as the
        # corresponding scalar type object.
        for code, scalar_type in (('i4', int32), ('f8', float64)):
            assert_equal(dtype(code), dtype(scalar_type))
class TestZeroRank(TestCase):
    # Indexing behavior of zero-rank (0-d) arrays: numeric and object dtype.

    def setUp(self):
        self.d = array(0), array('x', object)

    def test_ellipsis_subscript(self):
        a,b = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(b[...], 'x')
        # Indexing a 0-d array with ... returns the array object itself.
        self.assertTrue(a[...] is a)
        self.assertTrue(b[...] is b)

    def test_empty_subscript(self):
        a,b = self.d
        # Indexing with an empty tuple extracts the scalar element.
        self.assertEqual(a[()], 0)
        self.assertEqual(b[()], 'x')
        self.assertTrue(type(a[()]) is a.dtype.type)
        self.assertTrue(type(b[()]) is str)

    def test_invalid_subscript(self):
        a,b = self.d
        # Integer and array indices are invalid on a 0-d array.
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[0], b)
        self.assertRaises(IndexError, lambda x: x[array([], int)], a)
        self.assertRaises(IndexError, lambda x: x[array([], int)], b)

    def test_ellipsis_subscript_assignment(self):
        a,b = self.d
        a[...] = 42
        self.assertEqual(a, 42)
        b[...] = ''
        self.assertEqual(b.item(), '')

    def test_empty_subscript_assignment(self):
        a,b = self.d
        a[()] = 42
        self.assertEqual(a, 42)
        b[()] = ''
        self.assertEqual(b.item(), '')

    def test_invalid_subscript_assignment(self):
        a,b = self.d
        def assign(x, i, v):
            x[i] = v
        self.assertRaises(IndexError, assign, a, 0, 42)
        self.assertRaises(IndexError, assign, b, 0, '')
        # Assigning a string into an integer 0-d array fails conversion.
        self.assertRaises(ValueError, assign, a, (), '')

    def test_newaxis(self):
        a,b = self.d
        # Each newaxis in the index adds one dimension of extent 1.
        self.assertEqual(a[newaxis].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ...].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ..., newaxis].shape, (1,1))
        self.assertEqual(a[..., newaxis, newaxis].shape, (1,1))
        self.assertEqual(a[newaxis, newaxis, ...].shape, (1,1))
        self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)

    def test_invalid_newaxis(self):
        a,b = self.d
        def subscript(x, i): x[i]
        # Mixing newaxis with an integer index (or using far too many
        # newaxes) on a 0-d array must raise.
        self.assertRaises(IndexError, subscript, a, (newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (newaxis,)*50)

    def test_constructor(self):
        x = ndarray(())
        x[()] = 5
        self.assertEqual(x[()], 5)
        # y shares x's buffer, so writing through y is visible in x.
        y = ndarray((),buffer=x)
        y[()] = 6
        self.assertEqual(x[()], 6)

    def test_output(self):
        # A 0-d output cannot receive the broadcast 1-d result.
        x = array(2)
        self.assertRaises(ValueError, add, x, [1], x)
class TestScalarIndexing(TestCase):
    # Indexing behavior of numpy scalars (obtained by indexing an array),
    # plus self-overlapping slice assignment semantics.

    def setUp(self):
        self.d = array([0,1])[0]

    def test_ellipsis_subscript(self):
        a = self.d
        # Scalars support [...] and yield a 0-d result.
        self.assertEqual(a[...], 0)
        self.assertEqual(a[...].shape,())

    def test_empty_subscript(self):
        a = self.d
        self.assertEqual(a[()], 0)
        self.assertEqual(a[()].shape,())

    def test_invalid_subscript(self):
        a = self.d
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[array([], int)], a)

    def test_invalid_subscript_assignment(self):
        a = self.d
        def assign(x, i, v):
            x[i] = v
        # Scalars are immutable: item assignment is a TypeError.
        self.assertRaises(TypeError, assign, a, 0, 42)

    def test_newaxis(self):
        a = self.d
        # Each newaxis adds one dimension of extent 1 to the scalar.
        self.assertEqual(a[newaxis].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ...].shape, (1,))
        self.assertEqual(a[..., newaxis].shape, (1,))
        self.assertEqual(a[newaxis, ..., newaxis].shape, (1,1))
        self.assertEqual(a[..., newaxis, newaxis].shape, (1,1))
        self.assertEqual(a[newaxis, newaxis, ...].shape, (1,1))
        self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)

    def test_invalid_newaxis(self):
        a = self.d
        def subscript(x, i): x[i]
        self.assertRaises(IndexError, subscript, a, (newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (newaxis,)*50)

    def test_overlapping_assignment(self):
        # Assignments where source and destination views overlap must
        # behave as if the source were copied out first.

        # With positive strides
        a = np.arange(4)
        a[:-1] = a[1:]
        assert_equal(a, [1,2,3,3])

        a = np.arange(4)
        a[1:] = a[:-1]
        assert_equal(a, [0,0,1,2])

        # With positive and negative strides
        a = np.arange(4)
        a[:] = a[::-1]
        assert_equal(a, [3,2,1,0])

        a = np.arange(6).reshape(2,3)
        a[::-1,:] = a[:,::-1]
        assert_equal(a, [[5,4,3],[2,1,0]])

        a = np.arange(6).reshape(2,3)
        a[::-1,::-1] = a[:,::-1]
        assert_equal(a, [[3,4,5],[0,1,2]])

        # With just one element overlapping
        a = np.arange(5)
        a[:3] = a[2:]
        assert_equal(a, [2,3,4,3,4])

        a = np.arange(5)
        a[2:] = a[:3]
        assert_equal(a, [0,1,0,1,2])

        a = np.arange(5)
        a[2::-1] = a[2:]
        assert_equal(a, [4,3,2,3,4])

        a = np.arange(5)
        a[2:] = a[2::-1]
        assert_equal(a, [0,1,2,1,0])

        a = np.arange(5)
        a[2::-1] = a[:1:-1]
        assert_equal(a, [2,3,4,3,4])

        a = np.arange(5)
        a[:1:-1] = a[2::-1]
        assert_equal(a, [0,1,0,1,2])
class TestCreation(TestCase):
    # Array construction edge cases: __array__ protocol failures, string
    # conversion, the 'V' (void) dtype, and broken sequence protocols.

    def test_from_attribute(self):
        # An object whose __array__ returns None must be rejected.
        class x(object):
            def __array__(self, dtype=None):
                pass
        self.assertRaises(ValueError, array, x())

    def test_from_string(self) :
        # Numeric strings convert to the target dtype for every integer
        # and float type code.
        types = np.typecodes['AllInteger'] + np.typecodes['Float']
        nstr = ['123','123']
        result = array([123, 123], dtype=int)
        for type in types :
            msg = 'String conversion for %s' % type
            assert_equal(array(nstr, dtype=type), result, err_msg=msg)

    def test_void(self):
        arr = np.array([], dtype='V')
        assert_equal(arr.dtype.kind, 'V')

    def test_non_sequence_sequence(self):
        """Should not segfault.

        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a ValueError. At some point we may raise a warning instead
        of an error in the Fail case.
        """
        class Fail(object):
            def __len__(self):
                return 1

            def __getitem__(self, index):
                raise ValueError()

        class Map(object):
            def __len__(self):
                return 1

            def __getitem__(self, index):
                raise KeyError()

        # A KeyError from __getitem__ marks the object as a mapping, so it
        # is stored as a single object element.
        a = np.array([Map()])
        assert_(a.shape == (1,))
        assert_(a.dtype == np.dtype(object))
        assert_raises(ValueError, np.array, [Fail()])
class TestStructured(TestCase):
    # Structured/record arrays with sub-array fields: field access through
    # transposes, and elementwise comparison with broadcasting.
    # NOTE(review): np.object (used below) is a removed alias in modern
    # numpy; this module targets an older numpy release.

    def test_subarray_field_access(self):
        a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
        a['a'] = np.arange(60).reshape(3, 5, 2, 2)

        # Since the subarray is always in C-order, these aren't equal
        assert_(np.any(a['a'].T != a.T['a']))

        # In Fortran order, the subarray gets appended
        # like in all other cases, not prepended as a special case
        b = a.copy(order='F')
        assert_equal(a['a'].shape, b['a'].shape)
        assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)

    def test_subarray_comparison(self):
        # Check that comparisons between record arrays with
        # multi-dimensional field types work properly
        a = np.rec.fromrecords(
            [([1,2,3],'a', [[1,2],[3,4]]),([3,3,3],'b',[[0,0],[0,0]])],
            dtype=[('a', ('f4',3)), ('b', np.object), ('c', ('i4',(2,2)))])
        b = a.copy()
        assert_equal(a==b, [True,True])
        assert_equal(a!=b, [False,False])
        b[1].b = 'c'
        assert_equal(a==b, [True,False])
        assert_equal(a!=b, [False,True])
        # Perturbing any element of the 'a' sub-array must break equality.
        for i in range(3):
            b[0].a = a[0].a
            b[0].a[i] = 5
            assert_equal(a==b, [False,False])
            assert_equal(a!=b, [True,True])
        # Likewise for each element of the 2x2 'c' sub-array.
        for i in range(2):
            for j in range(2):
                b = a.copy()
                b[0].c[i,j] = 10
                assert_equal(a==b, [False,True])
                assert_equal(a!=b, [True,False])

        # Check that broadcasting with a subarray works
        a = np.array([[(0,)],[(1,)]],dtype=[('a','f8')])
        b = np.array([(0,),(0,),(1,)],dtype=[('a','f8')])
        assert_equal(a==b, [[True, True, False], [False, False, True]])
        assert_equal(b==a, [[True, True, False], [False, False, True]])
        a = np.array([[(0,)],[(1,)]],dtype=[('a','f8',(1,))])
        b = np.array([(0,),(0,),(1,)],dtype=[('a','f8',(1,))])
        assert_equal(a==b, [[True, True, False], [False, False, True]])
        assert_equal(b==a, [[True, True, False], [False, False, True]])
        a = np.array([[([0,0],)],[([1,1],)]],dtype=[('a','f8',(2,))])
        b = np.array([([0,0],),([0,1],),([1,1],)],dtype=[('a','f8',(2,))])
        assert_equal(a==b, [[True, False, False], [False, False, True]])
        assert_equal(b==a, [[True, False, False], [False, False, True]])

        # Check that broadcasting Fortran-style arrays with a subarray work
        a = np.array([[([0,0],)],[([1,1],)]],dtype=[('a','f8',(2,))], order='F')
        b = np.array([([0,0],),([0,1],),([1,1],)],dtype=[('a','f8',(2,))])
        assert_equal(a==b, [[True, False, False], [False, False, True]])
        assert_equal(b==a, [[True, False, False], [False, False, True]])

        # Check that incompatible sub-array shapes don't result to broadcasting
        x = np.zeros((1,), dtype=[('a', ('f4', (1,2))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        assert_equal(x == y, False)
        x = np.zeros((1,), dtype=[('a', ('f4', (2,1))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        assert_equal(x == y, False)
class TestBool(TestCase):
    """numpy boolean scalars are interned singleton objects."""

    def test_test_interning(self):
        # bool_(0)/bool_(False) and bool_(1)/bool_(True) must each produce
        # the one shared False/True singleton object.
        false_scalar = bool_(0)
        self.assertTrue(false_scalar is bool_(False))
        true_scalar = bool_(1)
        self.assertTrue(true_scalar is bool_(True))
        # Indexing boolean arrays hands back the same singletons.
        self.assertTrue(array([True])[0] is true_scalar)
        self.assertTrue(array(True)[()] is true_scalar)
class TestMethods(TestCase):
def test_test_round(self):
assert_equal(array([1.2,1.5]).round(), [1,2])
assert_equal(array(1.5).round(), 2)
assert_equal(array([12.2,15.5]).round(-1), [10,20])
assert_equal(array([12.15,15.51]).round(1), [12.2,15.5])
def test_transpose(self):
a = array([[1,2],[3,4]])
assert_equal(a.transpose(), [[1,3],[2,4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0,0))
self.assertRaises(ValueError, lambda: a.transpose(0,1,2))
    def test_sort(self):
        # In-place ndarray.sort across every dtype family and each sort
        # kind ('q'uicksort, 'm'ergesort, 'h'eapsort), plus nan ordering
        # and axis handling.

        # test ordering for floats and complex containing nans. It is only
        # necessary to check the lessthan comparison, so sorts that
        # only follow the insertion sort path are sufficient. We only
        # test doubles and complex doubles as the logic is the same.

        # check doubles
        msg = "Test real sort order with nans"
        a = np.array([np.nan, 1, 0])
        b = sort(a)
        # nans sort to the end, so the sorted result is the reverse of a.
        assert_equal(b, a[::-1], msg)

        # check complex
        msg = "Test complex sort order with nans"
        a = np.zeros(9, dtype=np.complex128)
        a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
        a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
        b = sort(a)
        assert_equal(b, a[::-1], msg)

        # all c scalar sorts use the same code with different types
        # so it suffices to run a quick check with one type. The number
        # of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall over to insertion
        # sort for small arrays.
        a = np.arange(101)
        b = a[::-1].copy()
        for kind in ['q','m','h'] :
            msg = "scalar sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test complex sorts. These use the same code as the scalars
        # but the compare fuction differs.
        ai = a*1j + 1
        bi = b*1j + 1
        for kind in ['q','m','h'] :
            msg = "complex sort, real part == 1, kind=%s" % kind
            c = ai.copy();
            c.sort(kind=kind)
            assert_equal(c, ai, msg)
            c = bi.copy();
            c.sort(kind=kind)
            assert_equal(c, ai, msg)
        ai = a + 1j
        bi = b + 1j
        for kind in ['q','m','h'] :
            msg = "complex sort, imag part == 1, kind=%s" % kind
            c = ai.copy();
            c.sort(kind=kind)
            assert_equal(c, ai, msg)
            c = bi.copy();
            c.sort(kind=kind)
            assert_equal(c, ai, msg)

        # test string sorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)])
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h'] :
            msg = "string sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test unicode sorts.
        # NOTE(review): np.unicode/np.object below are removed aliases in
        # modern numpy; this module targets an older numpy release.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
        b = a[::-1].copy()
        for kind in ['q', 'm', 'h'] :
            msg = "unicode sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test object array sorts.
        a = np.empty((101,), dtype=np.object)
        a[:] = range(101)
        b = a[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "object sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test record array sorts.
        dt = np.dtype([('f',float),('i',int)])
        a = array([(i,i) for i in range(101)], dtype = dt)
        b = a[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "object sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test datetime64 sorts.
        a = np.arange(0, 101, dtype='datetime64[D]')
        b = a[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "datetime64 sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # test timedelta64 sorts.
        a = np.arange(0, 101, dtype='timedelta64[D]')
        b = a[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "timedelta64 sort, kind=%s" % kind
            c = a.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)
            c = b.copy();
            c.sort(kind=kind)
            assert_equal(c, a, msg)

        # check axis handling. This should be the same for all type
        # specific sorts, so we only check it for one type and one kind
        a = np.array([[3,2],[1,0]])
        b = np.array([[1,0],[3,2]])
        c = np.array([[2,3],[0,1]])
        d = a.copy()
        d.sort(axis=0)
        assert_equal(d, b, "test sort with axis=0")
        d = a.copy()
        d.sort(axis=1)
        assert_equal(d, c, "test sort with axis=1")
        d = a.copy()
        d.sort()
        assert_equal(d, c, "test sort with default axis")
    def test_sort_order(self):
        # Test sorting an array with fields
        x1=np.array([21,32,14])
        x2=np.array(['my','first','name'])
        x3=np.array([3.1,4.5,6.2])
        r=np.rec.fromarrays([x1,x2,x3],names='id,word,number')

        # Sorting by each field must reorder all columns together.
        r.sort(order=['id'])
        assert_equal(r.id, array([14,21,32]))
        assert_equal(r.word, array(['name','my','first']))
        assert_equal(r.number, array([6.2,3.1,4.5]))

        r.sort(order=['word'])
        assert_equal(r.id, array([32,21,14]))
        assert_equal(r.word, array(['first','my','name']))
        assert_equal(r.number, array([4.5,3.1,6.2]))

        r.sort(order=['number'])
        assert_equal(r.id, array([21,32,14]))
        assert_equal(r.word, array(['my','first','name']))
        assert_equal(r.number, array([3.1,4.5,6.2]))

        # Deliberately pick the NON-native byte order so the sort has to
        # go through byte-swapped comparisons.
        if sys.byteorder == 'little':
            strtype = '>i2'
        else:
            strtype = '<i2'
        mydtype = [('name', strchar + '5'),('col2',strtype)]
        r = np.array([('a', 1),('b', 255), ('c', 3), ('d', 258)],
                     dtype= mydtype)
        r.sort(order='col2')
        # 255 and 258 differ only in their high/low bytes, so a byte-order
        # bug would order them incorrectly.
        assert_equal(r['col2'], [1, 3, 255, 258])
        assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
                                 dtype=mydtype))
    def test_argsort(self):
        # ndarray.argsort across every dtype family and sort kind, plus
        # axis handling and stability of mergesort ('m').
        # NOTE(review): np.unicode/np.object/np.complex below are removed
        # aliases in modern numpy; this module targets an older release.

        # all c scalar argsorts use the same code with different types
        # so it suffices to run a quick check with one type. The number
        # of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall over to insertion
        # sort for small arrays.
        a = np.arange(101)
        b = a[::-1].copy()
        for kind in ['q','m','h'] :
            msg = "scalar argsort, kind=%s" % kind
            # argsort of a sorted array is the identity; of the reversed
            # array it is the reversal itself.
            assert_equal(a.copy().argsort(kind=kind), a, msg)
            assert_equal(b.copy().argsort(kind=kind), b, msg)

        # test complex argsorts. These use the same code as the scalars
        # but the compare fuction differs.
        ai = a*1j + 1
        bi = b*1j + 1
        for kind in ['q','m','h'] :
            msg = "complex argsort, kind=%s" % kind
            assert_equal(ai.copy().argsort(kind=kind), a, msg)
            assert_equal(bi.copy().argsort(kind=kind), b, msg)
        ai = a + 1j
        bi = b + 1j
        for kind in ['q','m','h'] :
            msg = "complex argsort, kind=%s" % kind
            assert_equal(ai.copy().argsort(kind=kind), a, msg)
            assert_equal(bi.copy().argsort(kind=kind), b, msg)

        # test string argsorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)])
        b = a[::-1].copy()
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h'] :
            msg = "string argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test unicode argsorts.
        s = 'aaaaaaaa'
        a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h'] :
            msg = "unicode argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test object array argsorts.
        a = np.empty((101,), dtype=np.object)
        a[:] = range(101)
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h'] :
            msg = "object argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test structured array argsorts.
        dt = np.dtype([('f',float),('i',int)])
        a = array([(i,i) for i in range(101)], dtype = dt)
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'm', 'h'] :
            msg = "structured array argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test datetime64 argsorts.
        a = np.arange(0, 101, dtype='datetime64[D]')
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "datetime64 argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # test timedelta64 argsorts.
        a = np.arange(0, 101, dtype='timedelta64[D]')
        b = a[::-1]
        r = np.arange(101)
        rr = r[::-1]
        for kind in ['q', 'h', 'm'] :
            msg = "timedelta64 argsort, kind=%s" % kind
            assert_equal(a.copy().argsort(kind=kind), r, msg)
            assert_equal(b.copy().argsort(kind=kind), rr, msg)

        # check axis handling. This should be the same for all type
        # specific argsorts, so we only check it for one type and one kind
        a = np.array([[3,2],[1,0]])
        b = np.array([[1,1],[0,0]])
        c = np.array([[1,0],[1,0]])
        assert_equal(a.copy().argsort(axis=0), b)
        assert_equal(a.copy().argsort(axis=1), c)
        assert_equal(a.copy().argsort(), c)
        # using None is known fail at this point
        #assert_equal(a.copy().argsort(axis=None, c)

        # check that stable argsorts are stable
        # (all-equal inputs must argsort to the identity permutation)
        r = np.arange(100)
        # scalars
        a = np.zeros(100)
        assert_equal(a.argsort(kind='m'), r)
        # complex
        a = np.zeros(100, dtype=np.complex)
        assert_equal(a.argsort(kind='m'), r)
        # string
        a = np.array(['aaaaaaaaa' for i in range(100)])
        assert_equal(a.argsort(kind='m'), r)
        # unicode
        a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
        assert_equal(a.argsort(kind='m'), r)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([np.nan, 1, 0])
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1,4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1,10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0,128],dtype='<i4')
b = a.searchsorted(np.array(128,dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0,128],dtype='>i4')
b = a.searchsorted(np.array(128,dtype='>i4'))
assert_equal(b, 1, msg)
def test_searchsorted_unicode(self):
    """searchsorted on unicode strings.

    Regression test: 1.6.1 contained a string length miscalculation in
    arraytypes.c.src:UNICODE_compare() which manifested as
    incorrect/inconsistent results from searchsorted.
    """
    # sorted list of long, mostly-identical strings (only the tail differs)
    a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
                 dtype=np.unicode)
    ind = np.arange(len(a))
    # searching for each element must find exactly its own slot,
    # both when passed scalars and when indexed out of the array
    assert_equal([a.searchsorted(v, 'left') for v in a], ind)
    assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
    assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
    assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
    """The ``sorter`` argument: input validation and equivalence with
    searching an explicitly sorted copy."""
    a = np.array([5, 2, 1, 3, 4])
    s = np.argsort(a)
    # sorter must be a flat integer array of the same length...
    assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
    assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
    assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
    assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
    # ...with all entries in bounds (bounds check)
    assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
    assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
    # searching via sorter must agree with searching the sorted array
    a = np.random.rand(100)
    s = a.argsort()
    b = np.sort(a)
    k = np.linspace(0, 1, 20)
    assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
    # with repeated values, 'left'/'right' give the run boundaries
    a = np.array([0, 1, 2, 3, 5]*20)
    s = a.argsort()
    k = [0, 1, 2, 3, 5]
    assert_equal(a.searchsorted(k, side='l', sorter=s), [0, 20, 40, 60, 80])
    assert_equal(a.searchsorted(k, side='r', sorter=s), [20, 40, 60, 80, 100])
def test_flatten(self):
x0 = np.array([[1,2,3],[4,5,6]], np.int32)
x1 = np.array([[[1,2],[3,4]],[[5,6],[7,8]]], np.int32)
y0 = np.array([1,2,3,4,5,6], np.int32)
y0f = np.array([1,4,2,5,3,6], np.int32)
y1 = np.array([1,2,3,4,5,6,7,8], np.int32)
y1f = np.array([1,5,3,7,2,6,4,8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
def test_diagonal(self):
    """diagonal(): offsets on a 2-D array, and offset/axis selection on
    a 3-D array."""
    a = np.arange(12).reshape((3, 4))
    assert_equal(a.diagonal(), [0, 5, 10])
    assert_equal(a.diagonal(0), [0, 5, 10])
    assert_equal(a.diagonal(1), [1, 6, 11])
    assert_equal(a.diagonal(-1), [4, 9])

    b = np.arange(8).reshape((2, 2, 2))
    assert_equal(b.diagonal(), [[0, 6], [1, 7]])
    assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
    assert_equal(b.diagonal(1), [[2], [3]])
    assert_equal(b.diagonal(-1), [[4], [5]])
    # the two diagonal axes must be distinct
    assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
    assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
    assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
    assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
    # Order of axis argument doesn't matter:
    assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_deprecation(self):
    """Writing through a diagonal() view raises FutureWarning.

    Exercises every route to the underlying data (direct setitem,
    np.diagonal/np.diag, views, buffer API, __array_interface__,
    ctypeslib, __array_struct__) and checks that exactly one warning is
    issued per diagonal() call, that the write still goes through, and
    that copying or marking the result read-only silences the warning.
    """
    import warnings
    from numpy.testing.utils import WarningManager

    def collect_warning_types(f, *args, **kwargs):
        # run f, recording warnings; return the list of warning classes
        ctx = WarningManager(record=True)
        warning_log = ctx.__enter__()
        warnings.simplefilter("always")
        try:
            f(*args, **kwargs)
        finally:
            ctx.__exit__()
        return [w.category for w in warning_log]

    a = np.arange(9).reshape(3, 3)
    # All the different functions raise a warning, but not an error, and
    # 'a' is not modified:
    assert_equal(collect_warning_types(a.diagonal().__setitem__, 0, 10),
                 [FutureWarning])
    assert_equal(a, np.arange(9).reshape(3, 3))
    assert_equal(collect_warning_types(np.diagonal(a).__setitem__, 0, 10),
                 [FutureWarning])
    assert_equal(a, np.arange(9).reshape(3, 3))
    assert_equal(collect_warning_types(np.diag(a).__setitem__, 0, 10),
                 [FutureWarning])
    assert_equal(a, np.arange(9).reshape(3, 3))
    # Views also warn
    d = np.diagonal(a)
    d_view = d.view()
    assert_equal(collect_warning_types(d_view.__setitem__, 0, 10),
                 [FutureWarning])
    # But the write goes through:
    assert_equal(d[0], 10)
    # Only one warning per call to diagonal, though (even if there are
    # multiple views involved):
    assert_equal(collect_warning_types(d.__setitem__, 0, 10),
                 [])
    # Other ways of accessing the data also warn:
    # .data goes via the C buffer API, gives a read-write
    # buffer/memoryview. We don't warn until tp_getwritebuf is actually
    # called, which is not until the buffer is written to.
    have_memoryview = (hasattr(__builtins__, "memoryview")
                       or "memoryview" in __builtins__)

    def get_data_and_write(getter):
        # fetch a buffer/memoryview for a fresh diagonal and write to it
        buf_or_memoryview = getter(a.diagonal())
        if (have_memoryview and isinstance(buf_or_memoryview, memoryview)):
            buf_or_memoryview[0] = np.array(1)
        else:
            buf_or_memoryview[0] = "x"

    assert_equal(collect_warning_types(get_data_and_write,
                                       lambda d: d.data),
                 [FutureWarning])
    if hasattr(np, "getbuffer"):
        assert_equal(collect_warning_types(get_data_and_write,
                                           np.getbuffer),
                     [FutureWarning])
    # PEP 3118:
    if have_memoryview:
        assert_equal(collect_warning_types(get_data_and_write, memoryview),
                     [FutureWarning])
    # Void dtypes can give us a read-write buffer, but only in Python 2:
    import sys
    if sys.version_info[0] < 3:
        aV = np.empty((3, 3), dtype="V10")
        assert_equal(collect_warning_types(aV.diagonal().item, 0),
                     [FutureWarning])
    # XX it seems that direct indexing of a void object returns a void
    # scalar, which ignores not just WARN_ON_WRITE but even WRITEABLE.
    # i.e. in this:
    #   a = np.empty(10, dtype="V10")
    #   a.flags.writeable = False
    #   buf = a[0].item()
    # 'buf' ends up as a writeable buffer. I guess no-one actually
    # uses void types like this though...
    # __array_interface also lets a data pointer get away from us
    log = collect_warning_types(getattr, a.diagonal(),
                                "__array_interface__")
    assert_equal(log, [FutureWarning])
    # ctypeslib goes via __array_interface__:
    try:
        # may not exist in python 2.4:
        import ctypes
    except ImportError:
        pass
    else:
        log = collect_warning_types(np.ctypeslib.as_ctypes, a.diagonal())
        assert_equal(log, [FutureWarning])
    # __array_struct__
    log = collect_warning_types(getattr, a.diagonal(), "__array_struct__")
    assert_equal(log, [FutureWarning])
    # Make sure that our recommendation to silence the warning by copying
    # the array actually works:
    diag_copy = a.diagonal().copy()
    assert_equal(collect_warning_types(diag_copy.__setitem__, 0, 10),
                 [])
    # There might be people who get a spurious warning because they are
    # extracting a buffer, but then use that buffer in a read-only
    # fashion. And they might get cranky at having to create a superfluous
    # copy just to work around this spurious warning. A reasonable
    # solution would be for them to mark their usage as read-only, and
    # thus safe for both past and future PyArray_Diagonal
    # semantics. So let's make sure that setting the diagonal array to
    # non-writeable will suppress these warnings:
    ro_diag = a.diagonal()
    ro_diag.flags.writeable = False
    assert_equal(collect_warning_types(getattr, ro_diag, "data"), [])
    # __array_interface__ has no way to communicate read-onlyness --
    # effectively all __array_interface__ arrays are assumed to be
    # writeable :-(
    # ro_diag = a.diagonal()
    # ro_diag.flags.writeable = False
    # assert_equal(collect_warning_types(getattr, ro_diag,
    #                                    "__array_interface__"), [])
    if hasattr(__builtins__, "memoryview"):
        ro_diag = a.diagonal()
        ro_diag.flags.writeable = False
        assert_equal(collect_warning_types(memoryview, ro_diag), [])
    ro_diag = a.diagonal()
    ro_diag.flags.writeable = False
    assert_equal(collect_warning_types(getattr, ro_diag,
                                       "__array_struct__"), [])
def test_diagonal_memleak(self):
    """Regression test for a bug that crept in at one point: repeated
    diagonal() calls must not leak references to the base array."""
    a = np.zeros((100, 100))
    assert_(sys.getrefcount(a) < 50)
    for i in xrange(100):
        a.diagonal()
    # refcount unchanged after 100 diagonal() calls
    assert_(sys.getrefcount(a) < 50)
def test_ravel(self):
    """ravel() orders ('C', 'F', 'A', 'K') and view-vs-copy behaviour
    for C-ordered, F-ordered, and negatively-strided inputs."""
    a = np.array([[0, 1], [2, 3]])
    assert_equal(a.ravel(), [0, 1, 2, 3])
    # C-contiguous input: ravel returns a view (owndata is False)
    assert_(not a.ravel().flags.owndata)
    assert_equal(a.ravel('F'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
    assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
    assert_(not a.ravel(order='A').flags.owndata)
    assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
    assert_(not a.ravel(order='K').flags.owndata)
    assert_equal(a.ravel(), a.reshape(-1))
    # Fortran-ordered input: 'A' and 'K' follow memory layout
    a = np.array([[0, 1], [2, 3]], order='F')
    assert_equal(a.ravel(), [0, 1, 2, 3])
    assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
    assert_(not a.ravel(order='A').flags.owndata)
    assert_(not a.ravel(order='K').flags.owndata)
    assert_equal(a.ravel(), a.reshape(-1))
    assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
    # negatively-strided view
    a = np.array([[0, 1], [2, 3]])[::-1, :]
    assert_equal(a.ravel(), [2, 3, 0, 1])
    assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
    assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
    assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
    # 'K' doesn't reverse the axes of negative strides
    assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
    # and therefore must copy here
    assert_(a.ravel(order='K').flags.owndata)
class TestSubscripting(TestCase):
    def test_test_zero_rank(self):
        """Scalar indexing returns a numpy integer scalar (and a plain
        int on Python 2); ellipsis indexing returns a 0-d ndarray."""
        x = array([1, 2, 3])
        self.assertTrue(isinstance(x[0], np.int_))
        if sys.version_info[0] < 3:
            self.assertTrue(isinstance(x[0], int))
        # x[0, ...] keeps the result as a 0-d array, not a scalar
        self.assertTrue(type(x[0, ...]) is ndarray)
class TestPickling(TestCase):
    """Pickle round-trips, plus loading of pickles produced by the old
    numpy pickle formats (version 0 has no version field in the dtype
    reconstruction tuple; version 1 does)."""

    def test_roundtrip(self):
        # dumps/loads round-trip for C-order, transposed, and structured arrays
        import pickle
        carray = array([[2, 9], [7, 0], [3, 8]])
        DATA = [
            carray,
            transpose(carray),
            array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
                                            ('c', float)])
        ]
        for a in DATA:
            assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)

    def _loads(self, obj):
        # Python 3 needs latin1 to decode str pickles written by Python 2
        if sys.version_info[0] >= 3:
            return loads(obj, encoding='latin1')
        else:
            return loads(obj)

    # version 0 pickles, using protocol=2 to pickle
    # version 0 doesn't have a version field
    def test_version0_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = array([1, 2, 3, 4], dtype=int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version0_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version0_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a': 1}, {'b': 2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    # version 1 pickles, using protocol=2 to pickle
    def test_version1_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = array([1, 2, 3, 4], dtype=int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version1_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_version1_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = array([{'a': 1}, {'b': 2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)

    def test_subarray_int_shape(self):
        # subarray fields whose shape was pickled as a plain int
        s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
        a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = ones((1,1))
x[:,[0]] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1,1,1))
x[:,:,[0]] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_tuple(self):
x = ones((1,1))
x[:,(0,)] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1,1,1))
x[:,:,(0,)] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_mask(self):
x = array([1,2,3,4])
m = array([0,1],bool)
assert_array_equal(x[m], array([2]))
def test_mask2(self):
x = array([[1,2,3,4],[5,6,7,8]])
m = array([0,1],bool)
m2 = array([[0,1],[1,0]], bool)
m3 = array([[0,1]], bool)
assert_array_equal(x[m], array([[5,6,7,8]]))
assert_array_equal(x[m2], array([2,5]))
assert_array_equal(x[m3], array([2]))
def test_assign_mask(self):
x = array([1,2,3,4])
m = array([0,1],bool)
x[m] = 5
assert_array_equal(x, array([1,5,3,4]))
def test_assign_mask(self):
xorig = array([[1,2,3,4],[5,6,7,8]])
m = array([0,1],bool)
m2 = array([[0,1],[1,0]],bool)
m3 = array([[0,1]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, array([[1,2,3,4],[10,10,10,10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, array([[1,10,3,4],[10,6,7,8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, array([[1,10,3,4],[5,6,7,8]]))
class TestStringCompare(TestCase):
    """Elementwise comparison operators on string/unicode arrays, checked
    against Python's own per-element string comparisons."""

    def test_string(self):
        g1 = array(["This", "is", "example"])
        g2 = array(["This", "was", "example"])
        assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])

    def test_mixed(self):
        # array compared against a scalar string broadcasts elementwise
        g1 = array(["spam", "spa", "spammer", "and eggs"])
        g2 = "spam"
        assert_array_equal(g1 == g2, [x == g2 for x in g1])
        assert_array_equal(g1 != g2, [x != g2 for x in g1])
        assert_array_equal(g1 < g2, [x < g2 for x in g1])
        assert_array_equal(g1 > g2, [x > g2 for x in g1])
        assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
        assert_array_equal(g1 >= g2, [x >= g2 for x in g1])

    def test_unicode(self):
        g1 = array([u"This", u"is", u"example"])
        g2 = array([u"This", u"was", u"example"])
        assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
    """argmax: nan/complex ordering, datetime64/timedelta64, axis handling.

    Each nan_arr entry is (array-like, expected argmax index).  The
    tabulated cases show nan propagating: the index of the first nan
    (including complex values with a nan component) is returned.
    Complex values compare lexicographically on (real, imag).
    """
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 5),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2015-11-20T12:20:59'),
          np.datetime64('1932-09-23T10:10:13'),
          np.datetime64('2014-10-10T03:50:30')], 3),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 0),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 0),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 1),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 2),
        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 3),
        #(['zz', 'a', 'aa', 'a'], 0),
        #(['aa', 'z', 'zz', 'a'], 2),
    ]

    def test_all(self):
        # argmax along each axis must pick the positions of max along it
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in xrange(a.ndim):
            amax = a.max(i)
            aargmax = a.argmax(i)
            axes = range(a.ndim)
            axes.remove(i)
            assert_(all(amax == aargmax.choose(*a.transpose(i, *axes))))

    def test_combinations(self):
        # each tabulated case: check both the position and that the
        # element at that position equals np.max
        for arr, pos in self.nan_arr:
            assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
class TestArgmin(TestCase):
    """argmin: nan/complex ordering, datetime64/timedelta64, axis handling,
    and minimum representable signed integers.

    Each nan_arr entry is (array-like, expected argmin index).  As with
    argmax, the tabulated cases show nan propagating: the index of the
    first nan (including complex values with a nan component) is
    returned.  Complex values compare lexicographically on (real, imag).
    """
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 0),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2014-11-20T12:20:59'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 4),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 2),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 0),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 1),
        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 0),
        #(['zz', 'a', 'aa', 'a'], 1),
        #(['aa', 'z', 'zz', 'a'], 3),
    ]

    def test_all(self):
        # argmin along each axis must pick the positions of min along it
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in xrange(a.ndim):
            amin = a.min(i)
            aargmin = a.argmin(i)
            axes = range(a.ndim)
            axes.remove(i)
            assert_(all(amin == aargmin.choose(*a.transpose(i, *axes))))

    def test_combinations(self):
        # each tabulated case: check both the position and that the
        # element at that position equals np.min
        for arr, pos in self.nan_arr:
            assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)

    def test_minimum_signed_integers(self):
        # the most negative value of each signed int type is found
        # correctly (no overflow when negating/comparing)
        a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
        assert_equal(np.argmin(a), 1)

        a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
        assert_equal(np.argmin(a), 1)
class TestMinMax(TestCase):
    """amax/amin on scalar inputs and axis validation."""

    def test_scalar(self):
        # a scalar has no axis 1; axis 0 and axis None are accepted
        assert_raises(ValueError, np.amax, 1, 1)
        assert_raises(ValueError, np.amin, 1, 1)

        assert_equal(np.amax(1, axis=0), 1)
        assert_equal(np.amin(1, axis=0), 1)
        assert_equal(np.amax(1, axis=None), 1)
        assert_equal(np.amin(1, axis=None), 1)

    def test_axis(self):
        # out-of-range axis raises; valid axis reduces as expected
        assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
        assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
class TestNewaxis(TestCase):
    def test_basic(self):
        """Scaling a newaxis-expanded column gives the same values as
        scaling the original flat vector."""
        vec = array([0, -0.1, 0.1])
        col = vec[:, newaxis]
        scaled = 250 * col
        assert_almost_equal(scaled.ravel(), 250 * vec)
class TestClip(TestCase):
    """clip() across sctype groups, byte orders, and in-place operation."""

    def _check_range(self, x, cmin, cmax):
        # every element of x must lie inside [cmin, cmax]
        assert_(np.all(x >= cmin))
        assert_(np.all(x <= cmax))

    def _clip_type(self, type_group, array_max,
                   clip_min, clip_max, inplace=False,
                   expected_min=None, expected_max=None):
        # Clip random data for every dtype in the named sctypes group, in
        # both native and swapped byte order; check both the value range
        # and the byteorder of the result (out-of-place clip normalizes
        # to native order).
        if expected_min is None:
            expected_min = clip_min
        if expected_max is None:
            expected_max = clip_max
        for T in np.sctypes[type_group]:
            if sys.byteorder == 'little':
                byte_orders = ['=', '>']
            else:
                byte_orders = ['<', '=']
            for byteorder in byte_orders:
                dtype = np.dtype(T).newbyteorder(byteorder)
                x = (np.random.random(1000) * array_max).astype(dtype)
                if inplace:
                    x.clip(clip_min, clip_max, x)
                else:
                    x = x.clip(clip_min, clip_max)
                    byteorder = '='
                if x.dtype.byteorder == '|': byteorder = '|'
                assert_equal(x.dtype.byteorder, byteorder)
                self._check_range(x, expected_min, expected_max)
        return x

    def test_basic(self):
        # floats, ints (with float bounds), and uints (negative lower
        # bound clamps to 0), both in-place and out-of-place
        for inplace in [False, True]:
            self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace)
            self._clip_type('float', 1024, 0, 0, inplace=inplace)

            self._clip_type('int', 1024, -120, 100.5, inplace=inplace)
            self._clip_type('int', 1024, 0, 0, inplace=inplace)

            x = self._clip_type('uint', 1024, -120, 100, expected_min=0,
                                inplace=inplace)
            x = self._clip_type('uint', 1024, 0, 0, inplace=inplace)

    def test_record_array(self):
        # clipping a single field of a structured array
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
        y = rec['x'].clip(-0.3, 0.5)
        self._check_range(y, -0.3, 0.5)

    def test_max_or_min(self):
        # one-sided clipping: only min, only max
        val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
        x = val.clip(3)
        assert_(np.all(x >= 3))
        x = val.clip(min=3)
        assert_(np.all(x >= 3))
        x = val.clip(max=4)
        assert_(np.all(x <= 4))
class TestPutmask(object):
    """np.putmask across dtypes, byte orders, and record arrays
    (nose generator-style tests)."""

    def tst_basic(self, x, T, mask, val):
        np.putmask(x, mask, val)
        # masked positions hold val (converted to T); dtype unchanged
        assert_(np.all(x[mask] == T(val)))
        assert_(x.dtype == T)

    def test_ip_types(self):
        # flexible/object types can't hold arbitrary numbers; skip them
        unchecked_types = [str, unicode, np.void, object]

        x = np.random.random(1000)*100
        mask = x < 40

        for val in [-100, 0, 15]:
            for types in np.sctypes.itervalues():
                for T in types:
                    if T not in unchecked_types:
                        yield self.tst_basic, x.copy().astype(T), T, mask, val

    def test_mask_size(self):
        # mask must match the array's size
        assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)

    def tst_byteorder(self, dtype):
        x = np.array([1, 2, 3], dtype)
        np.putmask(x, [True, False, True], -1)
        assert_array_equal(x, [-1, 2, -1])

    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype

    def test_record_array(self):
        # Note mixed byteorder. putmask on one field must leave the
        # other fields untouched.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        np.putmask(rec['x'], [True, False], 10)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [2, 4])
        assert_array_equal(rec['z'], [3, 3])
        np.putmask(rec['y'], [True, False], 11)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [11, 4])
        assert_array_equal(rec['z'], [3, 3])

    def test_masked_array(self):
        # putmask on masked arrays is not exercised yet; placeholder
        ## x = np.array([1,2,3])
        ## z = np.ma.array(x,mask=[True,False,False])
        ## np.putmask(z,[True,True,True],3)
        pass
class TestTake(object):
    """ndarray.take: identity take, out-of-bounds modes ('raise'/'clip'/
    'wrap'), byte orders, and record arrays (nose generator-style tests)."""

    def tst_basic(self, x):
        # taking every index along axis 0 reproduces the array
        ind = range(x.shape[0])
        assert_array_equal(x.take(ind, axis=0), x)

    def test_ip_types(self):
        # flexible/object types are skipped
        unchecked_types = [str, unicode, np.void, object]

        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        for types in np.sctypes.itervalues():
            for T in types:
                if T not in unchecked_types:
                    yield self.tst_basic, x.copy().astype(T)

    def test_raise(self):
        # default mode: out-of-bounds raises; -1 means "last row"
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
        assert_raises(IndexError, x.take, [-3], axis=0)
        assert_array_equal(x.take([-1], axis=0)[0], x[1])

    def test_clip(self):
        # mode='clip' clamps indices into [0, len-1]
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
        assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])

    def test_wrap(self):
        # mode='wrap' takes indices modulo the axis length
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
        assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
        assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])

    def tst_byteorder(self, dtype):
        x = np.array([1, 2, 3], dtype)
        assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])

    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype

    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        rec1 = rec.take([1])
        assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
    def test_basic(self):
        """lexsort with a tuple of keys and with a single 2-d key array
        (rows are keys, last row is primary)."""
        primary = [1, 2, 1, 3, 1, 5]
        secondary = [0, 4, 5, 6, 2, 3]
        expected_idx = np.array([0, 4, 2, 1, 3, 5])
        # keys passed as a tuple: the last key is the primary sort key
        idx = np.lexsort((secondary, primary))
        assert_array_equal(idx, expected_idx)
        # keys passed as rows of a 2-d array behave the same way
        stacked = np.vstack((secondary, primary))
        idx = np.lexsort(stacked)
        assert_array_equal(idx, expected_idx)
        # applying the returned permutation to the primary key sorts it
        assert_array_equal(stacked[1][idx], np.sort(stacked[1]))
class TestIO(object):
"""Test tofile, fromfile, tostring, and fromstring"""
def setUp(self):
shape = (2,4,3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:,1] = [nan, inf, -inf, nan]
self.dtype = self.x.dtype
self.filename = tempfile.mktemp()
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
#tmp_file.close()
def test_bool_fromstring(self):
v = np.array([True,False,True,False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
os.unlink(self.filename)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tostring()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tostring('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str imbues less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[nan, nan, nan, nan, nan, nan, nan],
sep=' ')
def test_inf(self):
self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF",
[inf, inf, -inf, inf, -inf, inf, -inf], sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
array([1,2,3,4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1,2,3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1.,2.,3.,4.], sep=',')
self._check_from('1,2,3,4', [1.,2.,3.,4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1,3,4,5], sep='_x_')
def test_dtype(self):
v = np.array([1,2,3,4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
os.unlink(self.filename)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
    def test_locale(self):
        # Re-run the text parsing/writing tests under a non-C locale to make
        # sure number formatting ignores locale-specific decimal separators.
        in_foreign_locale(self.test_numbers)()
        in_foreign_locale(self.test_nan)()
        in_foreign_locale(self.test_inf)()
        in_foreign_locale(self.test_counted_string)()
        in_foreign_locale(self.test_ascii)()
        in_foreign_locale(self.test_malformed)()
        in_foreign_locale(self.test_tofile_sep)()
        in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
    """Checks that np.frombuffer reconstructs arrays from raw bytes."""

    def tst_basic(self, buffer, expected, kwargs):
        # Decode the raw byte buffer and compare with the reference array.
        decoded = np.frombuffer(buffer, **kwargs)
        assert_array_equal(decoded, expected)

    def test_ip_basic(self):
        # Round-trip random data for both byte orders and several scalar
        # types (nose-style generator test).
        for order in ('<', '>'):
            for scalar in (float, int, np.complex):
                dt = np.dtype(scalar).newbyteorder(order)
                data = (np.random.random((4, 7)) * 5).astype(dt)
                yield self.tst_basic, data.tostring(), data.flat, {'dtype': dt}

    def test_empty(self):
        # An empty buffer must decode to an empty array.
        yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
    # Tests for the .flat iterator: read-only enforcement and the
    # UPDATEIFCOPY mechanism used for writeable discontiguous views.
    def setUp(self):
        a0 = arange(20.0)
        a = a0.reshape(4,5)
        a0.shape = (4,5)
        # a is made read-only; a0 stays writeable.
        a.flags.writeable = False
        self.a = a
        self.b = a[::2,::2]
        self.a0 = a0
        self.b0 = a0[::2,::2]

    def test_contiguous(self):
        # Writing through .flat of a read-only array must raise ValueError
        # and leave the data untouched.
        testpassed = False
        try:
            self.a.flat[12] = 100.0
        except ValueError:
            testpassed = True
        assert testpassed
        assert self.a.flat[12] == 12.0

    def test_discontiguous(self):
        # Same check for a strided (discontiguous) read-only view.
        testpassed = False
        try:
            self.b.flat[4] = 100.0
        except ValueError:
            testpassed = True
        assert testpassed
        assert self.b.flat[4] == 12.0

    def test___array__(self):
        c = self.a.flat.__array__()
        d = self.b.flat.__array__()
        e = self.a0.flat.__array__()
        f = self.b0.flat.__array__()
        # Arrays taken from read-only sources stay read-only; those from
        # writeable sources stay writeable.
        assert c.flags.writeable is False
        assert d.flags.writeable is False
        assert e.flags.writeable is True
        assert f.flags.writeable is True
        # Only the writeable discontiguous case needs UPDATEIFCOPY, and it
        # must keep a reference to the array it will write back into.
        assert c.flags.updateifcopy is False
        assert d.flags.updateifcopy is False
        assert e.flags.updateifcopy is False
        assert f.flags.updateifcopy is True
        assert f.base is self.b0
class TestResize(TestCase):
    """Tests for in-place ndarray.resize (grow, shrink, argument checking)."""

    def test_basic(self):
        # Growing keeps existing data (in flat order) and zero-fills the rest.
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        x.resize((5, 5))
        assert_array_equal(x.flat[:9],
                           np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        assert_array_equal(x[9:].flat, 0)

    def test_check_reference(self):
        # resize must refuse to reallocate while another name references the
        # same array; y deliberately holds that second reference.
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        y = x
        self.assertRaises(ValueError, x.resize, (5, 1))

    def test_int_shape(self):
        # A bare integer is accepted as a one-dimensional shape.
        x = np.eye(3)
        x.resize(3)
        assert_array_equal(x, np.eye(3)[0, :])

    def test_none_shape(self):
        # None (or no argument at all) leaves the array unchanged.
        x = np.eye(3)
        x.resize(None)
        assert_array_equal(x, np.eye(3))
        x.resize()
        assert_array_equal(x, np.eye(3))

    def test_invalid_arguments(self):
        # (method name typo "arguements" fixed)
        self.assertRaises(TypeError, np.eye(3).resize, 'hi')
        self.assertRaises(ValueError, np.eye(3).resize, -1)
        self.assertRaises(TypeError, np.eye(3).resize, order=1)
        self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')

    def test_freeform_shape(self):
        # Multiple integer arguments are taken as the new shape.
        x = np.eye(3)
        x.resize(3, 2, 1)
        assert_(x.shape == (3, 2, 1))

    def test_zeros_appended(self):
        # Newly added elements are zero-initialized.
        x = np.eye(3)
        x.resize(2, 3, 3)
        assert_array_equal(x[0], np.eye(3))
        assert_array_equal(x[1], np.zeros((3, 3)))
class TestRecord(TestCase):
    # Structured (record) array field access and renaming, plus the
    # bytes/unicode field-name rules that differ between Python 2 and 3.
    def test_field_rename(self):
        # dtype.names can be reassigned wholesale.
        dt = np.dtype([('f',float),('i',int)])
        dt.names = ['p','q']
        assert_equal(dt.names,['p','q'])

    # The field-name type rules are version specific, so the tests are
    # defined conditionally at class-body level.
    if sys.version_info[0] >= 3:
        def test_bytes_fields(self):
            # Bytes are not allowed in field names and not recognized in titles
            # on Py3
            assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
            assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])

            dt = np.dtype([((asbytes('a'), 'b'), int)])
            assert_raises(ValueError, dt.__getitem__, asbytes('a'))

            x = np.array([(1,), (2,), (3,)], dtype=dt)
            assert_raises(ValueError, x.__getitem__, asbytes('a'))

            y = x[0]
            assert_raises(IndexError, y.__getitem__, asbytes('a'))
    else:
        def test_unicode_field_titles(self):
            # Unicode field titles are added to field dict on Py2
            title = unicode('b')
            dt = np.dtype([((title, 'a'), int)])
            # Indexing by title or by name must both succeed.
            dt[title]
            dt['a']
            x = np.array([(1,), (2,), (3,)], dtype=dt)
            x[title]
            x['a']
            y = x[0]
            y[title]
            y['a']

        def test_unicode_field_names(self):
            # Unicode field names are not allowed on Py2
            title = unicode('b')
            assert_raises(TypeError, np.dtype, [(title, int)])
            assert_raises(TypeError, np.dtype, [(('a', title), int)])

    def test_field_names(self):
        # Test unicode and 8-bit / byte strings can be used
        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        is_py3 = sys.version_info[0] >= 3
        if is_py3:
            funcs = (str,)
            # byte string indexing fails gracefully
            assert_raises(ValueError, a.__setitem__, asbytes('f1'), 1)
            assert_raises(ValueError, a.__getitem__, asbytes('f1'))
            assert_raises(ValueError, a['f1'].__setitem__, asbytes('sf1'), 1)
            assert_raises(ValueError, a['f1'].__getitem__, asbytes('sf1'))
        else:
            funcs = (str, unicode)
        for func in funcs:
            b = a.copy()
            fn1 = func('f1')
            b[fn1] = 1
            assert_equal(b[fn1], 1)
            fnn = func('not at all')
            assert_raises(ValueError, b.__setitem__, fnn, 1)
            assert_raises(ValueError, b.__getitem__, fnn)
            b[0][fn1] = 2
            assert_equal(b[fn1], 2)
            # Subfield
            assert_raises(IndexError, b[0].__setitem__, fnn, 1)
            assert_raises(IndexError, b[0].__getitem__, fnn)
            # Subfield
            fn3 = func('f3')
            sfn1 = func('sf1')
            b[fn3][sfn1] = 1
            assert_equal(b[fn3][sfn1], 1)
            assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
            assert_raises(ValueError, b[fn3].__getitem__, fnn)
            # multiple Subfields
            fn2 = func('f2')
            b[fn2] = 3
            assert_equal(b[['f1','f2']][0].tolist(), (2, 3))
            assert_equal(b[['f2','f1']][0].tolist(), (3, 2))
            assert_equal(b[['f1','f3']][0].tolist(), (2, (1,)))
            # view of subfield view/copy
            assert_equal(b[['f1','f2']][0].view(('i4',2)).tolist(), (2, 3))
            assert_equal(b[['f2','f1']][0].view(('i4',2)).tolist(), (3, 2))
            view_dtype=[('f1', 'i4'),('f3', [('', 'i4')])]
            assert_equal(b[['f1','f3']][0].view(view_dtype).tolist(), (2, (1,)))
        # non-ascii unicode field indexing is well behaved
        if not is_py3:
            raise SkipTest('non ascii unicode field indexing skipped; '
                           'raises segfault on python 2.x')
        else:
            assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
            assert_raises(ValueError, a.__getitem__, u'\u03e0')

    def test_field_names_deprecation(self):
        import warnings
        from numpy.testing.utils import WarningManager

        def collect_warning_types(f, *args, **kwargs):
            # Run f and return the categories of every warning it emitted.
            ctx = WarningManager(record=True)
            warning_log = ctx.__enter__()
            warnings.simplefilter("always")
            try:
                f(*args, **kwargs)
            finally:
                ctx.__exit__()
            return [w.category for w in warning_log]

        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        a['f1'][0] = 1
        a['f2'][0] = 2
        a['f3'][0] = (3,)
        b = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        b['f1'][0] = 1
        b['f2'][0] = 2
        b['f3'][0] = (3,)
        # All the different functions raise a warning, but not an error, and
        # 'a' is not modified:
        assert_equal(collect_warning_types(a[['f1','f2']].__setitem__, 0, (10,20)),
                     [FutureWarning])
        assert_equal(a, b)
        # Views also warn
        subset = a[['f1','f2']]
        subset_view = subset.view()
        assert_equal(collect_warning_types(subset_view['f1'].__setitem__, 0, 10),
                     [FutureWarning])
        # But the write goes through:
        assert_equal(subset['f1'][0], 10)
        # Only one warning per multiple field indexing, though (even if there are
        # multiple views involved):
        assert_equal(collect_warning_types(subset['f1'].__setitem__, 0, 10),
                     [])
class TestView(TestCase):
    """Reinterpreting a structured array as plain integers via view()."""

    def test_basic(self):
        rgba_dtype = [('r', np.int8), ('g', np.int8),
                      ('b', np.int8), ('a', np.int8)]
        pixels = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=rgba_dtype)
        # Endianness is pinned to little-endian so the expected integers
        # are platform independent: 0x04030201 == 67305985.
        with_keyword = pixels.view(dtype='<i4')
        without_keyword = pixels.view('<i4')
        assert_array_equal(with_keyword, without_keyword)
        assert_array_equal(with_keyword, [67305985, 134678021])
class TestStats(TestCase):
    """mean/std/var must preserve ndarray subclasses and their attributes."""

    def test_subclass(self):
        class TaggedArray(np.ndarray):
            # Minimal ndarray subclass carrying an extra ``info`` attribute.
            def __new__(cls, data, info):
                arr = np.array(data).view(cls)
                arr.info = info
                return arr

            def __array_finalize__(self, obj):
                self.info = getattr(obj, "info", '')

        tagged = TaggedArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
        # Each reduction must hand the tag through to its result.
        for reduction in (tagged.mean, tagged.std, tagged.var):
            assert_(reduction(1).info == tagged.info)
class TestDot(TestCase):
    # Tests for the C-level dot(), including its optional third "out"
    # argument and the shape/dtype/contiguity checks on it.
    def test_dot_2args(self):
        from numpy.core.multiarray import dot

        a = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([[1, 0], [1, 1]], dtype=float)
        c = np.array([[3, 2], [7, 4]], dtype=float)

        d = dot(a, b)
        assert_allclose(c, d)

    def test_dot_3args(self):
        from numpy.core.multiarray import dot

        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))

        r = np.empty((1024, 32))
        for i in xrange(12):
            dot(f,v,r)
        # The repeated calls above must not leak references to the out array.
        assert_equal(sys.getrefcount(r), 2)
        r2 = dot(f,v,out=None)
        assert_array_equal(r2, r)
        # With out supplied, the very same object must be returned.
        assert_(r is dot(f,v,out=r))

        # Repeat with matrix-vector shapes.
        v = v[:,0].copy() # v.shape == (16,)
        r = r[:,0].copy() # r.shape == (1024,)
        r2 = dot(f,v)
        assert_(r is dot(f,v,r))
        assert_array_equal(r2, r)

    def test_dot_3args_errors(self):
        from numpy.core.multiarray import dot

        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))

        # Wrong shapes, non-contiguous slices, and mismatched dtypes for the
        # "out" argument must all be rejected with ValueError.
        r = np.empty((1024, 31))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((1024,))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((32,))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((32, 1024))
        assert_raises(ValueError, dot, f, v, r)
        assert_raises(ValueError, dot, f, v, r.T)
        r = np.empty((1024, 64))
        assert_raises(ValueError, dot, f, v, r[:,::2])
        assert_raises(ValueError, dot, f, v, r[:,:32])
        r = np.empty((1024, 32), dtype=np.float32)
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((1024, 32), dtype=int)
        assert_raises(ValueError, dot, f, v, r)
class TestSummarization(TestCase):
    # str()/repr() of arrays above the print threshold must summarize the
    # middle with "...," while keeping the leading and trailing entries.
    def test_1d(self):
        A = np.arange(1001)
        strA = '[ 0 1 2 ..., 998 999 1000]'
        assert_(str(A) == strA)

        reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
        assert_(repr(A) == reprA)

    def test_2d(self):
        # Each row is summarized independently.
        A = np.arange(1002).reshape(2,501)
        strA = '[[ 0 1 2 ..., 498 499 500]\n' \
               ' [ 501 502 503 ..., 999 1000 1001]]'
        assert_(str(A) == strA)

        reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
                ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
        assert_(repr(A) == reprA)
class TestChoose(TestCase):
    """np.choose picking elements from candidate arrays, with broadcasting."""

    def setUp(self):
        # Two constant candidate arrays in 1-d and 2-d form, plus an index
        # array selecting candidate 0, 0, then 1.
        self.x = 2 * ones((3,), dtype=int)
        self.y = 3 * ones((3,), dtype=int)
        self.x2 = 2 * ones((2, 3), dtype=int)
        self.y2 = 3 * ones((2, 3), dtype=int)
        self.ind = [0, 0, 1]

    def test_basic(self):
        chosen = np.choose(self.ind, (self.x, self.y))
        assert_equal(chosen, [2, 2, 3])

    def test_broadcast1(self):
        # The 1-d index broadcasts against 2-d candidates.
        chosen = np.choose(self.ind, (self.x2, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])

    def test_broadcast2(self):
        # Mixed 1-d and 2-d candidates broadcast to a common shape.
        chosen = np.choose(self.ind, (self.x, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])
def can_use_decimal():
    """Return True when the stdlib ``decimal`` module can be imported."""
    try:
        from decimal import Decimal  # noqa: imported only to probe availability
    except ImportError:
        return False
    return True
# TODO: test for multidimensional
# Maps padding-mode names to the integer codes understood by the C-level
# neighborhood iterator helpers (test_neighborhood_iterator*).
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
    """Exercises the C-level neighborhood iterator (via the
    test_neighborhood_iterator helper) for every padding mode in NEIGH_MODE.

    Only the repeated skip-message strings were fixed (they were missing
    their closing parenthesis); the test fixtures are unchanged.
    """
    # Simple, 2d tests
    def _test_simple2d(self, dt):
        # Test zero and one padding for simple data type
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
             np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
             np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
             np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
             np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
             np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
             np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
                                       NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple2d(self):
        self._test_simple2d(np.float)

    @dec.skipif(not can_use_decimal(),
                "Skip neighborhood iterator tests for decimal objects "
                "(decimal module not available)")
    def test_simple2d_object(self):
        from decimal import Decimal
        self._test_simple2d(Decimal)

    def _test_mirror2d(self, dt):
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
             np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    def test_mirror2d(self):
        self._test_mirror2d(np.float)

    @dec.skipif(not can_use_decimal(),
                "Skip neighborhood iterator tests for decimal objects "
                "(decimal module not available)")
    def test_mirror2d_object(self):
        from decimal import Decimal
        self._test_mirror2d(Decimal)

    # Simple, 1d tests
    def _test_simple(self, dt):
        # Test padding with constant values
        x = np.linspace(1, 5, 5).astype(dt)
        r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
        l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple_float(self):
        self._test_simple(np.float)

    @dec.skipif(not can_use_decimal(),
                "Skip neighborhood iterator tests for decimal objects "
                "(decimal module not available)")
    def test_simple_object(self):
        from decimal import Decimal
        self._test_simple(Decimal)

    # Test mirror modes
    def _test_mirror(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
        self.assertTrue([i.dtype == dt for i in l])
        assert_array_equal(l, r)

    def test_mirror(self):
        self._test_mirror(np.float)

    @dec.skipif(not can_use_decimal(),
                "Skip neighborhood iterator tests for decimal objects "
                "(decimal module not available)")
    def test_mirror_object(self):
        from decimal import Decimal
        self._test_mirror(Decimal)

    # Circular mode
    def _test_circular(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    def test_circular(self):
        self._test_circular(np.float)

    @dec.skipif(not can_use_decimal(),
                "Skip neighborhood iterator tests for decimal objects "
                "(decimal module not available)")
    def test_circular_object(self):
        from decimal import Decimal
        self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
    # Tests for one neighborhood iterator stacked on top of another (the
    # inner iterator walks the padded output of the outer one), driven by
    # the C helper test_neighborhood_iterator_oob.
    # Simple, 1d test: stacking 2 constant-padded neigh iterators
    def test_simple_const(self):
        dt = np.float64
        # Test zero and one padding for simple data type
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0], dtype=dt),
             np.array([0], dtype=dt),
             np.array([1], dtype=dt),
             np.array([2], dtype=dt),
             np.array([3], dtype=dt),
             np.array([0], dtype=dt),
             np.array([0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
                                           [0, 0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([1, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-1, 1], NEIGH_MODE['one'])
        assert_array_equal(l, r)

    # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # mirror padding
    def test_simple_mirror(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 1], dtype=dt),
             np.array([1, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 3], dtype=dt),
             np.array([3, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
                                           [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 0], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [0, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # circular padding
    def test_simple_circular(self):
        dt = np.float64
        # Stacking zero on top of circular
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 3, 1], dtype=dt),
             np.array([3, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 1], dtype=dt),
             np.array([3, 1, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
                                           [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [0, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
    # being strictly within the array
    def test_simple_strict_within(self):
        dt = np.float64
        # Stacking zero on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero, first neighborhood strictly inside
        # the array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
class TestWarnings(object):
    # Assigning complex values into a real array must emit ComplexWarning.
    def test_complex_warning(self):
        x = np.array([1,2])
        y = np.array([1-2j,1+2j])

        # With the warning escalated to an error the assignment must fail
        # and leave x untouched.
        warn_ctx = WarningManager()
        warn_ctx.__enter__()
        try:
            warnings.simplefilter("error", np.ComplexWarning)
            assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
            assert_equal(x, [1,2])
        finally:
            warn_ctx.__exit__()
class TestMinScalarType(object):
    """np.min_scalar_type must pick the narrowest dtype holding a scalar."""

    def test_usigned_shortshort(self):
        # Largest value that still fits in 8 unsigned bits.
        assert_equal(np.dtype('uint8'), np.min_scalar_type(2**8 - 1))

    def test_usigned_short(self):
        assert_equal(np.dtype('uint16'), np.min_scalar_type(2**16 - 1))

    def test_usigned_int(self):
        assert_equal(np.dtype('uint32'), np.min_scalar_type(2**32 - 1))

    def test_usigned_longlong(self):
        assert_equal(np.dtype('uint64'), np.min_scalar_type(2**63 - 1))

    def test_object(self):
        # Values beyond every integer dtype fall back to object dtype.
        assert_equal(np.dtype('O'), np.min_scalar_type(2**64))
# The PEP 3118 / buffer-protocol tests below need memoryview (Python >= 2.6).
if sys.version_info >= (2, 6):
    if sys.version_info[:2] == (2, 6):
        # 2.6's builtin memoryview is incomplete; use numpy's stand-in.
        from numpy.core.multiarray import memorysimpleview as memoryview

    from numpy.core._internal import _dtype_from_pep3118
    class TestPEP3118Dtype(object):
        # Translation of PEP 3118 buffer format strings into numpy dtypes
        # via numpy.core._internal._dtype_from_pep3118.
        # NOTE(review): this class appears to be nested under the
        # sys.version_info >= (2, 6) guard above — confirm indentation level.
        def _check(self, spec, wanted):
            dt = np.dtype(wanted)
            if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
                if wanted[-1][0] == '':
                    # A trailing padding field is expected to end up unnamed.
                    names = list(dt.names)
                    names[-1] = ''
                    dt.names = tuple(names)
            assert_equal(_dtype_from_pep3118(spec), dt,
                         err_msg="spec %r != dtype %r" % (spec, wanted))

        def test_native_padding(self):
            align = np.dtype('i').alignment
            for j in xrange(8):
                if j == 0:
                    s = 'bi'
                else:
                    s = 'b%dxi' % j
                # '@' = native alignment, '=' = packed layout.
                self._check('@'+s, {'f0': ('i1', 0),
                                    'f1': ('i', align*(1 + j//align))})
                self._check('='+s, {'f0': ('i1', 0),
                                    'f1': ('i', 1+j)})

        def test_native_padding_2(self):
            # Native padding should work also for structs and sub-arrays
            self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
            self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})

        def test_trailing_padding(self):
            # Trailing padding should be included, *and*, the item size
            # should match the alignment if in aligned mode
            align = np.dtype('i').alignment
            def VV(n):
                # Smallest aligned void size that can hold n padding bytes.
                return 'V%d' % (align*(1 + (n-1)//align))

            self._check('ix', [('f0', 'i'), ('', VV(1))])
            self._check('ixx', [('f0', 'i'), ('', VV(2))])
            self._check('ixxx', [('f0', 'i'), ('', VV(3))])
            self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
            self._check('i7x', [('f0', 'i'), ('', VV(7))])

            # '^' (unaligned) keeps the padding exactly as specified.
            self._check('^ix', [('f0', 'i'), ('', 'V1')])
            self._check('^ixx', [('f0', 'i'), ('', 'V2')])
            self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
            self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
            self._check('^i7x', [('f0', 'i'), ('', 'V7')])

        def test_native_padding_3(self):
            dt = np.dtype(
                    [('a', 'b'), ('b', 'i'),
                     ('sub', np.dtype('b,i')), ('c', 'i')],
                    align=True)
            self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)

            dt = np.dtype(
                    [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                     ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
            self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)

        def test_padding_with_array_inside_struct(self):
            dt = np.dtype(
                    [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
                     ('d', 'i')],
                    align=True)
            self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)

        def test_byteorder_inside_struct(self):
            # The byte order after @T{=i} should be '=', not '@'.
            # Check this by noting the absence of native alignment.
            self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
                                     'f1': ('i', 5)})

        def test_intra_padding(self):
            # Natively aligned sub-arrays may require some internal padding
            align = np.dtype('i').alignment
            def VV(n):
                return 'V%d' % (align*(1 + (n-1)//align))

            self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
    class TestNewBufferProtocol(object):
        # Round-trips arrays through PEP 3118 memoryview objects and checks
        # the exported buffer metadata (format, shape, strides, itemsize).
        # NOTE(review): this class appears to be nested under the
        # sys.version_info >= (2, 6) guard above — confirm indentation level.
        def _check_roundtrip(self, obj):
            # Export obj as a memoryview and re-import it; asarray must alias
            # the buffer while array() must copy it.
            obj = np.asarray(obj)
            x = memoryview(obj)
            y = np.asarray(x)
            y2 = np.array(x)
            assert_(not y.flags.owndata)
            assert_(y2.flags.owndata)
            assert_equal(y.dtype, obj.dtype)
            assert_array_equal(obj, y)
            assert_equal(y2.dtype, obj.dtype)
            assert_array_equal(obj, y2)

        def test_roundtrip(self):
            x = np.array([1,2,3,4,5], dtype='i4')
            self._check_roundtrip(x)

            x = np.array([[1,2],[3,4]], dtype=np.float64)
            self._check_roundtrip(x)

            # Discontiguous slice must round-trip too.
            x = np.zeros((3,3,3), dtype=np.float32)[:,0,:]
            self._check_roundtrip(x)

            # One field of every supported scalar kind.
            dt = [('a', 'b'),
                  ('b', 'h'),
                  ('c', 'i'),
                  ('d', 'l'),
                  ('dx', 'q'),
                  ('e', 'B'),
                  ('f', 'H'),
                  ('g', 'I'),
                  ('h', 'L'),
                  ('hx', 'Q'),
                  ('i', np.single),
                  ('j', np.double),
                  ('k', np.longdouble),
                  ('ix', np.csingle),
                  ('jx', np.cdouble),
                  ('kx', np.clongdouble),
                  ('l', 'S4'),
                  ('m', 'U4'),
                  ('n', 'V3'),
                  ('o', '?'),
                  ('p', np.half),
                  ]
            x = np.array(
                    [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                      asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
                    dtype=dt)
            self._check_roundtrip(x)

            x = np.array(([[1,2],[3,4]],), dtype=[('a', (int, (2,2)))])
            self._check_roundtrip(x)

            # Explicit endianness for sized integer types.
            x = np.array([1,2,3], dtype='>i2')
            self._check_roundtrip(x)

            x = np.array([1,2,3], dtype='<i2')
            self._check_roundtrip(x)

            x = np.array([1,2,3], dtype='>i4')
            self._check_roundtrip(x)

            x = np.array([1,2,3], dtype='<i4')
            self._check_roundtrip(x)

            # Native-only data types can be passed through the buffer interface
            # only in native byte order
            if sys.byteorder == 'little':
                x = np.array([1,2,3], dtype='>q')
                assert_raises(ValueError, self._check_roundtrip, x)

                x = np.array([1,2,3], dtype='<q')
                self._check_roundtrip(x)
            else:
                x = np.array([1,2,3], dtype='>q')
                self._check_roundtrip(x)

                x = np.array([1,2,3], dtype='<q')
                assert_raises(ValueError, self._check_roundtrip, x)

        def test_roundtrip_half(self):
            # Boundary values of the float16 range must survive the trip.
            half_list = [
                1.0,
                -2.0,
                6.5504 * 10**4, # (max half precision)
                2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
                2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
                0.0,
                -0.0,
                float('+inf'),
                float('-inf'),
                0.333251953125, # ~= 1/3
                ]

            x = np.array(half_list, dtype='>e')
            self._check_roundtrip(x)
            x = np.array(half_list, dtype='<e')
            self._check_roundtrip(x)

        def test_export_simple_1d(self):
            x = np.array([1,2,3,4,5], dtype='i')
            y = memoryview(x)
            assert_equal(y.format, 'i')
            assert_equal(y.shape, (5,))
            assert_equal(y.ndim, 1)
            assert_equal(y.strides, (4,))
            assert_equal(y.suboffsets, EMPTY)
            assert_equal(y.itemsize, 4)

        def test_export_simple_nd(self):
            x = np.array([[1,2],[3,4]], dtype=np.float64)
            y = memoryview(x)
            assert_equal(y.format, 'd')
            assert_equal(y.shape, (2, 2))
            assert_equal(y.ndim, 2)
            assert_equal(y.strides, (16, 8))
            assert_equal(y.suboffsets, EMPTY)
            assert_equal(y.itemsize, 8)

        def test_export_discontiguous(self):
            x = np.zeros((3,3,3), dtype=np.float32)[:,0,:]
            y = memoryview(x)
            assert_equal(y.format, 'f')
            assert_equal(y.shape, (3, 3))
            assert_equal(y.ndim, 2)
            # Row stride spans the full (3,3,3) parent array.
            assert_equal(y.strides, (36, 4))
            assert_equal(y.suboffsets, EMPTY)
            assert_equal(y.itemsize, 4)

        def test_export_record(self):
            # One field of every supported scalar kind (same layout as in
            # test_roundtrip above).
            dt = [('a', 'b'),
                  ('b', 'h'),
                  ('c', 'i'),
                  ('d', 'l'),
                  ('dx', 'q'),
                  ('e', 'B'),
                  ('f', 'H'),
                  ('g', 'I'),
                  ('h', 'L'),
                  ('hx', 'Q'),
                  ('i', np.single),
                  ('j', np.double),
                  ('k', np.longdouble),
                  ('ix', np.csingle),
                  ('jx', np.cdouble),
                  ('kx', np.clongdouble),
                  ('l', 'S4'),
                  ('m', 'U4'),
                  ('n', 'V3'),
                  ('o', '?'),
                  ('p', np.half),
                  ]
            x = np.array(
                    [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                      asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
                    dtype=dt)
            y = memoryview(x)
            assert_equal(y.shape, (1,))
            assert_equal(y.ndim, 1)
            assert_equal(y.suboffsets, EMPTY)

            sz = sum([dtype(b).itemsize for a, b in dt])
            # The exported format string depends on the platform size of 'l'.
            if dtype('l').itemsize == 4:
                assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:^q:dx:B:e:@H:f:=I:g:L:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
            else:
                assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:^q:dx:B:e:@H:f:=I:g:Q:h:^Q:hx:=f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
            assert_equal(y.strides, (sz,))
            assert_equal(y.itemsize, sz)

        def test_export_subarray(self):
            x = np.array(([[1,2],[3,4]],), dtype=[('a', ('i', (2,2)))])
            y = memoryview(x)
            assert_equal(y.format, 'T{(2,2)i:a:}')
            # 0-d export: empty shape/strides.
            assert_equal(y.shape, EMPTY)
            assert_equal(y.ndim, 0)
            assert_equal(y.strides, EMPTY)
            assert_equal(y.suboffsets, EMPTY)
            assert_equal(y.itemsize, 16)

        def test_export_endian(self):
            # Only non-native byte order is spelled out in the format string.
            x = np.array([1,2,3], dtype='>i')
            y = memoryview(x)
            if sys.byteorder == 'little':
                assert_equal(y.format, '>i')
            else:
                assert_equal(y.format, 'i')

            x = np.array([1,2,3], dtype='<i')
            y = memoryview(x)
            if sys.byteorder == 'little':
                assert_equal(y.format, 'i')
            else:
                assert_equal(y.format, '<i')

        def test_padding(self):
            # Fields at various explicit offsets (implicit padding) round-trip.
            for j in xrange(8):
                x = np.array([(1,),(2,)], dtype={'f0': (int, j)})
                self._check_roundtrip(x)

        def test_reference_leak(self):
            # Exporting/importing a buffer must not leak references to the
            # _internal helper module.
            count_1 = sys.getrefcount(np.core._internal)
            a = np.zeros(4)
            b = memoryview(a)
            c = np.asarray(b)
            count_2 = sys.getrefcount(np.core._internal)
            assert_equal(count_1, count_2)

        def test_padded_struct_array(self):
            # Aligned structs with sub-structs and sub-arrays keep their
            # padding through the buffer protocol.
            dt1 = np.dtype(
                    [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
                    align=True)
            x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
            self._check_roundtrip(x1)

            dt2 = np.dtype(
                    [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
                    align=True)
            x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
            self._check_roundtrip(x2)

            dt3 = np.dtype(
                    [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                     ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
            x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
            self._check_roundtrip(x3)
class TestArrayAttributeDeletion(object):
    """Deleting ndarray/flags attributes must raise AttributeError.

    Regression tests for ticket #2046 (attribute deletion segfault).
    """

    def _assert_undeletable(self, obj, names):
        # Every listed attribute must refuse deletion cleanly.
        for name in names:
            assert_raises(AttributeError, delattr, obj, name)

    def test_multiarray_writable_attributes_deletion(self):
        self._assert_undeletable(np.ones(2),
                                 ['shape', 'strides', 'data', 'dtype',
                                  'real', 'imag', 'flat'])

    def test_multiarray_not_writable_attributes_deletion(self):
        self._assert_undeletable(np.ones(2),
                                 ["ndim", "flags", "itemsize", "size",
                                  "nbytes", "base", "ctypes", "T",
                                  "__array_interface__", "__array_struct__",
                                  "__array_priority__", "__array_finalize__"])

    def test_multiarray_flags_writable_attribute_deletion(self):
        self._assert_undeletable(np.ones(2).flags,
                                 ['updateifcopy', 'aligned', 'writeable'])

    def test_multiarray_flags_not_writable_attribute_deletion(self):
        self._assert_undeletable(np.ones(2).flags,
                                 ["contiguous", "c_contiguous", "f_contiguous",
                                  "fortran", "owndata", "fnc", "forc",
                                  "behaved", "carray", "farray", "num"])
def test_array_interface():
    """Scalar coercion and shape handling through __array_interface__."""
    class Foo(object):
        def __init__(self, value):
            self.value = value
            self.iface = {'typestr': '=f8'}

        def __float__(self):
            return float(self.value)

        @property
        def __array_interface__(self):
            return self.iface

    obj = Foo(0.5)
    # With no shape entry the object coerces like a float scalar.
    assert_equal(np.array(obj), 0.5)
    assert_equal(np.array([obj]), [0.5])
    assert_equal(np.array([obj, obj]), [0.5, 0.5])
    assert_equal(np.array(obj).dtype, np.dtype('=f8'))

    # Explicit shapes: scalar, invalid, broadcastable, and mismatched.
    obj.iface['shape'] = ()
    assert_equal(np.array(obj), 0.5)
    obj.iface['shape'] = None
    assert_raises(TypeError, np.array, obj)
    obj.iface['shape'] = (1, 1)
    assert_equal(np.array(obj), [[0.5]])
    obj.iface['shape'] = (2,)
    assert_raises(ValueError, np.array, obj)

    # An interface copied verbatim from a 0-d array (no shape key issues).
    class ArrayLike(object):
        array = np.array(1)
        __array_interface__ = array.__array_interface__
    assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
    """Deleting from a flat iterator must raise TypeError, not crash."""
    flat_iter = np.ones(3).flat
    try:
        del flat_iter[1]
        del flat_iter[1:2]
    except TypeError:
        # Expected: flatiter does not support item deletion.
        pass
    except:
        # Any other exception (or a crash) is a failure.
        raise AssertionError
def test_scalar_element_deletion():
    """Deleting a field from a record scalar must raise ValueError."""
    records = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
    assert_raises(ValueError, records[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
    # Driver for the C-level PyDataMem_SetEventHook tests; the assertions
    # themselves live in the C helpers called below.
    def test_mem_seteventhook(self):
        # The actual tests are within the C code in
        # multiarray/multiarray_tests.c.src
        test_pydatamem_seteventhook_start()
        # force an allocation and free of a numpy array
        a = np.zeros(10)
        del a
        test_pydatamem_seteventhook_end()
class PriorityNdarray(object):
    """Wrapper with a high __array_priority__, used to verify that ndarray
    comparisons defer to the higher-priority operand and re-wrap results."""
    __array_priority__ = 1000

    def __init__(self, array):
        self.array = array

    def _compare(self, other, op):
        # Unwrap a wrapped operand, apply ``op``, and wrap the result.
        if isinstance(other, PriorityNdarray):
            other = other.array
        return PriorityNdarray(op(self.array, other))

    def __lt__(self, array):
        return self._compare(array, lambda a, b: a < b)

    def __gt__(self, array):
        return self._compare(array, lambda a, b: a > b)

    def __le__(self, array):
        return self._compare(array, lambda a, b: a <= b)

    def __ge__(self, array):
        return self._compare(array, lambda a, b: a >= b)

    def __eq__(self, array):
        return self._compare(array, lambda a, b: a == b)

    def __ne__(self, array):
        return self._compare(array, lambda a, b: a != b)
class TestArrayPriority(TestCase):
    """Mixed ndarray/PriorityNdarray comparisons honour __array_priority__.

    The six comparison tests previously repeated one 15-line body
    verbatim; the shared logic is factored into ``_check_comparison``.
    ``dtype`` is a free name resolved in the enclosing scope of the test
    module (the suite appears to be instantiated per dtype) -- TODO
    confirm against the full file.
    """

    def _check_comparison(self, op):
        # Build one plain operand pair and one wrapped pair, apply ``op``
        # in every combination, and require identical element-wise results
        # with the wrapper type winning whenever it participates.
        l = np.asarray([0., -1., 1.], dtype=dtype)
        r = np.asarray([0., 1., -1.], dtype=dtype)
        lp = PriorityNdarray(l)
        rp = PriorityNdarray(r)
        res1 = op(l, r)
        res2 = op(l, rp)
        res3 = op(lp, r)
        res4 = op(lp, rp)
        assert_array_equal(res1, res2.array)
        assert_array_equal(res1, res3.array)
        assert_array_equal(res1, res4.array)
        assert_(isinstance(res1, np.ndarray))
        assert_(isinstance(res2, PriorityNdarray))
        assert_(isinstance(res3, PriorityNdarray))
        assert_(isinstance(res4, PriorityNdarray))

    def test_lt(self):
        self._check_comparison(lambda a, b: a < b)

    def test_gt(self):
        self._check_comparison(lambda a, b: a > b)

    def test_le(self):
        self._check_comparison(lambda a, b: a <= b)

    def test_ge(self):
        self._check_comparison(lambda a, b: a >= b)

    def test_eq(self):
        self._check_comparison(lambda a, b: a == b)

    def test_ne(self):
        self._check_comparison(lambda a, b: a != b)
# Allow running this test file directly as a standalone script.
if __name__ == "__main__":
    run_module_suite()
| beiko-lab/gengis | bin/Lib/site-packages/numpy/core/tests/test_multiarray.py | Python | gpl-3.0 | 119,031 |
#!/usr/bin/env python
# Author: Angela Chapman
# Date: 8/6/2014
#
# This file contains code to accompany the Kaggle tutorial
# "Deep learning goes to the movies". The code in this file
# is for Part 2 of the tutorial and covers Bag of Centroids
# for a Word2Vec model. This code assumes that you have already
# run Word2Vec and saved a model called "300features_40minwords_10context"
#
# *************************************** #
# Load a pre-trained model
from gensim.models import Word2Vec
from sklearn.cluster import KMeans
import time
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
import numpy as np
import os
from KaggleWord2VecUtility import KaggleWord2VecUtility
# Define a function to create bags of centroids
#
def create_bag_of_centroids(wordlist, word_centroid_map):
    """Return a float32 histogram of centroid hits for one word list.

    ``word_centroid_map`` maps vocabulary words to cluster indices.  The
    returned vector has one slot per cluster (highest index + 1) and
    counts how many words of ``wordlist`` fall into each cluster.
    Out-of-vocabulary words are silently ignored.
    """
    num_centroids = max(word_centroid_map.values()) + 1
    # Pre-allocate the bag of centroids vector (for speed).
    bag_of_centroids = np.zeros(num_centroids, dtype="float32")
    for word in wordlist:
        if word in word_centroid_map:
            bag_of_centroids[word_centroid_map[word]] += 1
    return bag_of_centroids
if __name__ == '__main__':
    # Load the Word2Vec model trained earlier in the tutorial series.
    model = Word2Vec.load("../result/300features_40minwords_10context")

    # ****** Run k-means on the word vectors and print a few clusters
    #
    start = time.time() # Start time

    # Set "k" (num_clusters) to be 1/5th of the vocabulary size, or an
    # average of 5 words per cluster
    word_vectors = model.syn0
    # NOTE(review): relies on Python 2 integer division; under Python 3
    # this would be a float and KMeans(n_clusters=...) would reject it.
    num_clusters = word_vectors.shape[0] / 5

    # Initalize a k-means object and use it to extract centroids
    print "Running K means"
    kmeans_clustering = KMeans( n_clusters = num_clusters )
    idx = kmeans_clustering.fit_predict( word_vectors )

    # Get the end time and print how long the process took
    end = time.time()
    elapsed = end - start
    print "Time taken for K Means clustering: ", elapsed, "seconds."

    # Create a Word / Index dictionary, mapping each vocabulary word to
    # a cluster number
    word_centroid_map = dict(zip( model.index2word, idx ))

    # Print the first ten clusters
    for cluster in xrange(0,10):
        #
        # Print the cluster number
        print "\nCluster %d" % cluster
        #
        # Find all of the words for that cluster number, and print them out
        # NOTE(review): .values()/.keys() are rebuilt on every iteration,
        # making this loop quadratic in vocabulary size; fine for a demo.
        words = []
        for i in xrange(0,len(word_centroid_map.values())):
            if( word_centroid_map.values()[i] == cluster ):
                words.append(word_centroid_map.keys()[i])
        print words

    # Create clean_train_reviews and clean_test_reviews as we did before
    #
    # Read data from files
    #train = pd.read_csv( os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'), header=0, delimiter="\t", quoting=3 )
    #test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'), header=0, delimiter="\t", quoting=3 )
    # NOTE(review): absolute, machine-specific paths; the commented-out
    # relative versions above are the portable alternative.
    train = pd.read_csv("/home/charch/gitwork/Word2Vec/data/labeledTrainData.tsv", header=0, delimiter="\t", quoting=3 )
    test = pd.read_csv("/home/charch/gitwork/Word2Vec/data/testData.tsv", header=0, delimiter="\t", quoting=3)

    print "Cleaning training reviews"
    clean_train_reviews = []
    for review in train["review"]:
        clean_train_reviews.append( KaggleWord2VecUtility.review_to_wordlist( review, \
            remove_stopwords=True ))

    print "Cleaning test reviews"
    clean_test_reviews = []
    for review in test["review"]:
        clean_test_reviews.append( KaggleWord2VecUtility.review_to_wordlist( review, \
            remove_stopwords=True ))

    # ****** Create bags of centroids
    #
    # Pre-allocate an array for the training set bags of centroids (for speed)
    train_centroids = np.zeros( (train["review"].size, num_clusters), \
        dtype="float32" )

    # Transform the training set reviews into bags of centroids
    counter = 0
    for review in clean_train_reviews:
        train_centroids[counter] = create_bag_of_centroids( review, \
            word_centroid_map )
        counter += 1

    # Repeat for test reviews
    test_centroids = np.zeros(( test["review"].size, num_clusters), \
        dtype="float32" )

    counter = 0
    for review in clean_test_reviews:
        test_centroids[counter] = create_bag_of_centroids( review, \
            word_centroid_map )
        counter += 1

    # ****** Fit a random forest and extract predictions
    #
    forest = RandomForestClassifier(n_estimators = 100)

    # Fitting the forest may take a few minutes
    print "Fitting a random forest to labeled training data..."
    forest = forest.fit(train_centroids,train["sentiment"])
    result = forest.predict(test_centroids)

    # Write the test results
    output = pd.DataFrame(data={"id":test["id"], "sentiment":result})
    output.to_csv("../result/BagOfCentroids.csv", index=False, quoting=3)
    print "Wrote BagOfCentroids.csv"
| CharLLCH/Word2Vec | pycode/Word2Vec_BagOfCentroids.py | Python | gpl-2.0 | 5,355 |
from threading import Lock
from pprint import pformat
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django import http
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, iri_to_uri
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
# Canonical reason phrases keyed by HTTP status code; used by WSGIHandler
# to build the "<code> <phrase>" status line passed to start_response.
STATUS_CODE_TEXT = {
    100: 'CONTINUE',
    101: 'SWITCHING PROTOCOLS',
    200: 'OK',
    201: 'CREATED',
    202: 'ACCEPTED',
    203: 'NON-AUTHORITATIVE INFORMATION',
    204: 'NO CONTENT',
    205: 'RESET CONTENT',
    206: 'PARTIAL CONTENT',
    300: 'MULTIPLE CHOICES',
    301: 'MOVED PERMANENTLY',
    302: 'FOUND',
    303: 'SEE OTHER',
    304: 'NOT MODIFIED',
    305: 'USE PROXY',
    306: 'RESERVED',
    307: 'TEMPORARY REDIRECT',
    400: 'BAD REQUEST',
    401: 'UNAUTHORIZED',
    402: 'PAYMENT REQUIRED',
    403: 'FORBIDDEN',
    404: 'NOT FOUND',
    405: 'METHOD NOT ALLOWED',
    406: 'NOT ACCEPTABLE',
    407: 'PROXY AUTHENTICATION REQUIRED',
    408: 'REQUEST TIMEOUT',
    409: 'CONFLICT',
    410: 'GONE',
    411: 'LENGTH REQUIRED',
    412: 'PRECONDITION FAILED',
    413: 'REQUEST ENTITY TOO LARGE',
    414: 'REQUEST-URI TOO LONG',
    415: 'UNSUPPORTED MEDIA TYPE',
    416: 'REQUESTED RANGE NOT SATISFIABLE',
    417: 'EXPECTATION FAILED',
    500: 'INTERNAL SERVER ERROR',
    501: 'NOT IMPLEMENTED',
    502: 'BAD GATEWAY',
    503: 'SERVICE UNAVAILABLE',
    504: 'GATEWAY TIMEOUT',
    505: 'HTTP VERSION NOT SUPPORTED',
}
def safe_copyfileobj(fsrc, fdst, length=16*1024, size=0):
    """
    A version of shutil.copyfileobj that will not read more than 'size' bytes.
    This makes it safe from clients sending more than CONTENT_LENGTH bytes of
    data in the body.
    """
    remaining = size
    if not remaining:
        return
    while remaining > 0:
        # Never request more than ``length`` bytes at once, and never
        # more than the caller's declared budget overall.
        chunk = fsrc.read(min(length, remaining))
        if not chunk:
            break
        fdst.write(chunk)
        remaining -= len(chunk)
class WSGIRequest(http.HttpRequest):
    # HttpRequest subclass backed by a raw WSGI ``environ`` mapping.
    # GET/POST/COOKIES/FILES/raw_post_data are parsed lazily on first
    # access and cached on the instance.
    def __init__(self, environ):
        script_name = base.get_script_name(environ)
        path_info = force_unicode(environ.get('PATH_INFO', u'/'))
        if not path_info or path_info == script_name:
            # Sometimes PATH_INFO exists, but is empty (e.g. accessing
            # the SCRIPT_NAME URL without a trailing slash). We really need to
            # operate as if they'd requested '/'. Not amazingly nice to force
            # the path like this, but should be harmless.
            #
            # (The comparison of path_info to script_name is to work around an
            # apparent bug in flup 1.0.1. Se Django ticket #8490).
            path_info = u'/'
        self.environ = environ
        self.path_info = path_info
        self.path = '%s%s' % (script_name, path_info)
        # META is the environ dict itself (not a copy), with the
        # normalised path components written back into it.
        self.META = environ
        self.META['PATH_INFO'] = path_info
        self.META['SCRIPT_NAME'] = script_name
        self.method = environ['REQUEST_METHOD'].upper()
        # Set when POST parsing fails, so __repr__ can say so instead of
        # showing a misleading empty POST.
        self._post_parse_error = False

    def __repr__(self):
        # Since this is called as part of error handling, we need to be very
        # robust against potentially malformed input.
        try:
            get = pformat(self.GET)
        except:
            get = '<could not parse>'
        if self._post_parse_error:
            post = '<could not parse>'
        else:
            try:
                post = pformat(self.POST)
            except:
                post = '<could not parse>'
        try:
            cookies = pformat(self.COOKIES)
        except:
            cookies = '<could not parse>'
        try:
            meta = pformat(self.META)
        except:
            meta = '<could not parse>'
        return '<WSGIRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
            (get, post, cookies, meta)

    def get_full_path(self):
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (self.path, self.environ.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.environ.get('QUERY_STRING', ''))) or '')

    def is_secure(self):
        # True only when the WSGI server reports an https URL scheme.
        return 'wsgi.url_scheme' in self.environ \
            and self.environ['wsgi.url_scheme'] == 'https'

    def _load_post_and_files(self):
        # Populates self._post and self._files
        if self.method == 'POST':
            if self.environ.get('CONTENT_TYPE', '').startswith('multipart'):
                self._raw_post_data = ''
                try:
                    self._post, self._files = self.parse_file_upload(self.META, self.environ['wsgi.input'])
                except:
                    # An error occured while parsing POST data. Since when
                    # formatting the error the request handler might access
                    # self.POST, set self._post and self._file to prevent
                    # attempts to parse POST data again.
                    self._post = http.QueryDict('')
                    self._files = datastructures.MultiValueDict()
                    # Mark that an error occured. This allows self.__repr__ to
                    # be explicit about it instead of simply representing an
                    # empty POST
                    self._post_parse_error = True
                    raise
            else:
                self._post, self._files = http.QueryDict(self.raw_post_data, encoding=self._encoding), datastructures.MultiValueDict()
        else:
            # Non-POST requests get empty POST/FILES containers.
            self._post, self._files = http.QueryDict('', encoding=self._encoding), datastructures.MultiValueDict()

    def _get_request(self):
        # Merged POST-then-GET view (POST takes precedence on lookup).
        if not hasattr(self, '_request'):
            self._request = datastructures.MergeDict(self.POST, self.GET)
        return self._request

    def _get_get(self):
        if not hasattr(self, '_get'):
            # The WSGI spec says 'QUERY_STRING' may be absent.
            self._get = http.QueryDict(self.environ.get('QUERY_STRING', ''), encoding=self._encoding)
        return self._get

    def _set_get(self, get):
        self._get = get

    def _get_post(self):
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    def _get_cookies(self):
        if not hasattr(self, '_cookies'):
            self._cookies = http.parse_cookie(self.environ.get('HTTP_COOKIE', ''))
        return self._cookies

    def _set_cookies(self, cookies):
        self._cookies = cookies

    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    def _get_raw_post_data(self):
        try:
            return self._raw_post_data
        except AttributeError:
            buf = StringIO()
            try:
                # CONTENT_LENGTH might be absent if POST doesn't have content at all (lighttpd)
                content_length = int(self.environ.get('CONTENT_LENGTH', 0))
            except (ValueError, TypeError):
                # If CONTENT_LENGTH was empty string or not an integer, don't
                # error out. We've also seen None passed in here (against all
                # specs, but see ticket #8259), so we handle TypeError as well.
                content_length = 0
            if content_length > 0:
                # Bounded read: never consume more than the declared length.
                safe_copyfileobj(self.environ['wsgi.input'], buf,
                                 size=content_length)
            self._raw_post_data = buf.getvalue()
            buf.close()
            return self._raw_post_data

    GET = property(_get_get, _set_get)
    POST = property(_get_post, _set_post)
    COOKIES = property(_get_cookies, _set_cookies)
    FILES = property(_get_files)
    REQUEST = property(_get_request)
    raw_post_data = property(_get_raw_post_data)
class WSGIHandler(base.BaseHandler):
    # Guards one-time middleware initialisation: middleware is loaded
    # lazily on the first request because settings are not available at
    # import time, and multiple worker threads may race to do it.
    initLock = Lock()
    request_class = WSGIRequest

    def __call__(self, environ, start_response):
        from django.conf import settings

        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.initLock.acquire()
            try:
                try:
                    # Check that middleware is still uninitialised.
                    if self._request_middleware is None:
                        self.load_middleware()
                except:
                    # Unload whatever middleware we got
                    self._request_middleware = None
                    raise
            finally:
                self.initLock.release()

        set_script_prefix(base.get_script_name(environ))
        signals.request_started.send(sender=self.__class__)
        try:
            try:
                request = self.request_class(environ)
            except UnicodeDecodeError:
                # Malformed bytes in the request path/query: reply 400
                # rather than crash.
                response = http.HttpResponseBadRequest()
            else:
                response = self.get_response(request)

                # Apply response middleware
                for middleware_method in self._response_middleware:
                    response = middleware_method(request, response)
                response = self.apply_response_fixes(request, response)
        finally:
            signals.request_finished.send(sender=self.__class__)

        try:
            status_text = STATUS_CODE_TEXT[response.status_code]
        except KeyError:
            status_text = 'UNKNOWN STATUS CODE'
        status = '%s %s' % (response.status_code, status_text)
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        for c in response.cookies.values():
            response_headers.append(('Set-Cookie', str(c.output(header=''))))
        start_response(status, response_headers)
        return response
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/core/handlers/wsgi.py | Python | bsd-3-clause | 9,928 |
#!/usr/bin/env python
import sys
import os
import platform
import cexbot
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Refuse to run on interpreters older than Python 2.5.
if getattr(sys, 'version_info', (0, 0, 0)) < (2, 5, 0, 'final'):
    raise SystemExit("cexbot requires Python 2.5 or later.")

# Convenience shortcut: ``python setup.py publish`` builds and uploads a
# source distribution, then exits.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

scripts = ['cexbot-cli']
# On Windows also ship the .bat wrapper.
if os.name == 'nt':
    scripts.append('bin/cexbot-cli.bat')

# NOTE(review): despite the name, this list is passed to
# ``install_requires`` below — it holds runtime dependencies, not
# package names.
packages = [
    'requests',
    'semantic_version',
]

# py2app bundling configuration (OS X app build).
APP = ['cexbot/main.py']
DATA_FILES = ['cexbot-cli']
OPTIONS = {'argv_emulation': True}
# Directory containing this setup.py; used to resolve data files.
package_dir = os.path.realpath(os.path.dirname(__file__))

def get_file_contents(file_path):
    """Return the text of the file at *file_path*, relative to this package.

    The handle is closed deterministically via ``with`` — the original
    called ``open(full_path, 'r').read()`` and leaked the file object.
    """
    full_path = os.path.join(package_dir, file_path)
    with open(full_path, 'r') as f:
        return f.read()
# Package metadata and build configuration; also drives the py2app
# bundling used for the OS X app build.
# NOTE(review): ``license`` is passed the full LICENSE file text, but
# setuptools expects a short identifier here — confirm intent.
setup(
    name = 'cexbot',
    description = cexbot.__doc__.split('\n\n')[0],
    long_description = get_file_contents('README.md'),
    keywords = 'cexbot, bitcoin, finance',
    url = 'https://github.com/nikcub/cexbot',
    platforms = ['linux', 'osx'],
    version = cexbot.get_version(),
    author = 'Nik Cubrilovic',
    author_email = 'nikcub@gmail.com',
    license = get_file_contents('LICENSE'),
    install_requires = packages,
    packages = ['cexbot'],
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
    scripts = scripts,
    # entry_points={
    #     'console_scripts': [
    #         "cexbot-cli = cexbot.command_utils:run_cl"
    #     ],
    #     'gui_scripts': [
    #         "cexbot-gui = cexbot.command_utils:run_gui"
    #     ]
    # },
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# InkCut, Plot HPGL directly from Inkscape.
# device.py
# device settings
#
# Copyright 2010 Jairus Martin <frmdstryr@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import serial
from lxml import etree
import cups
import os
class Device:
    """Cutting/plotting device settings plus helpers to send HPGL data.

    Output goes either to a CUPS printer queue or to a raw serial port,
    selected by the ``interface`` setting.
    """

    def __init__(self, config=None):
        """Merge *config* over built-in defaults.

        The original used a mutable default argument (``config={}``);
        ``None`` avoids the shared-default pitfall.
        """
        conf = {'width': 0, 'length': 0, 'name': '', 'interface': 'serial',
                'serial': {'port': '/dev/ttyUSB0', 'baud': 9600}}
        conf.update(config or {})
        self.width = conf['width']
        self.length = conf['length']
        self.name = conf['name']
        self.interface = conf['interface']
        self.serial = conf['serial']

    def getPrinters(self):
        """Cache the CUPS printer map on ``self.printers``."""
        con = cups.Connection()
        self.printers = con.getPrinters()

    def save(self, id, attribs):
        """Persist interface settings for device *id* into the XML tree."""
        dev = self.xml.find('device[@id="%s"]' % id)
        # Replace any existing definition, otherwise create a new node.
        if len(dev):
            del dev[0]
        else:
            dev = etree.SubElement(self.xml, 'device')
            dev.set('id', id)
        # BUG FIX: the original passed the undefined name ``d`` here,
        # which raised NameError; the element must hang off ``dev``.
        iface = etree.SubElement(dev, "interface")
        for key, value in attribs.items():
            iface.set(key, value)

    def plot(self, filename):
        """Send the HPGL file at *filename* out the configured interface.

        Raises AssertionError for an unknown interface type.
        """

        def toSerial(data, settings):
            assert type(data) == str, "input data must be a str type"
            import serial
            # Defaults for every field read below.  The original defaulted
            # only 'baud' (so sparse settings raised KeyError) and
            # hard-coded the port to /dev/ttyS0, ignoring the configured
            # value.
            d = {'port': '/dev/ttyS0', 'baud': 9600, 'parity': 'N',
                 'xonxoff': False, 'dsrdtr': False, 'rtscts': False,
                 'bytesize': 8, 'stopbits': 1}
            d.update(settings)
            ser = serial.Serial()
            ser.port = d['port']
            ser.baudrate = d['baud']
            ser.parity = d['parity']
            ser.xonxoff = d['xonxoff']
            ser.dsrdtr = d['dsrdtr']
            ser.rtscts = d['rtscts']
            ser.bytesize = d['bytesize']
            ser.stopbits = d['stopbits']
            ser.open()
            if ser.isOpen():
                # Send data; close flushes the port.
                ser.write(data)
                ser.close()
                return True
            return False

        def toPrinter(data, printer):
            assert type(data) == str, "input data must be a str type"
            assert type(printer) == str, "printer name must be a string"
            # lpr reads the job from stdin; close() submits it.
            pipe = os.popen('lpr -P %s' % (printer), 'w')
            pipe.write(data)
            pipe.close()
            return True

        # Read the whole file once and close it (the original leaked the
        # file handle).
        with open(filename, 'r') as f:
            data = f.read()
        if self.interface == 'printer':
            toPrinter(data, self.name)
        elif self.interface == 'serial':
            toSerial(data, self.serial)
        else:
            raise AssertionError('Invalid interface type, only printers and '
                                 'serial connections are supported.')
| shackspace/inkcut_dmpl | app/bin/device.py | Python | gpl-3.0 | 3,688 |
import tensorflow as tf
from keras import backend as K
from keras import regularizers, constraints, initializers, activations
from keras.layers.recurrent import Recurrent
from keras.engine import InputSpec
tfPrint = lambda d, T: tf.Print(input_=T, data=[T, tf.shape(T)], message=d)
"""
Original code from the keras backend that
implements the _time_distributed_dense layer.
"""
import keras.backend as K
def _time_distributed_dense(x, w, b=None, dropout=None,
                            input_dim=None, output_dim=None,
                            timesteps=None, training=None):
    """Apply `y . w + b` for every temporal slice y of x.

    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: wether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        output_dim: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
        training: training phase tensor or boolean.

    # Returns
        Output tensor.
    """
    # Any dimension not supplied is recovered symbolically from the
    # tensors themselves; x is assumed to be (batch, timesteps, input_dim).
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not output_dim:
        output_dim = K.shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)

    # collapse time dimension and batch dimension together so a single
    # 2-D matmul covers every timestep at once
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x = K.bias_add(x, b)
    # reshape to 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
        x.set_shape([None, None, output_dim])
    else:
        x = K.reshape(x, (-1, timesteps, output_dim))
    return x
class AttentionDecoder(Recurrent):
    # Bahdanau-style attention decoder RNN: at each output step it attends
    # over the full encoded input sequence, then updates a GRU-like hidden
    # state and emits a softmax over output labels.
    def __init__(self, units, output_dim,
                 activation='tanh',
                 return_probabilities=False,
                 name='AttentionDecoder',
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """
        Implements an AttentionDecoder that takes in a sequence encoded by an
        encoder and outputs the decoded states
        :param units: dimension of the hidden state and the attention matrices
        :param output_dim: the number of labels in the output space

        references:
            Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio.
            "Neural machine translation by jointly learning to align and translate."
            arXiv preprint arXiv:1409.0473 (2014).
        """
        self.units = units
        self.output_dim = output_dim
        self.return_probabilities = return_probabilities
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # NOTE(review): the recurrent regularizer/constraint are fed from
        # the *kernel* arguments (there is no separate recurrent_regularizer
        # parameter in the signature) — confirm this is intended.
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionDecoder, self).__init__(**kwargs)
        self.name = name
        self.return_sequences = True  # must return sequences

    def build(self, input_shape):
        """
        See Appendix 2 of Bahdanau 2014, arXiv:1409.0473
        for model details that correspond to the matrices here.
        """
        # Expected input: (batch, timesteps, input_dim) encoder outputs.
        self.batch_size, self.timesteps, self.input_dim = input_shape

        if self.stateful:
            super(AttentionDecoder, self).reset_states()

        self.states = [None, None]  # y, s

        """
            Matrices for creating the context vector
        """
        self.V_a = self.add_weight(shape=(self.units,),
                                   name='V_a',
                                   initializer=self.kernel_initializer,
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
        self.W_a = self.add_weight(shape=(self.units, self.units),
                                   name='W_a',
                                   initializer=self.kernel_initializer,
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
        self.U_a = self.add_weight(shape=(self.input_dim, self.units),
                                   name='U_a',
                                   initializer=self.kernel_initializer,
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
        self.b_a = self.add_weight(shape=(self.units,),
                                   name='b_a',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
        """
            Matrices for the r (reset) gate
        """
        self.C_r = self.add_weight(shape=(self.input_dim, self.units),
                                   name='C_r',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.U_r = self.add_weight(shape=(self.units, self.units),
                                   name='U_r',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.W_r = self.add_weight(shape=(self.output_dim, self.units),
                                   name='W_r',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.b_r = self.add_weight(shape=(self.units, ),
                                   name='b_r',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)

        """
            Matrices for the z (update) gate
        """
        self.C_z = self.add_weight(shape=(self.input_dim, self.units),
                                   name='C_z',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.U_z = self.add_weight(shape=(self.units, self.units),
                                   name='U_z',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.W_z = self.add_weight(shape=(self.output_dim, self.units),
                                   name='W_z',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.b_z = self.add_weight(shape=(self.units, ),
                                   name='b_z',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
        """
            Matrices for the proposal
        """
        self.C_p = self.add_weight(shape=(self.input_dim, self.units),
                                   name='C_p',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.U_p = self.add_weight(shape=(self.units, self.units),
                                   name='U_p',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.W_p = self.add_weight(shape=(self.output_dim, self.units),
                                   name='W_p',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.b_p = self.add_weight(shape=(self.units, ),
                                   name='b_p',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
        """
            Matrices for making the final prediction vector
        """
        self.C_o = self.add_weight(shape=(self.input_dim, self.output_dim),
                                   name='C_o',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.U_o = self.add_weight(shape=(self.units, self.output_dim),
                                   name='U_o',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.W_o = self.add_weight(shape=(self.output_dim, self.output_dim),
                                   name='W_o',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.b_o = self.add_weight(shape=(self.output_dim, ),
                                   name='b_o',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)

        # For creating the initial state:
        self.W_s = self.add_weight(shape=(self.input_dim, self.units),
                                   name='W_s',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)

        self.input_spec = [
            InputSpec(shape=(self.batch_size, self.timesteps, self.input_dim))]
        self.built = True

    def call(self, x):
        # store the whole sequence so we can "attend" to it at each timestep
        self.x_seq = x

        # apply the a dense layer over the time dimension of the sequence
        # do it here because it doesn't depend on any previous steps
        # thefore we can save computation time:
        self._uxpb = _time_distributed_dense(self.x_seq, self.U_a, b=self.b_a,
                                             input_dim=self.input_dim,
                                             timesteps=self.timesteps,
                                             output_dim=self.units)

        return super(AttentionDecoder, self).call(x)

    def get_initial_state(self, inputs):
        # NOTE(review): debug print left in; consider removing for
        # production use.
        print('inputs shape:', inputs.get_shape())

        # apply the matrix on the first time step to get the initial s0.
        s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))

        # from keras.layers.recurrent to initialize a vector of (batchsize,
        # output_dim)
        y0 = K.zeros_like(inputs)  # (samples, timesteps, input_dims)
        y0 = K.sum(y0, axis=(1, 2))  # (samples, )
        y0 = K.expand_dims(y0)  # (samples, 1)
        y0 = K.tile(y0, [1, self.output_dim])

        return [y0, s0]

    def step(self, x, states):
        # states carries the previous output (ytm) and hidden state (stm).
        ytm, stm = states

        # repeat the hidden state to the length of the sequence
        _stm = K.repeat(stm, self.timesteps)

        # now multiplty the weight matrix with the repeated hidden state
        _Wxstm = K.dot(_stm, self.W_a)

        # calculate the attention probabilities
        # this relates how much other timesteps contributed to this one.
        et = K.dot(activations.tanh(_Wxstm + self._uxpb),
                   K.expand_dims(self.V_a))
        at = K.exp(et)
        at_sum = K.sum(at, axis=1)
        at_sum_repeated = K.repeat(at_sum, self.timesteps)
        at /= at_sum_repeated  # vector of size (batchsize, timesteps, 1)

        # calculate the context vector
        context = K.squeeze(K.batch_dot(at, self.x_seq, axes=1), axis=1)
        # ~~~> calculate new hidden state
        # first calculate the "r" gate:

        rt = activations.sigmoid(
            K.dot(ytm, self.W_r)
            + K.dot(stm, self.U_r)
            + K.dot(context, self.C_r)
            + self.b_r)

        # now calculate the "z" gate
        zt = activations.sigmoid(
            K.dot(ytm, self.W_z)
            + K.dot(stm, self.U_z)
            + K.dot(context, self.C_z)
            + self.b_z)

        # calculate the proposal hidden state:
        s_tp = activations.tanh(
            K.dot(ytm, self.W_p)
            + K.dot((rt * stm), self.U_p)
            + K.dot(context, self.C_p)
            + self.b_p)

        # new hidden state: convex combination of previous and proposal,
        # weighted by the update gate.
        st = (1-zt)*stm + zt * s_tp

        yt = activations.softmax(
            K.dot(ytm, self.W_o)
            + K.dot(stm, self.U_o)
            + K.dot(context, self.C_o)
            + self.b_o)

        # Optionally expose the attention weights instead of the labels.
        if self.return_probabilities:
            return at, [yt, st]
        else:
            return yt, [yt, st]

    def compute_output_shape(self, input_shape):
        """
            For Keras internal compatability checking
        """
        if self.return_probabilities:
            return (None, self.timesteps, self.timesteps)
        else:
            return (None, self.timesteps, self.output_dim)

    def get_config(self):
        """
            For rebuilding models on load time.
        """
        config = {
            'output_dim': self.output_dim,
            'units': self.units,
            'return_probabilities': self.return_probabilities
        }
        base_config = super(AttentionDecoder, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# tr_mongo_rest_ext.py william k. johnson 2017
import logging
import re
import requests
from datetime import datetime
import json
from bson import json_util , ObjectId
import flask
from flask import Flask , request , send_file , render_template , url_for
from flask import redirect , Response , current_app , jsonify , Blueprint
from flask_pymongo import PyMongo
from flask_restful import Resource, Api
from pymongo.errors import DuplicateKeyError
try: # python 2
from urllib import urlencode
except ImportError: # python 3
from urllib.parse import urlencode
from cci_mta_trinity.streams.tr_mongo_rest import mongo
from cci_mta_trinity.application import app ,mongo_no_resource_exception
from cci_mta_trinity.streams.tr_mongo_rest import _logger
# --------------------------------------------------------------------------------------------------------
def imap2017_enum_message_metadata_by_folder():
    """
    POST retrieve message metadata for cache

    Request JSON: {'moniker': <store>, 'folder': <folder>}; records are
    looked up under the canonical store key '<moniker>_<folder>'.

    :return: JSON {'messages': [...], 'count': N}, or {'error': msg}
    """
    _logger.info( '...enum_message_metadata_by_folder...' )
    output = list()
    count = 0
    if request.method == 'POST' :
        try :
            data = json.loads( request.data )
            folder = data['folder']
            moniker = data['moniker']
            moniker = '%s_%s' % ( moniker , folder )
            db = mongo.db.domain_existential_message
            # BUG FIX: a pymongo cursor is always truthy, so the previous
            # 'if not messages' emptiness check could never fire.
            # Materialize the cursor so an empty result actually raises.
            messages = list( db.find( { "domain_canonical_store" : moniker } ) )
            if not messages :
                _logger.error( '....enum_message_metadata_by_folder...' )
                raise mongo_no_resource_exception( '..enum_message_metadata_by_folde...' )
            for atom in messages :
                output.append( {'moniker' : str( ObjectId( atom['_id'] ) ) ,
                                'local_id' : atom['local_id'] ,
                                'message_flags' : atom['message_flags'] ,
                                'payload_size' : atom['payload_size'] ,
                                'resource_locator' : atom['resource_locator'] ,
                                'uid' : atom['uid'] ,
                                'postmark' : atom['postmark']
                               })
            count = len( output )
        except Exception as e :
            _logger.error( '...enum_message_metadata_by_folder... %s' % str( e ) )
            return jsonify({'error' : str( e ) })
    return jsonify( { "messages" : output ,
                      "count" : count } )


app.add_url_rule( '/mongo/imap2017/enum_message_metadata_by_folder' ,
                  'enum_message_metadata_by_folder' ,
                  view_func=imap2017_enum_message_metadata_by_folder ,
                  methods=['POST'] )
# --------------------------------------------------------------------------------------------------------
def imap2017_peek_existential_policy():
    """
    POST retrieve policy brief info

    Request JSON: {'moniker': <store>, 'folder': <folder>}.  Looks up the
    folder's materialized-store record, then the delete policy it names.

    :return: JSON {'action': ..., 'recondition': ...} or {'error': msg}
    """
    _logger.info( '...peek existential policy...' )
    policy_record = None
    if request.method == 'POST' :
        try :
            data = json.loads( request.data )
            store_key = '%s_%s' % ( data['moniker'] , data['folder'] )
            folder_doc = mongo.db.domain_materialized_store.find_one( { "_id" : store_key } )
            if not folder_doc :
                _logger.error( '....peek_existential_policy...' )
                raise mongo_no_resource_exception( '..peek_existential_policy...folder' )
            policy_name = folder_doc['delete_policy']
            policy_record = mongo.db.domain_existential_policies.find_one(
                                  { "policy_moniker" : policy_name } )
            if not policy_record :
                _logger.error( '....peek_existential_policy...' )
                raise mongo_no_resource_exception( '..peek_existential_policy...policy not found' )
        except Exception as e :
            _logger.error( '...peek existential policy... %s' % str( e ) )
            return jsonify({'error' : str( e ) })
    # NOTE(review): the 'recondition' key mirrors the stored
    # 'do_action_precondition' field -- possibly a typo for 'precondition';
    # left unchanged because API consumers may already rely on it.
    return jsonify( { "action" : policy_record['do_action'] ,
                      "recondition" : policy_record['do_action_precondition'] } )


app.add_url_rule( '/mongo/imap2017/peek_existential_policy' ,
                  'peek_existential_policy' ,
                  view_func=imap2017_peek_existential_policy ,
                  methods=['POST'] )
# --------------------------------------------------------------------------------------------------------
def imap2017_perform_meta_expunge():
    """
    POST meta expunge

    Copies every message flagged \\Deleted in the folder's store into the
    staging collection (the previous ObjectId is preserved in
    'old_obj_id'), then removes them from the live collection.

    :return: JSON {'deleted_count': N} or {'error': msg}
    """
    _logger.info( '...perform meta expunge...' )
    deleted = None
    if request.method == 'POST' :
        try :
            data = json.loads( request.data )
            folder = data['folder']
            moniker = data['moniker']
            moniker = '%s_%s' % ( moniker , folder )
            query = { "domain_canonical_store" : moniker ,
                      "message_flags" : { "$in" : ["\\Deleted"] } }
            # move to staging
            #
            # copy
            db = mongo.db.domain_existential_message
            # BUG FIX: a pymongo cursor is always truthy, so the former
            # 'if not result' check never detected an empty match set.
            # Materialize the cursor and test the resulting list instead.
            matches = list( db.find( query ) )
            if not matches :
                _logger.error( '....perform meta expunge...' )
                raise mongo_no_resource_exception( '..perform meta expunge' )
            staging = mongo.db.domain_existential_message_staging
            for document in matches :
                # append old id for reference
                document['old_obj_id'] = str( ObjectId( document['_id'] ) )
                del document['_id']
                staging.insert( document )
            # delete
            deleted = db.delete_many( query )
        except Exception as e :
            _logger.error( '...perform meta expunge... %s' % str( e ) )
            return jsonify({'error' : str( e ) })
    return jsonify( { "deleted_count" : deleted.deleted_count } )
app.add_url_rule( '/mongo/imap2017/perform_meta_expunge' ,
'perform_meta_expunge' ,
view_func=imap2017_perform_meta_expunge ,
methods=['POST'] ) | chromatic-universe/cci-stream-mta | src/cci_stream_tornado/cci_mta_trinity/streams/tr_mongo_rest_ext.py | Python | mit | 7,750 |
import unittest
from binary_conversion import convert_to_binary
class TestCorrectness(unittest.TestCase):
    """Cross-checks convert_to_binary against Python's built-in formatter."""

    def test_converts_to_binary(self):
        for value in range(1, 1000):
            expected = '{0:b}'.format(value)
            self.assertEqual(convert_to_binary(value), expected)
if __name__ == '__main__':
unittest.main()
| Bradfield/algos | book/stacks/binary_conversion_test.py | Python | cc0-1.0 | 300 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
from pymatgen.core import Molecule
from pymatgen.io.qchem_io.inputs import QCInput
from pymatgen.io.qchem_io.utils import lower_and_check_unique
# Classes for reading/manipulating/writing QChem ouput files.
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
logger = logging.getLogger(__name__)
class QChemDictSet(QCInput):
    """
    Build a QCInput given all the various input parameters. Can be extended by standard implementations below.
    """

    def __init__(self,
                 molecule,
                 job_type,
                 basis_set,
                 scf_algorithm,
                 dft_rung=4,
                 pcm_dielectric=None,
                 max_scf_cycles=200,
                 geom_opt_max_cycles=200,
                 overwrite_inputs=None):
        """
        Args:
            molecule (Pymatgen molecule object)
            job_type (str): QChem job type, e.g. "opt", "sp" or "freq".
            basis_set (str)
            scf_algorithm (str)
            dft_rung (int): 1-5; selects the functional:
                1 -> B3LYP (exchange), 2 -> B97-D3(BJ), 3 -> B97M-rV,
                4 -> wB97X-D, 5 -> wB97M-V.
            pcm_dielectric (float): solvent dielectric constant; when given,
                PCM implicit solvation is switched on.
            max_scf_cycles (int)
            geom_opt_max_cycles (int): only applied when job_type == "opt".
            overwrite_inputs (dict): This is dictionary of QChem input sections to add or overwrite variables,
                the available sections are currently rem, pcm, and solvent. So the accepted keys are rem, pcm, or solvent
                and the value is a dictionary of key value pairs relevant to the section. An example would be adding a
                new variable to the rem section that sets symmetry to false.
                ex. overwrite_inputs = {"rem": {"symmetry": "false"}}
                ***It should be noted that if something like basis is added to the rem dict it will overwrite
                the default basis.***
        """
        self.molecule = molecule
        self.job_type = job_type
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.dft_rung = dft_rung
        self.pcm_dielectric = pcm_dielectric
        self.max_scf_cycles = max_scf_cycles
        self.geom_opt_max_cycles = geom_opt_max_cycles
        self.overwrite_inputs = overwrite_inputs

        pcm_defaults = {
            "heavypoints": "194",
            "hpoints": "194",
            "radii": "uff",
            "theory": "cpcm",
            "vdwscale": "1.1"
        }

        mypcm = {}
        mysolvent = {}
        myrem = {}
        myrem["job_type"] = job_type
        myrem["basis"] = self.basis_set
        myrem["max_scf_cycles"] = self.max_scf_cycles
        myrem["gen_scfman"] = "true"
        myrem["scf_algorithm"] = self.scf_algorithm

        # Select the exchange-correlation treatment from the requested rung.
        if self.dft_rung == 1:
            myrem["exchange"] = "B3LYP"
        elif self.dft_rung == 2:
            myrem["method"] = "B97-D3"
            myrem["dft_D"] = "D3_BJ"
        elif self.dft_rung == 3:
            myrem["method"] = "B97M-rV"
        elif self.dft_rung == 4:
            myrem["method"] = "wb97xd"
        elif self.dft_rung == 5:
            myrem["method"] = "wB97M-V"
        else:
            raise ValueError("dft_rung should be between 1 and 5!")

        if self.job_type.lower() == "opt":
            myrem["geom_opt_max_cycles"] = self.geom_opt_max_cycles

        # FIX: compare against None with identity, not equality (PEP 8);
        # also copy the defaults so later overwrites cannot mutate them.
        if self.pcm_dielectric is not None:
            mypcm = dict(pcm_defaults)
            mysolvent["dielectric"] = self.pcm_dielectric
            myrem["solvent_method"] = 'pcm'

        if self.overwrite_inputs:
            # Route each user-supplied section to the matching dict;
            # unknown section names are silently ignored, as before.
            targets = {"rem": myrem, "pcm": mypcm, "solvent": mysolvent}
            for sec, sec_dict in self.overwrite_inputs.items():
                target = targets.get(sec)
                if target is not None:
                    for k, v in lower_and_check_unique(sec_dict).items():
                        target[k] = v

        super(QChemDictSet, self).__init__(
            self.molecule, rem=myrem, pcm=mypcm, solvent=mysolvent)
class OptSet(QChemDictSet):
    """
    QChemDictSet preconfigured for a geometry optimization ("opt") job.
    """

    def __init__(self,
                 molecule,
                 dft_rung=4,
                 basis_set="6-311++G*",
                 pcm_dielectric=None,
                 scf_algorithm="diis",
                 max_scf_cycles=200,
                 geom_opt_max_cycles=200,
                 overwrite_inputs=None):
        # Record the requested settings on the instance before delegating.
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        self.geom_opt_max_cycles = geom_opt_max_cycles
        super(OptSet, self).__init__(molecule=molecule,
                                     job_type="opt",
                                     dft_rung=dft_rung,
                                     pcm_dielectric=pcm_dielectric,
                                     basis_set=basis_set,
                                     scf_algorithm=scf_algorithm,
                                     max_scf_cycles=max_scf_cycles,
                                     geom_opt_max_cycles=geom_opt_max_cycles,
                                     overwrite_inputs=overwrite_inputs)
class SinglePointSet(QChemDictSet):
    """
    QChemDictSet preconfigured for a single point ("sp") calculation.
    """

    def __init__(self,
                 molecule,
                 dft_rung=4,
                 basis_set="6-311++G*",
                 pcm_dielectric=None,
                 scf_algorithm="diis",
                 max_scf_cycles=200,
                 overwrite_inputs=None):
        # Record the requested settings on the instance before delegating.
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        super(SinglePointSet, self).__init__(molecule=molecule,
                                             job_type="sp",
                                             dft_rung=dft_rung,
                                             pcm_dielectric=pcm_dielectric,
                                             basis_set=basis_set,
                                             scf_algorithm=scf_algorithm,
                                             max_scf_cycles=max_scf_cycles,
                                             overwrite_inputs=overwrite_inputs)
class FreqSet(QChemDictSet):
    """
    QChemDictSet for a frequency calculation (job_type = "freq").
    """
    def __init__(self,
                 molecule,
                 dft_rung=4,
                 basis_set="6-311++G*",
                 pcm_dielectric=None,
                 scf_algorithm="diis",
                 max_scf_cycles=200,
                 overwrite_inputs=None):
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        super(FreqSet, self).__init__(
            molecule=molecule,
            job_type="freq",
            dft_rung=dft_rung,
            pcm_dielectric=pcm_dielectric,
            basis_set=self.basis_set,
            scf_algorithm=self.scf_algorithm,
            max_scf_cycles=self.max_scf_cycles,
            overwrite_inputs=overwrite_inputs)
| nisse3000/pymatgen | pymatgen/io/qchem_io/sets.py | Python | mit | 7,003 |
from __future__ import unicode_literals
import io
import os
import datetime
import tempfile
import unittest
import profig
# use str for unicode data and bytes for binary data
if not profig.PY3:
str = unicode
if profig.WIN:
try:
import winreg
except ImportError:
import _winreg as winreg
class TestBasic(unittest.TestCase):
def test_initial_state(self):
c = profig.Config()
self.assertEqual(dict(c), {})
self.assertEqual(c.sources, [])
def test_root(self):
c = profig.Config()
c['a'] = 1
self.assertEqual(c.root, c)
s = c.section('a')
self.assertEqual(s.root, c)
self.assertNotEqual(s.root, s)
def test_formats(self):
self.assertIn('ini', profig.Config.known_formats())
c = profig.Config()
self.assertIsInstance(c.format, profig.IniFormat)
c = profig.Config(format='ini')
self.assertIsInstance(c.format, profig.IniFormat)
c = profig.Config(format=profig.IniFormat)
self.assertIsInstance(c.format, profig.IniFormat)
c = profig.Config()
c.set_format(profig.IniFormat(c))
self.assertIsInstance(c.format, profig.IniFormat)
with self.assertRaises(profig.UnknownFormatError):
c = profig.Config(format='marshmallow')
def test_keys(self):
c = profig.Config()
c['a'] = 1
c['a.a'] = 1
c[('a', 'a')] = 1
c[('a', ('a', 'a'))] = 1
with self.assertRaises(TypeError):
c[1] = 1
def test_unicode_keys(self):
c = profig.Config(encoding='shiftjis')
c[b'\xdc'] = 1
c[b'\xdc.\xdc'] = '\uff9c'
self.assertEqual(c[b'\xdc'], c['\uff9c'], 1)
self.assertEqual(c[b'\xdc.\xdc'], c['\uff9c.\uff9c'], '\uff9c')
def test_sync(self):
c = profig.Config()
with self.assertRaises(profig.NoSourcesError):
c.sync()
def test_len(self):
c = profig.Config()
self.assertEqual(len(c), 0)
c['a'] = 1
self.assertEqual(len(c), 1)
c['a.1'] = 1
self.assertEqual(len(c), 1)
self.assertEqual(len(c.section('a')), 1)
def test_init(self):
c = profig.Config()
c.init('a', 1)
c.init('a.a', 2)
self.assertEqual(c['a'], 1)
c['a'] = {'': 2, 'a': 3}
self.assertEqual(c['a'], 2)
self.assertEqual(c['a.a'], 3)
s = c.section('a')
s.convert(b'3')
self.assertEqual(s.value(), 3)
self.assertEqual(s._value, 3)
self.assertIs(s._type, int)
self.assertIs(s.default(), 1)
self.assertIs(s._default, 1)
def test_delayed_init(self):
c = profig.Config()
c['a'] = {'': '2', 'a': '3'}
self.assertEqual(c['a'], '2')
self.assertEqual(c['a.a'], '3')
s = c.section('a')
s.convert(b'3')
self.assertEqual(s.value(), '3')
self.assertEqual(s._value, '3')
self.assertIs(s._type, None)
self.assertIs(s._default, profig.NoValue)
with self.assertRaises(profig.NoValueError):
s.default()
c.init('a', 1)
c.init('a.a', 2)
self.assertEqual(c['a'], 3)
self.assertEqual(c['a.a'], 3)
s = c.section('a')
self.assertEqual(s.value(), 3)
self.assertEqual(s._value, 3)
self.assertIs(s._type, int)
self.assertIs(s.default(), 1)
self.assertIs(s._default, 1)
def test_get(self):
c = profig.Config()
c['a'] = 1
c.init('a.1', 1)
self.assertEqual(c.get('a'), 1)
self.assertEqual(c.get('a.1'), 1)
self.assertEqual(c.get('a.2'), None)
self.assertEqual(c.get('a.2', 2), 2)
def test_value(self):
c = profig.Config()
c['a'] = 1
c.init('b', 1)
s = c.section('c')
with self.assertRaises(profig.NoValueError):
s.value()
for key in ['a', 'b']:
s = c.section(key)
self.assertEqual(s.value(), 1)
def test_default(self):
c = profig.Config()
c['a'] = 1
c.init('b', 1)
s = c.section('a')
with self.assertRaises(profig.NoValueError):
s.default()
s = c.section('b')
self.assertEqual(s.default(), 1)
def test_set_value(self):
c = profig.Config()
c.init('c', 1)
c.section('a').set_value(2)
self.assertEqual(c['a'], 2)
c.section('b').set_value('3')
self.assertEqual(c['b'], '3')
# .init does not enforce types
c.section('c').set_value('4')
self.assertEqual(c['c'], '4')
def test_set_default(self):
c = profig.Config()
c.init('c', 1)
c.section('a').set_default(2)
self.assertEqual(c['a'], 2)
c.section('b').set_default('3')
self.assertEqual(c['b'], '3')
# .init does not enforce types
c.section('c').set_default('4')
self.assertEqual(c['c'], '4')
def test_section(self):
c = profig.Config()
with self.assertRaises(profig.InvalidSectionError):
c.section('a', create=False)
self.assertIs(c.section('a'), c._children['a'])
c['a.a.a'] = 1
child = c._children['a']._children['a']._children['a']
self.assertIs(c.section('a.a.a'), child)
self.assertIs(c.section('a').section('a').section('a'), child)
def test_as_dict(self):
c = profig.Config(dict_type=dict)
self.assertEqual(c.as_dict(), {})
c['a'] = 1
self.assertEqual(c.as_dict(), {'a': 1})
c['c.a'] = 1
self.assertEqual(c.as_dict(), {'a': 1, 'c': {'a': 1}})
c['b'] = 1
c['a.a'] = 1
self.assertEqual(c.as_dict(), {'a': {'': 1, 'a': 1}, 'b': 1, 'c': {'a': 1}})
self.assertEqual(c.as_dict(flat=True), {'a': 1, 'a.a': 1, 'b': 1, 'c.a': 1})
def test_reset(self):
c = profig.Config(dict_type=dict)
c.init('a', 1)
c.init('a.a', 1)
c['a'] = 2
c['a.a'] = 2
self.assertEqual(c.as_dict(flat=True), {'a': 2, 'a.a': 2})
c.section('a').reset(recurse=False)
self.assertEqual(c.as_dict(flat=True), {'a': 1, 'a.a': 2})
c.section('a').reset()
self.assertEqual(c.as_dict(flat=True), {'a': 1, 'a.a': 1})
c['a'] = 2
c['a.a'] = 2
c.reset()
self.assertEqual(c.as_dict(flat=True), {'a': 1, 'a.a': 1})
class TestStrictMode(unittest.TestCase):
    """Strict mode: only keys declared via init() may be set or synced."""
    def setUp(self):
        self.c = profig.Config(strict=True)
        self.c.init('a', 1)
    def test_set_init(self):
        # Declared keys remain assignable.
        self.c['a'] = 3
        self.assertEqual(self.c['a'], 3)
    def test_set_uninit(self):
        # Undeclared keys are rejected both via item assignment and via
        # section() lookup.
        with self.assertRaises(profig.InvalidSectionError):
            self.c['b'] = 3
        with self.assertRaises(profig.InvalidSectionError):
            self.c.section('b')
    def test_read_uninit(self):
        # Reading a source that contains an undeclared key raises when the
        # format's error mode is 'exception'.
        buf = io.BytesIO(b"""\
[a]
a = 1
""")
        self.c.format.error_mode = 'exception'
        with self.assertRaises(profig.InvalidSectionError):
            self.c.read(buf)
    def test_clear_uninit_on_sync(self):
        # sync() drops undeclared keys: only the declared root value of
        # [a] survives the round-trip.
        buf = io.BytesIO(b"""\
[a]
a = 1
""")
        self.c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[a] = 1
""")
class TestIniFormat(unittest.TestCase):
def setUp(self):
self.c = profig.Config(format='ini')
self.c.init('a', 1)
self.c.init('b', 'value')
self.c.init('a.1', 2)
def test_basic(self):
del self.c['a.1']
buf = io.BytesIO()
self.c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[a] = 1
[b] = value
""")
def test_sync_read_blank(self):
c = profig.Config(format='ini')
buf = io.BytesIO(b"""\
[b] = value
[a] = 1
1 = 2
""")
c.sync(buf)
self.assertEqual(c['a'], '1')
self.assertEqual(c['b'], 'value')
self.assertEqual(c['a.1'], '2')
def test_subsection(self):
buf = io.BytesIO()
self.c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[a] = 1
1 = 2
[b] = value
""")
def test_preserve_order(self):
buf = io.BytesIO(b"""\
[a] = 1
1 = 2
[b] = value
""")
self.c['a.1'] = 3
self.c['a'] = 2
self.c['b'] = 'test'
self.c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[a] = 2
1 = 3
[b] = test
""")
def test_preserve_comments(self):
buf = io.BytesIO(b"""\
;a comment
[a] = 1
; another comment
1 = 2
; yet more comments?
[b] = value
;arrrrgh!
""")
self.c['a.1'] = 3
self.c['a'] = 2
self.c['b'] = 'test'
self.c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
; a comment
[a] = 2
; another comment
1 = 3
; yet more comments?
[b] = test
;arrrrgh!
""")
def test_binary_read(self):
fd, temppath = tempfile.mkstemp()
try:
with io.open(fd, 'wb') as file:
file.write(b"""\
[a] = \x00binary\xff
b = also\x00binary\xff
""")
c = profig.Config(temppath, format='ini')
c.init('a', b'')
c.init('a.b', b'')
c.read()
self.assertEqual(c['a'], b'\x00binary\xff')
self.assertEqual(c['a.b'], b'also\x00binary\xff')
finally:
os.remove(temppath)
def test_binary_write(self):
fd, temppath = tempfile.mkstemp()
try:
c = profig.Config(temppath, format='ini')
c['a'] = b'\x00binary\xff'
c.write()
with io.open(fd, 'rb') as file:
result = file.read()
self.assertEqual(result, b"""\
[a] = \x00binary\xff
""")
finally:
os.remove(temppath)
def test_unicode_read(self):
fd, temppath = tempfile.mkstemp()
try:
with io.open(fd, 'wb') as file:
file.write(b"""\
[\xdc] = \xdc
""")
c = profig.Config(temppath, format='ini', encoding='shiftjis')
c.read()
self.assertEqual(c['\uff9c'], '\uff9c')
finally:
os.remove(temppath)
def test_unicode_write(self):
fd, temppath = tempfile.mkstemp()
try:
c = profig.Config(temppath, format='ini', encoding='shiftjis')
c['\uff9c'] = '\uff9c'
c.write()
with io.open(fd, 'rb') as file:
result = file.read()
self.assertEqual(result, b"""\
[\xdc] = \xdc
""")
finally:
os.remove(temppath)
def test_repeated_values(self):
c = profig.Config(format='ini')
buf = io.BytesIO(b"""\
[a]
b = 1
b = 2
""")
c.sync(buf)
self.assertEqual(c['a.b'], '2')
self.assertEqual(buf.getvalue(), b"""\
[a]
b = 2
""")
c['a.b'] = '3'
c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[a]
b = 3
""")
def test_repeated_sections(self):
c = profig.Config(format='ini')
buf = io.BytesIO(b"""\
[a]
b = 1
b = 2
[b]
a = 1
[a]
b = 3
""")
c.sync(buf)
self.assertEqual(c['a.b'], '3')
self.assertEqual(buf.getvalue(), b"""\
[a]
b = 3
[b]
a = 1
""")
class TestCoercer(unittest.TestCase):
def test_datetime_date(self):
c = profig.Config()
dt = datetime.date(2014, 12, 30)
c.init('timestamp', dt)
buf = io.BytesIO()
c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[timestamp] = 2014-12-30
""")
c.init('timestamp', datetime.datetime.now().date())
c.sync(buf)
self.assertEqual(c['timestamp'], dt)
def test_datetime_time(self):
c = profig.Config()
dt = datetime.time(14, 45, 30, 655)
c.init('timestamp', dt)
buf = io.BytesIO()
c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[timestamp] = 14:45:30.000655
""")
c.init('timestamp', datetime.datetime.now().time())
c.sync(buf)
self.assertEqual(c['timestamp'], dt)
def test_datetime_datetime(self):
c = profig.Config()
dt = datetime.datetime(2014, 12, 30, 14, 45, 30, 655)
c.init('timestamp', dt)
buf = io.BytesIO()
c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[timestamp] = 2014-12-30 14:45:30.000655
""")
c.init('timestamp', datetime.datetime.now())
c.sync(buf)
self.assertEqual(c['timestamp'], dt)
def test_list_value(self):
c = profig.Config()
c.init('colors', ['red', 'blue'])
buf = io.BytesIO()
c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[colors] = red, blue
""")
c.init('colors', [])
c.sync(buf)
self.assertEqual(c['colors'], ['red', 'blue'])
def test_path_value(self):
c = profig.Config()
c.init('paths', ['path1', 'path2'], 'path_list')
buf = io.BytesIO()
c.sync(buf)
self.assertEqual(buf.getvalue(), """\
[paths] = path1{sep}path2
""".format(sep=os.pathsep).encode('ascii'))
buf = io.BytesIO("""\
[paths] = path1{sep}path2{sep}path3
""".format(sep=os.pathsep).encode('ascii'))
c.sync(buf)
self.assertEqual(c['paths'], ['path1', 'path2', 'path3'])
def test_choice(self):
c = profig.Config()
c.coercer.register_choice('color', {1: 'red', 2: 'green', 3: 'blue'})
c.init('color', 1, 'color')
buf = io.BytesIO()
c.sync(buf)
self.assertEqual(buf.getvalue(), b"""\
[color] = red
""")
buf = io.BytesIO(b"""\
[color] = blue
""")
c.sync(buf)
self.assertEqual(c['color'], 3)
c['color'] = 4
with self.assertRaises(profig.AdaptError):
c.write(buf)
def test_not_exist_error(self):
c = profig.Config()
c.init('value', [], 'notexist')
buf = io.BytesIO()
with self.assertRaises(profig.NotRegisteredError):
c.write(buf)
c.init('value', [])
c['value'] = 3
with self.assertRaises(profig.AdaptError):
c.write(buf)
c.reset(clean=True)
c.init('value', 1)
buf = io.BytesIO(b"""[value] = badvalue""")
with self.assertRaises(profig.ConvertError):
c.read(buf)
class TestErrors(unittest.TestCase):
    """Error-reporting behavior of the parser."""

    def test_FormatError(self):
        # A bare token with no key/value structure is malformed INI; in
        # 'exception' error mode sync() must surface a FormatError.
        config = profig.Config()
        config.format.error_mode = 'exception'
        malformed = io.BytesIO(b'a')
        with self.assertRaises(profig.FormatError):
            config.sync(malformed)
class TestMisc(unittest.TestCase):
    """Miscellaneous module-level helpers."""

    def test_NoValue(self):
        # The NoValue sentinel advertises itself plainly.
        self.assertEqual(repr(profig.NoValue), 'NoValue')
    def test_get_source(self):
        # get_source() resolves relative names next to this test module.
        here = os.path.dirname(__file__)
        self.assertEqual(profig.get_source('test'), os.path.join(here, 'test'))
if profig.WIN:
class TestRegistryFormat(unittest.TestCase):
def setUp(self):
self.base_key = winreg.HKEY_CURRENT_USER
self.path = r'Software\_profig_test'
self.key = winreg.CreateKeyEx(self.base_key, self.path, 0,
winreg.KEY_ALL_ACCESS)
self.c = profig.Config(self.path, format='registry')
def tearDown(self):
self.c.format.delete(self.key)
def test_basic(self):
c = self.c
c.init('a', 1)
c.init('a.a', 2)
c.init('c', 'str')
c.sync()
k = winreg.OpenKeyEx(self.base_key, self.path)
self.assertEqual(winreg.QueryValueEx(k, 'c')[0], 'str')
k = winreg.OpenKeyEx(k, 'a')
self.assertEqual(winreg.QueryValueEx(k, '')[0], 1)
self.assertEqual(winreg.QueryValueEx(k, 'a')[0], 2)
def test_sync_read_blank(self):
key = winreg.CreateKeyEx(self.key, 'a')
winreg.SetValueEx(key, '', 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(key, '1', 0, winreg.REG_DWORD, 2)
key = winreg.CreateKeyEx(self.key, 'b')
winreg.SetValueEx(key, '', 0, winreg.REG_SZ, 'value')
c = self.c
c.read()
self.assertEqual(c['a'], 1)
self.assertEqual(c['a.1'], 2)
self.assertEqual(c['b'], 'value')
def test_sync_blank(self):
# in this test, the value for b will be read from
# '_profig_test\b\(Default)', but will be written back
# to '_profig_test\b'. 'b' has no children, so it's considered
# a root-level value.
key = winreg.CreateKeyEx(self.key, 'a')
winreg.SetValueEx(key, '', 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(key, '1', 0, winreg.REG_DWORD, 2)
key = winreg.CreateKeyEx(self.key, 'b')
winreg.SetValueEx(key, '', 0, winreg.REG_SZ, 'value')
c = self.c
c.sync()
self.assertEqual(c['a'], 1)
self.assertEqual(c['a.1'], 2)
self.assertEqual(c['b'], 'value')
def test_binary_read(self):
key = winreg.CreateKeyEx(self.key, 'a')
winreg.SetValueEx(key, '', 0, winreg.REG_BINARY, b'\x00binary\xff')
key = winreg.CreateKeyEx(key, 'b')
winreg.SetValueEx(key, '', 0, winreg.REG_BINARY, b'also\x00binary\xff')
c = self.c
c.init('a', b'')
c.init('a.b', b'')
c.read()
self.assertEqual(c['a'], b'\x00binary\xff')
self.assertEqual(c['a.b'], b'also\x00binary\xff')
def test_binary_write(self):
c = self.c
c['a'] = b'\x00binary\xff'
c.write()
value = winreg.QueryValueEx(self.key, 'a')[0]
self.assertEqual(value, b'\x00binary\xff')
def test_unicode_read(self):
key = winreg.CreateKeyEx(self.key, '\uff9c')
winreg.SetValueEx(key, '', 0, winreg.REG_SZ, '\uff9c')
c = self.c
c.init('\uff9c', '')
c.read()
self.assertEqual(c['\uff9c'], '\uff9c')
def test_unicode_write(self):
c = self.c
c.init('\uff9c', '\uff9c')
c.write()
value = winreg.QueryValueEx(self.key, '\uff9c')[0]
self.assertEqual(value, '\uff9c')
def test_unsupported_type_read(self):
key = winreg.CreateKeyEx(self.key, 'a')
winreg.SetValueEx(key, '', 0, winreg.REG_SZ, '1.11')
c = self.c
c.init('a', 1.11)
c.read()
self.assertEqual(c['a'], 1.11)
def test_unsupported_type_write(self):
c = self.c
c.init('a', 1.11)
c.write()
value = winreg.QueryValueEx(self.key, 'a')[0]
self.assertEqual(value, b'1.11')
if __name__ == '__main__':
# silence logging
import logging
logging.basicConfig(level=logging.CRITICAL)
unittest.main()
| firstworldproblems/journaltools | tests.py | Python | mit | 20,137 |
#!/usr/bin/env python
from setuptools import setup
# Read the long description up front with a context manager so the file
# handle is closed deterministically (the previous inline open().read()
# leaked the handle until garbage collection).
with open("README.md", "r") as readme:
    long_description = readme.read()

setup(
    name="vagrant-metadata",
    version="1.1.2",
    description="Vagrant metadata.json generator",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/lvillani/vagrant-metadata/",
    author="Lorenzo Villani",
    author_email="lorenzo@villani.me",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
    ],
    scripts=["bin/vagrant-metadata"],
    py_modules=["vagrant_metadata"],
)
| lvillani/vagrant-metadata | setup.py | Python | mit | 640 |
import pytest
import pandas as pd
import pandas._testing as tm
# Each case lists the dtypes of the two series being concatenated and the
# dtype the concatenated result is expected to carry.
@pytest.mark.parametrize(
    "to_concat_dtypes, result_dtype",
    [
        (["Int64", "Int64"], "Int64"),
        (["UInt64", "UInt64"], "UInt64"),
        (["Int8", "Int8"], "Int8"),
        (["Int8", "Int16"], "Int16"),
        (["UInt8", "Int8"], "Int16"),
        (["Int32", "UInt32"], "Int64"),
        # this still gives object (awaiting float extension dtype)
        (["Int64", "UInt64"], "object"),
    ],
)
def test_concat_series(to_concat_dtypes, result_dtype):
    # Concatenating masked-integer series should resolve to a common dtype.
    result = pd.concat([pd.Series([1, 2, pd.NA], dtype=t) for t in to_concat_dtypes])
    # Build the expected values via object dtype, then cast to the target.
    expected = pd.concat([pd.Series([1, 2, pd.NA], dtype=object)] * 2).astype(
        result_dtype
    )
    tm.assert_series_equal(result, expected)
| TomAugspurger/pandas | pandas/tests/arrays/integer/test_concat.py | Python | bsd-3-clause | 780 |
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# Select the Django settings module before the application is constructed.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "codearena.settings")
application = get_wsgi_application()
# Wrap the app so static files are served from the WSGI layer.
# NOTE(review): whitenoise.django.DjangoWhiteNoise was removed in
# WhiteNoise 4.x in favour of the middleware -- confirm the pinned
# whitenoise version before upgrading.
application = DjangoWhiteNoise(application)
| rachitnaruzu/codearena | codearena/wsgi.py | Python | mit | 261 |
from __future__ import absolute_import
import inspect
import six
import sys
from sentry.utils.imports import import_string
PACKAGES = {
"django.db.backends.postgresql_psycopg2": "psycopg2.extensions",
"sentry.db.postgres": "psycopg2.extensions",
"django.core.cache.backends.memcached.MemcachedCache": "memcache",
"django.core.cache.backends.memcached.PyLibMCCache": "pylibmc",
}
def reraise_as(new_exception_or_type):
    """
    Obtained from https://github.com/dcramer/reraise/blob/master/src/reraise.py
    >>> try:
    >>>     do_something_crazy()
    >>> except Exception:
    >>>     reraise_as(UnhandledException)
    """
    # Hide this frame from traceback post-processors that honor the flag.
    __traceback_hide__ = True  # NOQA
    e_type, e_value, e_traceback = sys.exc_info()
    if inspect.isclass(new_exception_or_type):
        # Given a class: instantiate it with no arguments.
        new_type = new_exception_or_type
        new_exception = new_exception_or_type()
    else:
        # Given an instance: re-raise that exact object.
        new_type = type(new_exception_or_type)
        new_exception = new_exception_or_type
    # Preserve the original exception for PEP 3134-style chaining.
    new_exception.__cause__ = e_value
    try:
        # six.reraise keeps the original traceback on both Python 2 and 3.
        six.reraise(new_type, new_exception, e_traceback)
    finally:
        # Break the frame -> traceback reference cycle promptly.
        del e_traceback
def validate_settings(settings):
    """Check that every configured database/cache backend's Python driver
    is importable, raising ConfigurationError with a helpful hint if not.
    """
    checks = (
        ("DATABASES", "ENGINE", "database engine"),
        ("CACHES", "BACKEND", "caching backend"),
    )
    for setting_name, engine_key, engine_type in checks:
        backends = getattr(settings, setting_name, {})
        for alias in backends:
            engine = backends[alias][engine_key]
            package = PACKAGES.get(engine)
            if package is not None:
                validate_dependency(settings, engine_type, engine, package)
def validate_dependency(settings, dependency_type, dependency, package):
    """Try to import *package*; if it is missing, re-raise the ImportError
    as a ConfigurationError describing which *dependency* needed it.
    """
    try:
        import_string(package)
    except ImportError:
        label = "%s %s" % (dependency_type, dependency)
        msg = ConfigurationError.get_error_message(label, package)
        reraise_as(ConfigurationError(msg))
class ConfigurationError(ValueError):
    """
    Raised when a Sentry configuration is wrong, or requires a third-party
    library that is not installed properly or cannot be found.
    """

    @classmethod
    def get_error_message(cls, dependency, package):
        """Build the human-readable hint shown for a missing *package*."""
        message = (
            "Python could not find %(package)s in your current environment "
            "(required by %(dependency)s). If you have it installed, maybe you "
            "are using the wrong python binary to run sentry?"
        )
        return message % {"dependency": dependency, "package": package}
| mvaled/sentry | src/sentry/utils/settings.py | Python | bsd-3-clause | 2,420 |
import sys
import re
import os
import pybars
import path
SEGMENTS = {
'FLASH': {
'size': 1 * 1024 * 1024,
'sections': ['.text', '.data']
},
'RAM': {
'size': 128 * 1024,
'sections': ['.bss', '.heap']
}
}
SECTION_HEADER = r"^(?P<name>\S+)\s+(?P<base>0x[0-9a-f]+)\s+(?P<size>0x[0-9a-f]+)"
SECTION_ARCHIVE = r"^\s+(?P<section_name>\S+)?\s*(?P<base>0x[0-9a-f]+)\s+(?P<size>0x[0-9a-f]+)\s+(?P<archive>.*?)(\((?P<object>\S+?)\))?$"
def skip_until(items, predicate):
    """Drop leading items while *predicate* holds, then yield the rest.

    The first item for which *predicate* is false is included in the
    output (same contract as itertools.dropwhile).
    """
    iterator = iter(items)
    for item in iterator:
        if not predicate(item):
            yield item
            break
    for item in iterator:
        yield item
def take_until(items, predicate):
    """Yield leading items as long as *predicate* holds, then stop.

    NOTE: despite the name, this behaves like itertools.takewhile --
    iteration ends at the first item for which *predicate* is false.
    """
    for item in items:
        if not predicate(item):
            return
        yield item
def symbols_sizes(section):
    """Parse archive/object records out of the raw *section* lines.

    Yields one dict per line that matches SECTION_ARCHIVE, with keys
    'section_name', 'base', 'size' (int, parsed from hex), 'archive'
    and 'object'.
    """
    for line in section:
        match = re.match(SECTION_ARCHIVE, line)
        if match is not None:
            record = match.groupdict()
            record['size'] = int(record['size'], 16)
            yield record
def group_by_archive(symbols):
    """Group symbol records by the basename of their archive.

    Records with an empty archive name fall back to their section name.
    Returns a list of {'archive', 'symbols', 'size'} dicts sorted by
    descending total size; each group's symbol list is likewise sorted
    by descending size.
    """
    per_archive = {}
    for record in symbols:
        archive = str(path.path(record['archive']).basename())
        if archive == '':
            archive = record['section_name']
        # FIX: dict.has_key() was removed in Python 3; use 'in' instead.
        if archive not in per_archive:
            per_archive[archive] = {'archive': archive, 'symbols': [], 'size': 0}
        group = per_archive[archive]
        group['symbols'].append(record)
        group['size'] += record['size']
    for group in per_archive.values():
        group['symbols'].sort(key=lambda s: -s['size'])
    return sorted(per_archive.values(), key=lambda a: -a['size'])
def find_section_slice(lines, section_name):
    """Locate *section_name* (e.g. '.text') in the GNU ld memory map *lines*.

    Returns a dict with keys 'header', 'base', 'size' (int bytes),
    'contents' (raw member lines), 'symbols' (parsed records) and
    'archives' (records grouped per archive), or None when the section
    does not appear in the map.
    """
    # Drop everything before the first line starting with the section name.
    sections = skip_until(lines, lambda x: not x.startswith(section_name))
    result = None
    for line in sections:
        if line.strip() == '':
            continue
        header_match = re.match(SECTION_HEADER, line)
        if header_match is not None:
            name = header_match.group('name')
            if name != section_name:
                # Reached the next section's header -- stop collecting.
                break
            result = {
                'header': name,
                'base': header_match.group('base'),
                'size': int(header_match.group('size'), 16),
                'contents': []
            }
        else:
            result['contents'].append(line)
    # FIX: when the section is absent, 'result' stayed None and the
    # subscripting below crashed; return None so callers can use their
    # existing `or {'size': 0}` fallback.
    if result is None:
        return None
    # FIX: materialize the records so 'symbols' remains usable after
    # group_by_archive() has iterated them (a bare generator would be
    # exhausted by that call).
    symbols = list(symbols_sizes(result['contents']))
    result['symbols'] = symbols
    result['archives'] = group_by_archive(symbols)
    return result
def format_kilobytes(_, value):
    """Handlebars helper: render *value* bytes as 'X.XX KB'."""
    kilobytes = value / 1024.0
    return '{0:.2f} KB'.format(kilobytes)
def format_percent(_, value, total):
    """Handlebars helper: render value/total as a percentage 'X.XX%'."""
    ratio = value / float(total)
    return '{0:.2f}%'.format(ratio * 100)
def report_segments_usage(memory_map, report_dir):
    # Builds the template model and renders the HTML memory report.
    # NOTE(review): this module is Python 2 only (print statement,
    # unicode(), and it relies on map() returning a list -- under
    # Python 3 the 'sections' iterator would be exhausted by sum()).
    model = {'segments': []}
    for segment_name in SEGMENTS:
        segment = SEGMENTS[segment_name]
        # Sections missing from the map fall back to a zero-sized stub.
        sections_in_segment = map(lambda s: find_section_slice(memory_map, s) or {'size': 0}, segment['sections'])
        total = float(segment['size'])
        used_size = sum(map(lambda s: s['size'], sections_in_segment))
        percent_used = used_size / total
        free_size = (total - used_size)
        model['segments'].append({
            'name': segment_name,
            'total': total,
            'used': used_size,
            'free': free_size,
            'sections': sections_in_segment
        })
        print '%s: Used: %.2f KB (%.2f %%)' % (segment_name, used_size/1024.0, percent_used * 100)
    if not os.path.exists(report_dir):
        os.makedirs(report_dir)
    # Compile the Handlebars template that lives next to this script.
    with open(os.path.dirname(os.path.realpath(__file__)) + "/memory_report.mustache", 'r') as f:
        compiler = pybars.Compiler()
        template = compiler.compile(unicode(f.read()))
    # Render with the formatting helpers and write the report page.
    with open(report_dir + '/index.html', 'w') as f:
        output = template(model, helpers={
            'kb': format_kilobytes,
            'percent': format_percent
        })
        f.write(unicode(output))
def main(memory_map_file, report_dir):
    """Entry point: load a linker memory-map file and emit the usage report."""
    with open(memory_map_file, 'r') as map_file:
        map_lines = map_file.readlines()
    report_segments_usage(map_lines, report_dir)
# Run only when executed as a script, so importing this module for its
# helper functions does not immediately generate a report.
if __name__ == '__main__':
    main(sys.argv[1], sys.argv[2])
| PW-Sat2/PWSat2OBC | utils/memory_report.py | Python | agpl-3.0 | 4,512 |
#!/usr/bin/env python
#pyNFC_preprocess.py
"""
pre-processing steps for pyNFC code. Basic geometry definition
and partitioning information created and input file created prior to
parallel simulation run
"""
# NOTE(review): Python 2 script (print statements throughout).
import scipy.io
import math
import argparse
# ---- command-line interface ----------------------------------------------
parser = argparse.ArgumentParser(prog='pyNFC_preprocess.py',
description='pre-processing script for pyNFC')
parser.add_argument('geom_filename',type=str)
parser.add_argument('lattice_type',type=str)
parser.add_argument('dynamics',type=int)
parser.add_argument('partition_style',type=str)
parser.add_argument('numProcs',type=int)
parser.add_argument('Num_ts',type=int)
parser.add_argument('ts_rep_freq',type=int)
parser.add_argument('Warmup_ts',type=int)
parser.add_argument('plot_freq',type=int)
parser.add_argument('Re',type=float)
parser.add_argument('dt',type=float)
parser.add_argument('Cs',type=float)
parser.add_argument('Restart_flag',type=int)
parser.add_argument('TimeAvg_flag',type=int)
parser.add_argument('SubspaceData_flag',type=int)
# parse input arguments
args = parser.parse_args()
# assign to required variables
geom_filename = args.geom_filename
lattice_type = args.lattice_type
dynamics = args.dynamics
partition_style = args.partition_style
numProcs = args.numProcs
#lattice_type = 'D3Q15' # [ 'D3Q15' | 'D3Q19' | 'D3Q27' ]
#partition_style = 'metis' # [ '1D' | '3D' | 'metis']
Num_ts = args.Num_ts
ts_rep_freq = args.ts_rep_freq
Warmup_ts = args.Warmup_ts
plot_freq = args.plot_freq
Re = args.Re
dt = args.dt
Cs = args.Cs
Restart_flag = args.Restart_flag
TimeAvg_flag=args.TimeAvg_flag
SubspaceData_flag = args.SubspaceData_flag
#----You should not have to edit anything below this line -------------------
# Load the geometry description produced by the MATLAB/Octave pre-processor.
geom_input = scipy.io.loadmat(geom_filename)
# overall domain dimensions
Lx_p = float(geom_input['Lx_p'])
Ly_p = float(geom_input['Ly_p'])
Lz_p = float(geom_input['Lz_p'])
Lo = float(geom_input['Lo'])
Ny_divs = int(geom_input['Ny_divs'])
rho_p = float(geom_input['rho_p'])
nu_p = float(geom_input['nu_p'])
pRef_idx = int(geom_input['pRef_idx'])
ndType = list((geom_input['ndType']).flatten())
ssNds = list((geom_input['ssNds']).flatten())
# Lattice dimensions derived from the number of divisions along y.
Ny = math.ceil((Ny_divs-1)*(Ly_p/Lo))+1
Nx = math.ceil((Ny_divs-1)*(Lx_p/Lo))+1
Nz = math.ceil((Ny_divs-1)*(Lz_p/Lo))+1
nnodes = Nx*Ny*Nz
# non-dimensionalization
Uo = nu_p*Re/Lo
To = Lo/Uo
Uavg = Uo
Ld = 1.; Td = 1.; Ud = (To/Lo)*Uavg;
nu_d = 1./Re
dx = 1./(Ny_divs - 1.)
u_lbm = (dt/dx)*Ud
nu_lbm = (dt/(dx**2))*nu_d
# BGK relaxation parameter from the lattice viscosity.
omega = 1./(3.*nu_lbm+0.5)
# Conversion factors back to physical units (velocity, time, length, pressure).
u_conv_fact = (dt/dx)*(To/Lo)
t_conv_fact = (dt*To)
l_conv_fact = dx*Lo
p_conv_fact = (((l_conv_fact/t_conv_fact)**2)*(1./3.))/(l_conv_fact**3)
#p_conv_fact = (((l_conv_fact/t_conv_fact)**2)*(1./3.))
rho_lbm = rho_p*(l_conv_fact**3)
#rho_lbm = rho_p
print 'There are %d nodes listed in ndType'%len(ndType)
print 'Writing those to file'
# Node-type list (one integer per lattice node) consumed by the solver.
ndTypeFileName = 'ndType.lbm'
ndTypeFile = open(ndTypeFileName,'w')
for i in range(len(ndType)):
    nT = int(ndType[i]);
    ndTypeFile.write('%i \n'%nT)
ndTypeFile.close()
print 'There are %d nodes listed as subspace nodes'%len(ssNds)
print 'Writing those to file'
# Subspace node list used when SubspaceData_flag is enabled.
ssNdFileName = 'ssNds.lbm'
ssNdFile = open(ssNdFileName,'w')
for i in range(len(ssNds)):
    ss = int(ssNds[i])
    ssNdFile.write('%i \n'%ss)
ssNdFile.close()
print 'l_conv_fact = %g.\n'%l_conv_fact
print 'p_conv_fact = %g.\n'%p_conv_fact
print 'Number of lattice points = %d.' % nnodes
print 'Number of time-steps = %d.' % Num_ts
print 'LBM viscosity = %g.' % nu_lbm
print 'LBM relaxation parameter (omega) = %g.' % omega
print 'LBM flow Mach number = %g. ' % u_lbm
print 'Nx = %d' % Nx
print 'Ny = %d' % Ny
print 'Nz = %d' % Nz
#run_dec = raw_input('Would you like to continue? [Y/n]: ')
run_dec = 'y' # just let it run
if run_dec!='n' and run_dec!='N':
    print 'Ok! Cross your fingers!!'
    # write the input file
    params = open('params.lbm','w')
    params.write('%s \n'% lattice_type) # lattice selection (keep.  We might actually use this)
    params.write('%d \n'% dynamics)
    params.write('%d \n'%Num_ts)
    params.write('%d \n'%ts_rep_freq)
    params.write('%d \n'%Warmup_ts)
    params.write('%d \n'%plot_freq)
    params.write('%g \n'%Cs)
    params.write('%g \n'%rho_lbm) # density
    params.write('%g \n'%u_lbm) # scaled maximum velocity
    params.write('%g \n'%omega) # relaxation parameter
    params.write('%d \n'%Nx) # number of nodes in the x, y and z direction
    params.write('%d \n'%Ny)
    params.write('%d \n'%Nz)
    params.write('%d \n'%Restart_flag) # 1 = load restart data; 0 = no restart
    params.write('%d \n'%TimeAvg_flag) # 1 = time average data; 0 = no time average
    # the following will not be used by the MPI code, but will be available
    # for the post-processing script
    params.write('%f \n'%Lx_p) # physical dimensions in the x,y and z dimensions
    params.write('%f \n'%Ly_p)
    params.write('%f \n'%Lz_p)
    params.write('%15.14f \n'%t_conv_fact) # time, length and pressure conversion factors
    params.write('%15.14f \n'%l_conv_fact)
    params.write('%g \n'%p_conv_fact)
    params.write('%d \n'%pRef_idx);
    params.write('%d \n'%SubspaceData_flag)#1 = collect subspace data; 0 do not
    params.close()
else:
    print 'Run aborted.  Better luck next time!'
| stu314159/pyNFC | pyNFC_preprocess.py | Python | mit | 5,297 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Thinkopen Brasil
# Copyright (C) Thinkopen Solutions Brasil (<http://www.tkobr.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: the file consists of this single dict
# literal, read by the addon loader (name, version, dependencies, data
# files and QWeb templates for the point-of-sale discount-cards addon).
{
    'name': 'tko_point_of_sale_discount_cards',
    'version': '0.032',
    'description': 'This module applies selected discount on total',
    'category': 'Customizations',
    'sequence': 150,
    'complexity': 'pos_customization',
    'author': 'ThinkOpen Solutions Brasil',
    'website': 'http://www.tkobr.com',
    'images': ['images/oerp61.jpeg',
               ],
    'depends': [
        'point_of_sale',
        'tko_point_of_sale_discount_on_order',
    ],
    'data': [
        'security/ir.model.access.csv',
        'point_of_sale_view.xml',
        'static/src/xml/pos.xml',
    ],
    'qweb' : ['static/src/xml/discount.xml',],
    'init': [],
    'demo': [],
    'update': [],
    'test': [], #YAML files with tests
    'installable': True,
    'application': False,
    'auto_install': False, #If it's True, the modules will be auto-installed when all dependencies are installed
    'certificate': '',
}
| bmya/tkobr-addons | tko_point_of_sale_discount_cards/__openerp__.py | Python | agpl-3.0 | 2,070 |
# Unidecode transliteration table for Unicode code points U+8C00..U+8CFF
# (CJK ideographs).  Index = low byte of the code point; value = ASCII
# (Pinyin) approximation.  '[?]' marks characters with no transliteration.
data = (
'Yu ', # 0x00
'Shui -|-Shei ', # 0x01
'Shen ', # 0x02
'Diao ', # 0x03
'Chan ', # 0x04
'Liang ', # 0x05
'Zhun ', # 0x06
'Sui ', # 0x07
'Tan ', # 0x08
'Shen ', # 0x09
'Yi ', # 0x0a
'Mou ', # 0x0b
'Chen ', # 0x0c
'Die ', # 0x0d
'Huang ', # 0x0e
'Jian ', # 0x0f
'Xie ', # 0x10
'Nue ', # 0x11
'Ye ', # 0x12
'Wei ', # 0x13
'E ', # 0x14
'Yu ', # 0x15
'Xuan ', # 0x16
'Chan ', # 0x17
'Zi ', # 0x18
'An ', # 0x19
'Yan ', # 0x1a
'Di ', # 0x1b
'Mi ', # 0x1c
'Pian ', # 0x1d
'Xu ', # 0x1e
'Mo ', # 0x1f
'Dang ', # 0x20
'Su ', # 0x21
'Xie ', # 0x22
'Yao ', # 0x23
'Bang ', # 0x24
'Shi ', # 0x25
'Qian ', # 0x26
'Mi ', # 0x27
'Jin ', # 0x28
'Man ', # 0x29
'Zhe ', # 0x2a
'Jian ', # 0x2b
'Miu ', # 0x2c
'Tan ', # 0x2d
'Zen ', # 0x2e
'Qiao ', # 0x2f
'Lan ', # 0x30
'Pu ', # 0x31
'Jue ', # 0x32
'Yan ', # 0x33
'Qian ', # 0x34
'Zhan ', # 0x35
'Chen ', # 0x36
'Gu ', # 0x37
'Qian ', # 0x38
'Hong ', # 0x39
'Xia ', # 0x3a
'Jue ', # 0x3b
'Hong ', # 0x3c
'Han ', # 0x3d
'Hong ', # 0x3e
'Xi ', # 0x3f
'Xi ', # 0x40
'Huo ', # 0x41
'Liao ', # 0x42
'Han ', # 0x43
'Du ', # 0x44
'Long ', # 0x45
'Dou ', # 0x46
'Jiang ', # 0x47
'Qi ', # 0x48
'Shi ', # 0x49
'Li ', # 0x4a
'Deng ', # 0x4b
'Wan ', # 0x4c
'Bi ', # 0x4d
'Shu ', # 0x4e
'Xian ', # 0x4f
'Feng ', # 0x50
'Zhi ', # 0x51
'Zhi ', # 0x52
'Yan ', # 0x53
'Yan ', # 0x54
'Shi ', # 0x55
'Chu ', # 0x56
'Hui ', # 0x57
'Tun ', # 0x58
'Yi ', # 0x59
'Tun ', # 0x5a
'Yi ', # 0x5b
'Jian ', # 0x5c
'Ba ', # 0x5d
'Hou ', # 0x5e
'E ', # 0x5f
'Cu ', # 0x60
'Xiang ', # 0x61
'Huan ', # 0x62
'Jian ', # 0x63
'Ken ', # 0x64
'Gai ', # 0x65
'Qu ', # 0x66
'Fu ', # 0x67
'Xi ', # 0x68
'Bin ', # 0x69
'Hao ', # 0x6a
'Yu ', # 0x6b
'Zhu ', # 0x6c
'Jia ', # 0x6d
'[?] ', # 0x6e
'Xi ', # 0x6f
'Bo ', # 0x70
'Wen ', # 0x71
'Huan ', # 0x72
'Bin ', # 0x73
'Di ', # 0x74
'Zong ', # 0x75
'Fen ', # 0x76
'Yi ', # 0x77
'Zhi ', # 0x78
'Bao ', # 0x79
'Chai ', # 0x7a
'Han ', # 0x7b
'Pi ', # 0x7c
'Na ', # 0x7d
'Pi ', # 0x7e
'Gou ', # 0x7f
'Na ', # 0x80
'You ', # 0x81
'Diao ', # 0x82
'Mo ', # 0x83
'Si ', # 0x84
'Xiu ', # 0x85
'Huan ', # 0x86
'Kun ', # 0x87
'He ', # 0x88
'He ', # 0x89
'Mo ', # 0x8a
'Han ', # 0x8b
'Mao ', # 0x8c
'Li ', # 0x8d
'Ni ', # 0x8e
'Bi ', # 0x8f
'Yu ', # 0x90
'Jia ', # 0x91
'Tuan ', # 0x92
'Mao ', # 0x93
'Pi ', # 0x94
'Xi ', # 0x95
'E ', # 0x96
'Ju ', # 0x97
'Mo ', # 0x98
'Chu ', # 0x99
'Tan ', # 0x9a
'Huan ', # 0x9b
'Jue ', # 0x9c
'Bei ', # 0x9d
'Zhen ', # 0x9e
'Yuan ', # 0x9f
'Fu ', # 0xa0
'Cai ', # 0xa1
'Gong ', # 0xa2
'Te ', # 0xa3
'Yi ', # 0xa4
'Hang ', # 0xa5
'Wan ', # 0xa6
'Pin ', # 0xa7
'Huo ', # 0xa8
'Fan ', # 0xa9
'Tan ', # 0xaa
'Guan ', # 0xab
'Ze ', # 0xac
'Zhi ', # 0xad
'Er ', # 0xae
'Zhu ', # 0xaf
'Shi ', # 0xb0
'Bi ', # 0xb1
'Zi ', # 0xb2
'Er ', # 0xb3
'Gui ', # 0xb4
'Pian ', # 0xb5
'Bian ', # 0xb6
'Mai ', # 0xb7
'Dai ', # 0xb8
'Sheng ', # 0xb9
'Kuang ', # 0xba
'Fei ', # 0xbb
'Tie ', # 0xbc
'Yi ', # 0xbd
'Chi ', # 0xbe
'Mao ', # 0xbf
'He ', # 0xc0
'Bi ', # 0xc1
'Lu ', # 0xc2
'Ren ', # 0xc3
'Hui ', # 0xc4
'Gai ', # 0xc5
'Pian ', # 0xc6
'Zi ', # 0xc7
'Jia ', # 0xc8
'Xu ', # 0xc9
'Zei ', # 0xca
'Jiao ', # 0xcb
'Gai ', # 0xcc
'Zang ', # 0xcd
'Jian ', # 0xce
'Ying ', # 0xcf
'Xun ', # 0xd0
'Zhen ', # 0xd1
'She ', # 0xd2
'Bin ', # 0xd3
'Bin ', # 0xd4
'Qiu ', # 0xd5
'She ', # 0xd6
'Chuan ', # 0xd7
'Zang ', # 0xd8
'Zhou ', # 0xd9
'Lai ', # 0xda
'Zan ', # 0xdb
'Si ', # 0xdc
'Chen ', # 0xdd
'Shang ', # 0xde
'Tian ', # 0xdf
'Pei ', # 0xe0
'Geng ', # 0xe1
'Xian ', # 0xe2
'Mai ', # 0xe3
'Jian ', # 0xe4
'Sui ', # 0xe5
'Fu ', # 0xe6
'Tan ', # 0xe7
'Cong ', # 0xe8
'Cong ', # 0xe9
'Zhi ', # 0xea
'Ji ', # 0xeb
'Zhang ', # 0xec
'Du ', # 0xed
'Jin ', # 0xee
'Xiong ', # 0xef
'Shun ', # 0xf0
'Yun ', # 0xf1
'Bao ', # 0xf2
'Zai ', # 0xf3
'Lai ', # 0xf4
'Feng ', # 0xf5
'Cang ', # 0xf6
'Ji ', # 0xf7
'Sheng ', # 0xf8
'Ai ', # 0xf9
'Zhuan ', # 0xfa
'Fu ', # 0xfb
'Gou ', # 0xfc
'Sai ', # 0xfd
'Ze ', # 0xfe
'Liao ', # 0xff
)
| Memrise/unidecode | unidecode/x08c.py | Python | gpl-2.0 | 4,638 |
# End-to-end browser test: seed a random foster record straight into MySQL,
# edit its first name through the web UI with Selenium/Chrome, then verify
# the change landed in the database.  Prints "pass"/"fail" for the harness.
import time
import sys
import _mysql
import random
import string
import re
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import selenium.webdriver.chrome.service as service
try:
	# Check to see if it was added
	# NOTE(review): SQL is built by string concatenation; safe only because
	# the values are locally generated random tokens -- do not copy this
	# pattern for user-supplied input.
	db=_mysql.connect('localhost','root','root','paws_db')
	rand_fname=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
	rand_lname=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
	rand_mail=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
	db.query('INSERT INTO fosters (first_name,last_name,address,email,created,is_deleted,notes,avail,exp) VALUES("'+rand_fname+'","'+rand_lname+'","55 Gato Way","'+rand_mail+'@mail.com",NOW(),true,"Notes","Avail","Experience");');
	db.store_result()
	db.query("SELECT id,first_name FROM fosters where last_name=\""+rand_lname+"\" AND email=\""+rand_mail+"@mail.com\"")
	r=db.store_result()
	k=r.fetch_row(1,1)
	foster_id = k[0].get('id')
	# Launch Chrome through a locally installed chromedriver.
	service = service.Service('D:\ChromeDriver\chromedriver')
	service.start()
	capabilities = {'chrome.binary': 'C:\Program Files (x86)\Google\Chrome\Application\chrome'} # Chrome path is different for everyone
	driver = webdriver.Remote(service.service_url, capabilities)
	# Window size comes from the CLI so the suite can test responsive layouts.
	driver.set_window_size(sys.argv[1], sys.argv[2]);
	driver.get('http://localhost:8765');
	driver.find_element_by_id('email').send_keys('theparrotsarecoming@gmail.com')
	driver.find_element_by_id('password').send_keys('password')
	driver.find_element_by_css_selector('input[type="submit"]').click()
	driver.get('http://localhost:8765/fosters/edit/'+foster_id);
	name = driver.find_element_by_id("first-name")
	fname_name = name.get_attribute("value")
	new_rand_name=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
	name.clear()
	name.send_keys(new_rand_name);
	submit = driver.find_element_by_id("FosterEdit")
	submit.location_once_scrolled_into_view
	submit.click()
	# Re-read the row to confirm the UI edit was persisted.
	db.query("SELECT first_name FROM fosters where id="+foster_id)
	r=db.store_result()
	k=r.fetch_row(1,1)
	new_foster_name = k[0].get('first_name')
	if new_rand_name == str(new_foster_name,"utf-8"):
		print("pass")
	else:
		print("fail")
	driver.quit()
except Exception as e:
	# Any failure (DB, driver, element lookup) marks the test failed.
	print(e)
	print("fail")
| TheParrotsAreComing/PAWS | TestingAssets/Fosters/foster_edit.py | Python | mit | 2,306 |
from codeschool.tests import *
from cs_auth.factories import *
# Fixtures
# pytest-factoryboy: expose FriendshipStatusFactory as a pytest fixture
# (available to tests as `friendship_status` / `friendship_status_factory`).
register(FriendshipStatusFactory)
class TestURLs(URLBaseTester):
    # URL smoke tests; URLBaseTester (project helper) presumably iterates
    # these lists and checks response codes -- confirm against its source.
    # URLs expected to be reachable without authentication.
    public_urls = [
        '/',
        '/accounts/login',
        '/accounts/signout',
    ]
    # URLs that require a logged-in user; `{user.username}` is filled in
    # by the base tester.
    login_urls = [
        '/accounts/{user.username}/edit',
        '/accounts/{user.username}/password',
        '/accounts/{user.username}/email',
    ]
    @pytest.mark.django_db
    def test_can_login(self, client, user_with_password, password):
        # Posting valid credentials to the signin form should succeed
        # (200) or at worst be rejected as a bad request (400), never 500.
        url = '/accounts/login'
        response = client.post(url, {
            'identification': user_with_password.username,
            'password': password,
            'action': 'signin',
        })
        self.expect(200, 400, response, url)
| jonnatas/codeschool | old/cs_auth/test_app.py | Python | gpl-3.0 | 740 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
# (c) 2013, Yeukhon Wong <yeukhon@acm.org>
# (c) 2014, Nate Coraor <nate@bx.psu.edu>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by the Ansible plugin loader / ansible-doc.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hg
short_description: Manages Mercurial (hg) repositories.
description:
- Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address.
version_added: "1.0"
author: "Yeukhon Wong (@yeukhon)"
options:
repo:
description:
- The repository address.
required: true
default: null
aliases: [ name ]
dest:
description:
- Absolute path of where the repository should be cloned to.
This parameter is required, unless clone and update are set to no
required: true
default: null
revision:
description:
- Equivalent C(-r) option in hg command which could be the changeset, revision number,
branch name or even tag.
required: false
default: null
aliases: [ version ]
force:
description:
- Discards uncommitted changes. Runs C(hg update -C). Prior to
1.9, the default was `yes`.
required: false
default: "no"
choices: [ "yes", "no" ]
purge:
description:
- Deletes untracked files. Runs C(hg purge).
required: false
default: "no"
choices: [ "yes", "no" ]
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
description:
- If C(no), do not retrieve new revisions from the origin repository
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.3"
description:
- If C(no), do not clone the repository if it does not exist locally.
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to hg executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
notes:
- "This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156)."
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
requirements: [ ]
'''
EXAMPLES = '''
# Ensure the current working copy is inside the stable branch and deletes untracked files if any.
- hg:
repo: https://bitbucket.org/user/repo1
dest: /home/user/repo1
revision: stable
purge: yes
# Example just get information about the repository whether or not it has
# already been cloned locally.
- hg:
repo: git://bitbucket.org/user/repo
dest: /srv/checkout
clone: no
update: no
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class Hg(object):
    """Thin wrapper around the ``hg`` command-line client.

    Every operation shells out via ``module.run_command`` and either
    returns the raw ``(rc, stdout, stderr)`` triple or fails the module
    through ``module.fail_json``.
    """
    def __init__(self, module, dest, repo, revision, hg_path):
        # module: AnsibleModule (for run_command/fail_json)
        # dest: local working-copy path; repo: remote repository address
        # revision: changeset/branch/tag or None; hg_path: hg executable
        self.module = module
        self.dest = dest
        self.repo = repo
        self.revision = revision
        self.hg_path = hg_path
    def _command(self, args_list):
        # Run "hg <args...>"; returns (rc, stdout, stderr).
        (rc, out, err) = self.module.run_command([self.hg_path] + args_list)
        return (rc, out, err)
    def _list_untracked(self):
        # "hg purge --print" lists untracked files without deleting them.
        args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
        return self._command(args)
    def get_revision(self):
        """
        hg id -b -i -t returns a string in the format:
           "<changeset>[+] <branch_name> <tag>"
        This format lists the state of the current working copy,
        and indicates whether there are uncommitted changes by the
        plus sign. Otherwise, the sign is omitted.
        Read the full description via hg id --help
        """
        (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest])
        if rc != 0:
            self.module.fail_json(msg=err)
        else:
            return to_native(out).strip('\n')
    def get_remote_revision(self):
        # Identify the tip of the remote repository without cloning it.
        (rc, out, err) = self._command(['id', self.repo])
        if rc != 0:
            self.module.fail_json(msg=err)
        else:
            return to_native(out).strip('\n')
    def has_local_mods(self):
        # A '+' in the "hg id" output marks uncommitted local changes.
        now = self.get_revision()
        if '+' in now:
            return True
        else:
            return False
    def discard(self):
        # Throw away uncommitted changes (hg update -C); returns True only
        # when modifications actually existed and were removed.
        before = self.has_local_mods()
        if not before:
            return False
        args = ['update', '-C', '-R', self.dest, '-r', '.']
        (rc, out, err) = self._command(args)
        if rc != 0:
            self.module.fail_json(msg=err)
        after = self.has_local_mods()
        if before != after and not after: # no more local modification
            return True
    def purge(self):
        # before purge, find out if there are any untracked files
        (rc1, out1, err1) = self._list_untracked()
        if rc1 != 0:
            self.module.fail_json(msg=err1)
        # there are some untracked files
        if out1 != '':
            args = ['purge', '--config', 'extensions.purge=', '-R', self.dest]
            (rc2, out2, err2) = self._command(args)
            if rc2 != 0:
                self.module.fail_json(msg=err2)
            return True
        else:
            return False
    def cleanup(self, force, purge):
        # Apply discard (force) and/or purge; True when either did work.
        discarded = False
        purged = False
        if force:
            discarded = self.discard()
        if purge:
            purged = self.purge()
        if discarded or purged:
            return True
        else:
            return False
    def pull(self):
        return self._command(
            ['pull', '-R', self.dest, self.repo])
    def update(self):
        # Update to the requested revision, or the branch tip when None.
        if self.revision is not None:
            return self._command(['update', '-r', self.revision, '-R', self.dest])
        return self._command(['update', '-R', self.dest])
    def clone(self):
        if self.revision is not None:
            return self._command(['clone', self.repo, self.dest, '-r', self.revision])
        return self._command(['clone', self.repo, self.dest])
    @property
    def at_revision(self):
        """
        There is no point in pulling from a potentially down/slow remote site
        if the desired changeset is already the current changeset.
        """
        if self.revision is None or len(self.revision) < 7:
            # Assume it's a rev number, tag, or branch
            return False
        (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
        if rc != 0:
            self.module.fail_json(msg=err)
        if out.startswith(self.revision):
            return True
        return False
# ===========================================
def main():
    """Ansible entry point: clone/pull/update a Mercurial working copy.

    Exits via module.exit_json with before/after revisions, `changed`
    and `cleaned` flags, or module.fail_json on any hg error.
    """
    module = AnsibleModule(
        argument_spec = dict(
            repo = dict(required=True, aliases=['name']),
            dest = dict(type='path'),
            revision = dict(default=None, aliases=['version']),
            force = dict(default='no', type='bool'),
            purge = dict(default='no', type='bool'),
            update = dict(default='yes', type='bool'),
            clone = dict(default='yes', type='bool'),
            executable = dict(default=None),
        ),
    )
    repo = module.params['repo']
    dest = module.params['dest']
    revision = module.params['revision']
    force = module.params['force']
    purge = module.params['purge']
    update = module.params['update']
    clone = module.params['clone']
    hg_path = module.params['executable'] or module.get_bin_path('hg', True)
    if dest is not None:
        # Presence of .hg/hgrc is used below as "repo already cloned".
        hgrc = os.path.join(dest, '.hg/hgrc')
    # initial states
    before = ''
    changed = False
    cleaned = False
    if not dest and (clone or update):
        module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")
    hg = Hg(module, dest, repo, revision, hg_path)
    # If there is no hgrc file, then assume repo is absent
    # and perform clone. Otherwise, perform pull and update.
    if not clone and not update:
        # Info-only mode: just report the remote revision.
        out = hg.get_remote_revision()
        module.exit_json(after=out, changed=False)
    if not os.path.exists(hgrc):
        if clone:
            (rc, out, err) = hg.clone()
            if rc != 0:
                module.fail_json(msg=err)
        else:
            module.exit_json(changed=False)
    elif not update:
        # Just return having found a repo already in the dest path
        before = hg.get_revision()
    elif hg.at_revision:
        # no update needed, don't pull
        before = hg.get_revision()
        # but force and purge if desired
        cleaned = hg.cleanup(force, purge)
    else:
        # get the current state before doing pulling
        before = hg.get_revision()
        # can perform force and purge
        cleaned = hg.cleanup(force, purge)
        (rc, out, err) = hg.pull()
        if rc != 0:
            module.fail_json(msg=err)
        (rc, out, err) = hg.update()
        if rc != 0:
            module.fail_json(msg=err)
    after = hg.get_revision()
    if before != after or cleaned:
        changed = True
    module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
if __name__ == '__main__':
    main()
| jmighion/ansible | lib/ansible/modules/source_control/hg.py | Python | gpl-3.0 | 9,896 |
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from hashlib import md5
from mock import MagicMock, patch
import httplib
import json
import os
import socket
import swiftclient
import swiftclient.client as swift_client
import uuid
from oslo_log import log as logging
from swiftclient import client as swift
from trove.common.i18n import _ # noqa
LOG = logging.getLogger(__name__)
class FakeSwiftClient(object):
    """Stand-in for the swiftclient package used by the tests.

    Mirrors the piece of the ``swiftclient.client`` module interface trove
    uses: callers invoke ``Connection(...)`` and get back an object that
    logs calls instead of talking to a real Swift endpoint.
    """
    def __init__(self, *args, **kwargs):
        # Accept and ignore whatever the real client's constructor takes.
        pass
    @classmethod
    def Connection(cls, *args, **kwargs):
        """Return a fresh FakeSwiftConnection, ignoring all arguments."""
        # classmethod's first parameter is the class, so name it `cls`
        # (was `self`); `kwargs` fixes the `kargs` typo.
        LOG.debug("fake FakeSwiftClient Connection")
        return FakeSwiftConnection()
class FakeSwiftConnection(object):
    """Logging calls instead of executing.

    In-memory stand-in for a swiftclient Connection.  Stores uploaded
    objects in ``self.container_objects`` and triggers error paths when
    magic container names (``missing_container``, ``socket_error_on_*``,
    ``unsupported_version``, ...) are used.
    """
    MANIFEST_HEADER_KEY = 'X-Object-Manifest'
    url = 'http://mockswift/v1'
    def __init__(self, *args, **kwargs):
        # manifest_prefix/manifest_name track a large-object manifest
        # registered through put_object(); container_objects maps object
        # name -> stored contents.
        self.manifest_prefix = None
        self.manifest_name = None
        self.container_objects = {}
    def get_auth(self):
        # Canned (storage_url, auth_token) pair.
        return (
            u"http://127.0.0.1:8080/v1/AUTH_c7b038976df24d96bf1980f5da17bd89",
            u'MIINrwYJKoZIhvcNAQcCoIINoDCCDZwCAQExCTAHBgUrDgMCGjCCDIgGCSqGSIb3'
            u'DQEHAaCCDHkEggx1eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAi'
            u'MjAxMy0wMy0xOFQxODoxMzoyMC41OTMyNzYiLCAiZXhwaXJlcyI6ICIyMDEzLTAz'
            u'LTE5VDE4OjEzOjIwWiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7'
            u'ImVuYWJsZWQiOiB0cnVlLCAiZGVzY3JpcHRpb24iOiBudWxsLCAibmFtZSI6ICJy'
            u'ZWRkd2FyZiIsICJpZCI6ICJjN2IwMzg5NzZkZjI0ZDk2YmYxOTgwZjVkYTE3YmQ4'
            u'OSJ9fSwgInNlcnZpY2VDYXRhbG9nIjogW3siZW5kcG9pbnRzIjogW3siYWRtaW5')
    def get_account(self):
        # Canned (headers, container-listing) pair for an empty account.
        return ({'content-length': '2', 'accept-ranges': 'bytes',
                 'x-timestamp': '1363049003.92304',
                 'x-trans-id': 'tx9e5da02c49ed496395008309c8032a53',
                 'date': 'Tue, 10 Mar 2013 00:43:23 GMT',
                 'x-account-bytes-used': '0',
                 'x-account-container-count': '0',
                 'content-type': 'application/json; charset=utf-8',
                 'x-account-object-count': '0'}, [])
    def head_container(self, container):
        # Magic container names exercise the error paths.
        LOG.debug("fake head_container(%s)" % container)
        if container == 'missing_container':
            raise swift.ClientException('fake exception',
                                        http_status=httplib.NOT_FOUND)
        elif container == 'unauthorized_container':
            raise swift.ClientException('fake exception',
                                        http_status=httplib.UNAUTHORIZED)
        elif container == 'socket_error_on_head':
            raise socket.error(111, 'ECONNREFUSED')
        pass
    def put_container(self, container):
        LOG.debug("fake put_container(%s)" % container)
        pass
    def get_container(self, container, **kwargs):
        # Fixed listing of three fake backup objects.
        LOG.debug("fake get_container(%s)" % container)
        fake_header = None
        fake_body = [{'name': 'backup_001'},
                     {'name': 'backup_002'},
                     {'name': 'backup_003'}]
        return fake_header, fake_body
    def head_object(self, container, name):
        LOG.debug("fake put_container(%(container)s, %(name)s)" %
                  {'container': container, 'name': name})
        checksum = md5()
        if self.manifest_prefix and self.manifest_name == name:
            for object_name in sorted(self.container_objects.iterkeys()):
                object_checksum = md5(self.container_objects[object_name])
                # The manifest file etag for a HEAD or GET is the checksum of
                # the concatenated checksums.
                checksum.update(object_checksum.hexdigest())
            # this is included to test bad swift segment etags
            if name.startswith("bad_manifest_etag_"):
                return {'etag': '"this_is_an_intentional_bad_manifest_etag"'}
        else:
            if name in self.container_objects:
                checksum.update(self.container_objects[name])
            else:
                return {'etag': 'fake-md5-sum'}
        # Currently a swift HEAD object returns etag with double quotes
        return {'etag': '"%s"' % checksum.hexdigest()}
    def get_object(self, container, name, resp_chunk_size=None):
        # Returns (headers, body); 'metadata' objects get a JSON metadata
        # document, everything else gets 1 MiB of random bytes (chunked
        # through a generator when resp_chunk_size is given).
        LOG.debug("fake get_object(%(container)s, %(name)s)" %
                  {'container': container, 'name': name})
        if container == 'socket_error_on_get':
            raise socket.error(111, 'ECONNREFUSED')
        if 'metadata' in name:
            fake_object_header = None
            metadata = {}
            if container == 'unsupported_version':
                metadata['version'] = '9.9.9'
            else:
                metadata['version'] = '1.0.0'
            metadata['backup_id'] = 123
            metadata['volume_id'] = 123
            metadata['backup_name'] = 'fake backup'
            metadata['backup_description'] = 'fake backup description'
            metadata['created_at'] = '2013-02-19 11:20:54,805'
            metadata['objects'] = [{
                'backup_001': {'compression': 'zlib', 'length': 10},
                'backup_002': {'compression': 'zlib', 'length': 10},
                'backup_003': {'compression': 'zlib', 'length': 10}
            }]
            metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
            fake_object_body = metadata_json
            return (fake_object_header, fake_object_body)
        fake_header = {'etag': '"fake-md5-sum"'}
        if resp_chunk_size:
            def _object_info():
                length = 0
                while length < (1024 * 1024):
                    yield os.urandom(resp_chunk_size)
                    length += resp_chunk_size
            fake_object_body = _object_info()
        else:
            fake_object_body = os.urandom(1024 * 1024)
        return (fake_header, fake_object_body)
    def put_object(self, container, name, contents, **kwargs):
        # Stores the object in memory and returns its md5 hex digest,
        # mimicking Swift's etag; a MANIFEST_HEADER_KEY header registers
        # `name` as a large-object manifest instead.
        LOG.debug("fake put_object(%(container)s, %(name)s)" %
                  {'container': container, 'name': name})
        if container == 'socket_error_on_put':
            raise socket.error(111, 'ECONNREFUSED')
        headers = kwargs.get('headers', {})
        object_checksum = md5()
        if self.MANIFEST_HEADER_KEY in headers:
            # the manifest prefix format is <container>/<prefix> where
            # container is where the object segments are in and prefix is the
            # common prefix for all segments.
            self.manifest_prefix = headers.get(self.MANIFEST_HEADER_KEY)
            self.manifest_name = name
            object_checksum.update(contents)
        else:
            if hasattr(contents, 'read'):
                # File-like contents: consume in small chunks.
                chunk_size = 128
                object_content = ""
                chunk = contents.read(chunk_size)
                while chunk:
                    object_content += chunk
                    object_checksum.update(chunk)
                    chunk = contents.read(chunk_size)
                self.container_objects[name] = object_content
            else:
                object_checksum.update(contents)
                self.container_objects[name] = contents
            # this is included to test bad swift segment etags
            if name.startswith("bad_segment_etag_"):
                return "this_is_an_intentional_bad_segment_etag"
        return object_checksum.hexdigest()
    def post_object(self, container, name, headers={}):
        LOG.debug("fake post_object(%(container)s, %(name)s, %(head)s)" %
                  {'container': container, 'name': name, 'head': str(headers)})
    def delete_object(self, container, name):
        LOG.debug("fake delete_object(%(container)s, %(name)s)" %
                  {'container': container, 'name': name})
        if container == 'socket_error_on_delete':
            raise socket.error(111, 'ECONNREFUSED')
        pass
class Patcher(object):
    """Context-manager base class for objects that install mock patchers.

    Subclasses call ``_start_patcher`` for every patcher they activate;
    when the ``with`` block exits, all still-running patchers are stopped
    in reverse (LIFO) order of activation.
    """
    def __init__(self):
        self.__active = None
    def __enter__(self):
        self.__active = []
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        # Unwind in LIFO order so nested patches are removed innermost-first.
        while self.__active:
            self.__active.pop().stop()
    def _start_patcher(self, patcher):
        """Start *patcher*, register it for automatic stop, and return
        whatever ``patcher.start()`` returns (usually the mock object).
        """
        self.__active.append(patcher)
        return patcher.start()
class SwiftClientStub(Patcher):
"""
Component for controlling behavior of Swift Client Stub. Instantiated
before tests are invoked in "fake" mode. Invoke methods to control
behavior so that systems under test can interact with this as it is a
real swift client with a real backend
example:
if FAKE:
swift_stub = SwiftClientStub()
swift_stub.with_account('xyz')
# returns swift account info and auth token
component_using_swift.get_swift_account()
if FAKE:
swift_stub.with_container('test-container-name')
# returns swift container information - mostly faked
component_using.swift.create_container('test-container-name')
component_using_swift.get_container_info('test-container-name')
if FAKE:
swift_stub.with_object('test-container-name', 'test-object-name',
'test-object-contents')
# returns swift object info and contents
component_using_swift.create_object('test-container-name',
'test-object-name', 'test-contents')
component_using_swift.get_object('test-container-name', 'test-object-name')
if FAKE:
swift_stub.without_object('test-container-name', 'test-object-name')
# allows object to be removed ONCE
component_using_swift.remove_object('test-container-name',
'test-object-name')
# throws ClientException - 404
component_using_swift.get_object('test-container-name', 'test-object-name')
component_using_swift.remove_object('test-container-name',
'test-object-name')
if FAKE:
swift_stub.without_object('test-container-name', 'test-object-name')
# allows container to be removed ONCE
component_using_swift.remove_container('test-container-name')
# throws ClientException - 404
component_using_swift.get_container('test-container-name')
component_using_swift.remove_container('test-container-name')
"""
    def __init__(self):
        # Holds a real swiftclient Connection purely as a patch target;
        # with_account() replaces its network methods with mocks.
        super(SwiftClientStub, self).__init__()
        self._connection = swift_client.Connection()
        # container name -> {'count', 'bytes', 'name'} record
        self._containers = {}
        # same records in listing order (what get_account() returns)
        self._containers_list = []
        # container name -> list of object records
        self._objects = {}
def _remove_object(self, name, some_list):
idx = [i for i, obj in enumerate(some_list) if obj['name'] == name]
if len(idx) == 1:
del some_list[idx[0]]
    def _ensure_object_exists(self, container, name):
        """Raise (via the patched get_object side effect) if the object is absent."""
        self._connection.get_object(container, name)
    def with_account(self, account_id):
        """Set up canned account headers and a fake auth token.

        Patches ``Connection.get_auth`` and ``Connection.get_account`` so a
        system under test sees a plausible storage URL / token and the
        current in-memory container list.

        :param account_id: account id (currently unused by the canned data
            — NOTE(review): looks intentional for a stub, confirm)
        :returns: self, for call chaining
        """
        def account_resp():
            # Static headers plus the live list of stubbed containers.
            return ({'content-length': '2', 'accept-ranges': 'bytes',
                     'x-timestamp': '1363049003.92304',
                     'x-trans-id': 'tx9e5da02c49ed496395008309c8032a53',
                     'date': 'Tue, 10 Mar 2013 00:43:23 GMT',
                     'x-account-bytes-used': '0',
                     'x-account-container-count': '0',
                     'content-type': 'application/json; charset=utf-8',
                     'x-account-object-count': '0'}, self._containers_list)
        # (storage_url, canned auth token) returned by the patched get_auth.
        get_auth_return_value = (
            u"http://127.0.0.1:8080/v1/AUTH_c7b038976df24d96bf1980f5da17bd89",
            u'MIINrwYJKoZIhvcNAQcCoIINoDCCDZwCAQExCTAHBgUrDgMCGjCCDIgGCSqGSIb3'
            u'DQEHAaCCDHkEggx1eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAi'
            u'MjAxMy0wMy0xOFQxODoxMzoyMC41OTMyNzYiLCAiZXhwaXJlcyI6ICIyMDEzLTAz'
            u'LTE5VDE4OjEzOjIwWiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7'
            u'ImVuYWJsZWQiOiB0cnVlLCAiZGVzY3JpcHRpb24iOiBudWxsLCAibmFtZSI6ICJy'
            u'ZWRkd2FyZiIsICJpZCI6ICJjN2IwMzg5NzZkZjI0ZDk2YmYxOTgwZjVkYTE3YmQ4'
            u'OSJ9fSwgInNlcnZpY2VDYXRhbG9nIjogW3siZW5kcG9pbnRzIjogW3siYWRtaW5')
        get_auth_patcher = patch.object(
            swift_client.Connection, 'get_auth',
            MagicMock(return_value=get_auth_return_value))
        self._start_patcher(get_auth_patcher)
        get_account_patcher = patch.object(
            swift_client.Connection, 'get_account',
            MagicMock(return_value=account_resp()))
        self._start_patcher(get_account_patcher)
        return self
def _create_container(self, container_name):
container = {'count': 0, 'bytes': 0, 'name': container_name}
self._containers[container_name] = container
self._containers_list.append(container)
self._objects[container_name] = []
    def _ensure_container_exists(self, container):
        """Raise (via the patched get_container side effect) if absent."""
        self._connection.get_container(container)
    def _delete_container(self, container):
        """Drop a container and its objects from the in-memory store.

        Raises KeyError if the container was never created.
        """
        self._remove_object(container, self._containers_list)
        del self._containers[container]
        del self._objects[container]
    def with_container(self, container_name):
        """Expect a container to be created and then queried.

        Registers ``container_name`` in the in-memory store and patches
        ``put_container`` (no-op) and ``get_container`` (canned headers for
        known containers, 404 otherwise).

        :param container_name: container name that is expected to be created
        :returns: self, for call chaining
        """
        def container_resp(container):
            # Static headers plus the live object list for this container.
            return ({'content-length': '2', 'x-container-object-count': '0',
                     'accept-ranges': 'bytes', 'x-container-bytes-used': '0',
                     'x-timestamp': '1363370869.72356',
                     'x-trans-id': 'tx7731801ac6ec4e5f8f7da61cde46bed7',
                     'date': 'Fri, 10 Mar 2013 18:07:58 GMT',
                     'content-type': 'application/json; charset=utf-8'},
                    self._objects[container])
        # if this is called multiple times then nothing happens
        put_container_patcher = patch.object(swift_client.Connection,
                                             'put_container')
        self._start_patcher(put_container_patcher)
        def side_effect_func(*args, **kwargs):
            # args[0] is the container name passed to get_container.
            if args[0] in self._containers:
                return container_resp(args[0])
            else:
                raise swiftclient.ClientException('Resource Not Found',
                                                  http_status=404)
        self._create_container(container_name)
        # return container headers
        get_container_patcher = patch.object(
            swift_client.Connection, 'get_container',
            MagicMock(side_effect=side_effect_func))
        self._start_patcher(get_container_patcher)
        return self
    def without_container(self, container):
        """Expect a container to be removed exactly once.

        After this call the container is gone from the in-memory store, so
        subsequent ``get_container``/``delete`` calls raise a 404
        ClientException via the patched side effects.

        :param container: container name that is expected to be removed
        :returns: self, for call chaining
        :raises swiftclient.ClientException: if the container does not exist
        """
        # first ensure container
        self._ensure_container_exists(container)
        self._delete_container(container)
        return self
    def with_object(self, container, name, contents):
        """Expect an object to be created and then fetched.

        Patches ``put_object`` (returns a fake etag) and ``get_object``
        (canned headers plus the stored contents for known objects, 404
        otherwise), and records the object in the in-memory store.

        :param container: container the object belongs to
        :param name: the name of the object expected to be created
        :param contents: the contents of the object
        :returns: self, for call chaining
        """
        put_object_patcher = patch.object(
            swift_client.Connection, 'put_object',
            MagicMock(return_value=uuid.uuid1()))
        self._start_patcher(put_object_patcher)
        def side_effect_func(*args, **kwargs):
            # args: (container, object_name) as passed to get_object.
            if (args[0] in self._containers and
                    args[1] in map(lambda x: x['name'],
                                   self._objects[args[0]])):
                return (
                    {'content-length': len(contents), 'accept-ranges': 'bytes',
                     'last-modified': 'Mon, 10 Mar 2013 01:06:34 GMT',
                     'etag': 'eb15a6874ce265e2c3eb1b4891567bab',
                     'x-timestamp': '1363568794.67584',
                     'x-trans-id': 'txef3aaf26c897420c8e77c9750ce6a501',
                     'date': 'Mon, 10 Mar 2013 05:35:14 GMT',
                     'content-type': 'application/octet-stream'},
                    [obj for obj in self._objects[args[0]]
                     if obj['name'] == args[1]][0]['contents'])
            else:
                raise swiftclient.ClientException('Resource Not Found',
                                                  http_status=404)
        get_object_patcher = patch.object(
            swift_client.Connection, 'get_object',
            MagicMock(side_effect=side_effect_func))
        self._start_patcher(get_object_patcher)
        # Replace any previous object of the same name, then record this one.
        self._remove_object(name, self._objects[container])
        self._objects[container].append(
            {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
             'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': name,
             'content_type': 'application/octet-stream', 'contents': contents})
        return self
def without_object(self, container, name):
"""
sets expectations for deleting an object
example:
if FAKE:
swift_stub.without_object('test-container-name', 'test-object-name')
# allows container to be removed ONCE
component_using_swift.remove_container('test-container-name')
# throws ClientException - 404
component_using_swift.get_container('test-container-name')
component_using_swift.remove_container('test-container-name')
:param container: container name that is the object belongs
:param name: the name of the object expected to be removed
"""
self._ensure_container_exists(container)
self._ensure_object_exists(container, name)
def side_effect_func(*args, **kwargs):
if not [obj for obj in self._objects[args[0]]
if obj['name'] == [args[1]]]:
raise swiftclient.ClientException('Resource Not found',
http_status=404)
else:
return None
delete_object_patcher = patch.object(
swift_client.Connection, 'delete_object',
MagicMock(side_effect=side_effect_func))
self._start_patcher(delete_object_patcher)
self._remove_object(name, self._objects[container])
return self
def fake_create_swift_client(calculate_etag=False, *args):
    """Return a fake swift client connection for tests.

    NOTE(review): ``calculate_etag`` is accepted but never used here, and
    ``FakeSwiftClient`` is defined elsewhere in this module — confirm the
    flag is consumed by the fake connection, or drop it.
    """
    return FakeSwiftClient.Connection(*args)
| fabian4/trove | trove/tests/fakes/swift.py | Python | apache-2.0 | 21,028 |
#!/usr/bin/env python
import os
import sys
import argparse
import json
# tempate2json.py -k SYSTEM=lonestar.tacc.utexas.edu
# PATH=/home/vaughn/apps -i template.jsonx -o file.json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-k", dest='keys', help='Space-delimited VAR=Value sets', nargs='*')
parser.add_argument("-i", dest='input', help='Input (template.jsonx)')
parser.add_argument("-o", dest="output", help="Output (output.json)")
args = parser.parse_args()
try:
with open(args.input) as f:
s = f.read()
except TypeError, e:
print >> sys.stderr, "[FATAL]", "No filename was provided for -i"
sys.exit(1)
except IOError, e:
print >> sys.stderr, "[FATAL]", args.input, "was not available for reading"
print >> sys.stderr, "Exception: %s" % str(e)
sys.exit(1)
# Iterate through document, replacing variables with values
for kvp in args.keys:
try:
(key, val) = kvp.split('=')
except ValueError:
print '[WARN]', kvp, 'not a valid VAR=Value pair'
keyname = '${' + key + '}'
s = s.replace(keyname, val)
# Print out to JSON
jsonDoc = json.loads(s)
outpath = os.path.dirname(args.output)
if outpath:
if not os.path.exists(os.path.dirname(args.output)):
try:
os.makedirs(os.path.dirname(args.output))
except OSError as exc: # Guard against race condition
print >> sys.stderr, "Exception: %s" % str(exc)
sys.exit(1)
with open(args.output, 'w') as outfile:
json.dump(jsonDoc, outfile, indent=4)
| iPlantCollaborativeOpenSource/cyverse-sdk | src/scripts/template2json.py | Python | bsd-3-clause | 1,718 |
from typing import TypeVar, Any, Union, Callable, List, Tuple, Optional
import ray
from ray.util.annotations import PublicAPI, DeveloperAPI
from ray.data.block import (
Block,
BlockAccessor,
BlockMetadata,
BlockPartition,
BlockExecStats,
)
from ray.data.context import DatasetContext
from ray.data.impl.delegating_block_builder import DelegatingBlockBuilder
from ray.data.impl.block_list import BlockList
from ray.data.impl.progress_bar import ProgressBar
from ray.data.impl.remote_fn import cached_remote_fn
T = TypeVar("T")
U = TypeVar("U")
# A class type that implements __call__.
CallableClass = type
@DeveloperAPI
class ComputeStrategy:
    """Base interface for strategies that map a function over dataset blocks."""

    # CONSISTENCY FIX: both concrete strategies (TaskPoolStrategy,
    # ActorPoolStrategy) implement _apply(fn, remote_args, block_list,
    # clear_input_blocks); the base signature previously omitted remote_args.
    def _apply(
        self,
        fn: Any,
        remote_args: dict,
        block_list: BlockList,
        clear_input_blocks: bool,
    ) -> BlockList:
        """Apply ``fn`` to every block; implemented by subclasses."""
        raise NotImplementedError
@DeveloperAPI
class TaskPoolStrategy(ComputeStrategy):
    """Apply ``fn`` to each block with one Ray task per block."""

    def _apply(
        self,
        fn: Any,
        remote_args: dict,
        block_list: BlockList,
        clear_input_blocks: bool,
    ) -> BlockList:
        context = DatasetContext.get_current()
        # Handle empty datasets.
        if block_list.initial_num_blocks() == 0:
            return block_list
        blocks = block_list.get_blocks_with_metadata()
        map_bar = ProgressBar("Map Progress", total=len(blocks))
        if context.block_splitting_enabled:
            # One ref per task; each resolves to a list of (block, metadata).
            map_block = cached_remote_fn(_map_block_split).options(**remote_args)
            refs = [map_block.remote(b, fn, m.input_files) for b, m in blocks]
        else:
            # Two returns per task: the output block and its metadata.
            map_block = cached_remote_fn(_map_block_nosplit).options(
                **dict(remote_args, num_returns=2)
            )
            all_refs = [map_block.remote(b, fn, m.input_files) for b, m in blocks]
            data_refs = [r[0] for r in all_refs]
            refs = [r[1] for r in all_refs]
        # Release input block references.
        if clear_input_blocks:
            del blocks
            block_list.clear()
        # Common wait for non-data refs.
        try:
            results = map_bar.fetch_until_complete(refs)
        except (ray.exceptions.RayTaskError, KeyboardInterrupt) as e:
            # One or more mapper tasks failed, or we received a SIGINT signal
            # while waiting; either way, we cancel all map tasks.
            for ref in refs:
                ray.cancel(ref)
            # Wait until all tasks have failed or been cancelled.
            for ref in refs:
                try:
                    ray.get(ref)
                except (ray.exceptions.RayTaskError, ray.exceptions.TaskCancelledError):
                    pass
            # Reraise the original task failure exception.
            raise e from None
        new_blocks, new_metadata = [], []
        if context.block_splitting_enabled:
            # Flatten the per-task lists of (block, metadata) pairs.
            for result in results:
                for block, metadata in result:
                    new_blocks.append(block)
                    new_metadata.append(metadata)
        else:
            for block, metadata in zip(data_refs, results):
                new_blocks.append(block)
                new_metadata.append(metadata)
        return BlockList(list(new_blocks), list(new_metadata))
@PublicAPI
class ActorPoolStrategy(ComputeStrategy):
    """Specify the compute strategy for a Dataset transform.

    ActorPoolStrategy specifies that an autoscaling pool of actors should be
    used for a given Dataset transform. This is useful for stateful setup of
    callable classes.

    To autoscale from ``m`` to ``n`` actors, specify
    ``compute=ActorPoolStrategy(m, n)``.
    For a fixed-sized pool of size ``n``, specify
    ``compute=ActorPoolStrategy(n, n)``.
    """

    def __init__(self, min_size: int = 1, max_size: Optional[int] = None):
        """Create the strategy with pool bounds.

        Args:
            min_size: Minimum number of actors (must be at least 1).
            max_size: Maximum number of actors; unbounded if None.
        """
        if min_size < 1:
            # BUG FIX: the message previously said "must be > 1" although
            # the check accepts min_size == 1.
            raise ValueError("min_size must be >= 1", min_size)
        if max_size is not None and min_size > max_size:
            raise ValueError("min_size must be <= max_size", min_size, max_size)
        self.min_size = min_size
        self.max_size = max_size or float("inf")

    def _apply(
        self,
        fn: Any,
        remote_args: dict,
        block_list: BlockList,
        clear_input_blocks: bool,
    ) -> BlockList:
        """Note: this is not part of the Dataset public API."""
        context = DatasetContext.get_current()
        blocks_in = block_list.get_blocks_with_metadata()
        # Early release block references.
        if clear_input_blocks:
            block_list.clear()
        orig_num_blocks = len(blocks_in)
        results = []
        map_bar = ProgressBar("Map Progress", total=orig_num_blocks)

        class BlockWorker:
            def ready(self):
                return "ok"

            def map_block_split(
                self, block: Block, input_files: List[str]
            ) -> BlockPartition:
                return _map_block_split(block, fn, input_files)

            @ray.method(num_returns=2)
            def map_block_nosplit(
                self, block: Block, input_files: List[str]
            ) -> Tuple[Block, BlockMetadata]:
                return _map_block_nosplit(block, fn, input_files)

        if not remote_args:
            # NOTE: mutates the caller's dict when empty; default 1 CPU.
            remote_args["num_cpus"] = 1
        BlockWorker = ray.remote(**remote_args)(BlockWorker)
        workers = [BlockWorker.remote() for _ in range(self.min_size)]
        # Pending object ref -> worker that will produce it. A worker's
        # first pending ref is its ready() handshake.
        tasks = {w.ready.remote(): w for w in workers}
        metadata_mapping = {}
        ready_workers = set()
        while len(results) < orig_num_blocks:
            ready, _ = ray.wait(
                list(tasks), timeout=0.01, num_returns=1, fetch_local=False
            )
            if not ready:
                # Nothing finished yet: grow the pool while most workers
                # are busy and we are below max_size.
                if (
                    len(workers) < self.max_size
                    and len(ready_workers) / len(workers) > 0.8
                ):
                    w = BlockWorker.remote()
                    workers.append(w)
                    tasks[w.ready.remote()] = w
                    map_bar.set_description(
                        "Map Progress ({} actors {} pending)".format(
                            len(ready_workers), len(workers) - len(ready_workers)
                        )
                    )
                continue
            [obj_id] = ready
            worker = tasks[obj_id]
            del tasks[obj_id]
            # Process task result.
            if worker in ready_workers:
                results.append(obj_id)
                map_bar.update(1)
            else:
                # First completion is the ready() handshake.
                ready_workers.add(worker)
                map_bar.set_description(
                    "Map Progress ({} actors {} pending)".format(
                        len(ready_workers), len(workers) - len(ready_workers)
                    )
                )
            # Schedule a new task.
            if blocks_in:
                block, meta = blocks_in.pop()
                if context.block_splitting_enabled:
                    ref = worker.map_block_split.remote(block, meta.input_files)
                else:
                    ref, meta_ref = worker.map_block_nosplit.remote(
                        block, meta.input_files
                    )
                    metadata_mapping[ref] = meta_ref
                tasks[ref] = worker
        map_bar.close()
        new_blocks, new_metadata = [], []
        if context.block_splitting_enabled:
            for result in ray.get(results):
                for block, metadata in result:
                    new_blocks.append(block)
                    new_metadata.append(metadata)
        else:
            for block in results:
                new_blocks.append(block)
                new_metadata.append(metadata_mapping[block])
            new_metadata = ray.get(new_metadata)
        return BlockList(new_blocks, new_metadata)
def cache_wrapper(
    fn: Union[CallableClass, Callable[[Any], Any]],
    compute: Optional[Union[str, ComputeStrategy]],
) -> Callable[[Any], Any]:
    """Implements caching of stateful callables.

    Args:
        fn: Either a plain function or class of a stateful callable.
        compute: The compute strategy; must not be None when ``fn`` is a
            callable class (actor-based compute is expected in that case).

    Returns:
        A plain function with per-process initialization cached as needed.
    """
    if isinstance(fn, CallableClass):
        if compute is None:
            raise ValueError(
                "``compute`` must be specified when using a callable class. "
                'For example, use ``compute="actors"`` or '
                "``compute=ActorPoolStrategy(min, max)``."
            )

        def _fn(item: Any) -> Any:
            # Instantiate the class once per process; reuse the instance
            # until a different callable class is requested.
            if ray.data._cached_fn is None or ray.data._cached_cls != fn:
                ray.data._cached_cls = fn
                ray.data._cached_fn = fn()
            return ray.data._cached_fn(item)

        return _fn
    else:
        return fn
def get_compute(compute_spec: Union[str, ComputeStrategy]) -> ComputeStrategy:
    """Resolve a compute spec (name or strategy instance) to a strategy."""
    # Falsy (None/"") defaults to task-based compute.
    if not compute_spec or compute_spec == "tasks":
        return TaskPoolStrategy()
    if compute_spec == "actors":
        return ActorPoolStrategy()
    if isinstance(compute_spec, ComputeStrategy):
        return compute_spec
    raise ValueError("compute must be one of [`tasks`, `actors`, ComputeStrategy]")
def _map_block_split(block: Block, fn: Any, input_files: List[str]) -> BlockPartition:
    """Apply ``fn`` to ``block`` and return (block_ref, metadata) pairs.

    ``fn`` may yield multiple output blocks; each is put into the object
    store (owned by the dataset context's block owner) with its metadata.
    """
    output = []
    stats = BlockExecStats.builder()
    for new_block in fn(block):
        accessor = BlockAccessor.for_block(new_block)
        new_meta = BlockMetadata(
            num_rows=accessor.num_rows(),
            size_bytes=accessor.size_bytes(),
            schema=accessor.schema(),
            input_files=input_files,
            exec_stats=stats.build(),
        )
        owner = DatasetContext.get_current().block_owner
        output.append((ray.put(new_block, _owner=owner), new_meta))
        # Restart the stats timer so each output block is timed separately.
        stats = BlockExecStats.builder()
    return output
def _map_block_nosplit(
    block: Block, fn: Any, input_files: List[str]
) -> Tuple[Block, BlockMetadata]:
    """Apply ``fn`` to ``block`` and merge all outputs into a single block."""
    stats = BlockExecStats.builder()
    builder = DelegatingBlockBuilder()
    for new_block in fn(block):
        builder.add_block(new_block)
    new_block = builder.build()
    accessor = BlockAccessor.for_block(new_block)
    return new_block, accessor.get_metadata(
        input_files=input_files, exec_stats=stats.build()
    )
| ray-project/ray | python/ray/data/impl/compute.py | Python | apache-2.0 | 10,223 |
from django.conf.urls.defaults import patterns, url, include
from django.contrib import admin
from django.conf import settings
from django.views.generic.simple import direct_to_template
# Discover admin registrations across installed apps.
admin.autodiscover()

urlpatterns = patterns(
    '',
    url(r'^', include('reciblog.blog.urls')),
    url(r'^about$', direct_to_template, {'template': 'about.html'}, name='about'),
    url(r'^admin/', include(admin.site.urls)),
)

# Serve user-uploaded media through Django only during development.
# IDIOM FIX: truth-test the flag instead of comparing with `== True`.
if settings.DEBUG:
    urlpatterns += patterns(
        '',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
            {'document_root': settings.MEDIA_ROOT}),
    )
| wraithan/reciblog | urls.py | Python | bsd-2-clause | 617 |
'''
Splits dataset into samples of specified duration (we used 4 seconds) and
puts all samples not of this duration into seperate directory
@date 2017-05-19
'''
import os
import wave
import sys
from os import listdir
from os.path import isfile, isdir, join
# Hyperparameter: required sample duration in seconds, e.g. "4.0"
wantedduration = sys.argv[1]
# if sys input is given to specify directory, take it
try:
    mypath = sys.argv[2]
# else use current
except IndexError:
    mypath = "./"
try:
    os.mkdir(mypath+"no4secs")
except OSError:
    # ROBUSTNESS FIX: catch only OSError (directory already exists)
    # instead of a bare except that swallowed everything.
    pass
path_processed = mypath+"processed/"
onlyfiles = [f for f in listdir(path_processed) if isfile(join(path_processed, f))]
for file_name in onlyfiles:
    if file_name.split(".")[-1] == "wav":
        # BUG FIX: the wave file handle was previously never closed.
        f = wave.open(path_processed+file_name, "r")
        try:
            frames = f.getnframes()
            rate = f.getframerate()
        finally:
            f.close()
        duration = frames / float(rate)
        # BUG FIX: compare numerically instead of via str(), so "4",
        # "4.0", "4.00" all denote the same duration.
        if duration != float(wantedduration):
            os.rename(path_processed+file_name, mypath+'no4secs/'+file_name)
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# The blacklist of OpenGL cards. Fields are:
# - A substring of the Renderer string reported by the driver.
# - A substring of the Version string reported by the driver.
# - True to allow shader rendering.
# - True to allow fixed-function rendering.
#
# Entries are matched in order; the first matching entry wins.
# If both of the last two entries are false, GL refuses to
# start.
BLACKLIST = [
    # Crashes for Mugenjohncel.
    ("S3 Graphics DeltaChrome", "1.4 20.00", False, False),

    # A bug in Mesa 7.9 and 7.10 (before 7.10.3) causes the system to
    # fail to initialize the GLSL compiler.
    # https://bugs.freedesktop.org/show_bug.cgi?id=35603
    ("Mesa", "Mesa 7.9", False, True),
    ("Mesa", "Mesa 7.10.3", True, True),
    ("Mesa", "Mesa 7.10", False, True),

    # Default to allowing everything.
    ("", "", True, True),
    ]
| kfcpaladin/sze-the-game | renpy/gl/glblacklist.py | Python | mit | 1,846 |
def remove_duplicates(sentence: str) -> str:
    """
    Remove duplicate words from sentence and return them sorted alphabetically.

    >>> remove_duplicates("Python is great and Java is also great")
    'Java Python also and great is'
    >>> remove_duplicates("")
    ''
    """
    # DOC FIX: the second doctest duplicated the first verbatim; it now
    # covers the empty-string edge case instead.
    return " ".join(sorted(set(sentence.split())))
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| TheAlgorithms/Python | strings/remove_duplicate.py | Python | mit | 434 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from decimal import Decimal
import doctest
import unittest
from couchdb import design, mapping
from schematics.models import Model
from schematics.types import *
from schematics.types.compound import ListType, ModelType
from schematics.serialize import wholelist, whitelist, blacklist, make_safe_dict, make_safe_json, to_dict
from couchdb.tests import testutil
from couchdb.tests.fixtures import User
class DocumentTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Tests for basic mapping.Document persistence against a temp CouchDB."""

    class Post(mapping.Document):
        # Minimal document type used by most tests below.
        title = StringType()

    def testDictType(self):
        """DictType fields round-trip through store/load independently."""
        class Test(mapping.Document):
            d = DictType()
        a = Test(id='a')
        b = Test()
        # TODO - fix DictType to default to an empty dict rather than None
        a.d = { }
        a.d['x'] = True
        b.id = 'b'
        self.assertTrue(a.d.get('x'))
        self.assertFalse(b.d.get('x'))
        a.store(self.db)
        b.store(self.db)
        # NOTE: names are deliberately swapped on reload ('a' loads doc 'b'
        # and vice versa); the assertions below are swapped to match.
        a = Test.load(self.db,'b')
        b = Test.load(self.db,'a')
        self.assertTrue(b.d.get('x'))
        self.assertFalse(a.d.get('x'))

    def testAutomaticID(self):
        """Storing a document without an id assigns one."""
        post = self.Post(title='foo bar')
        assert post.id is None
        post.store(self.db)
        assert post.id is not None
        self.assertEqual('foo bar', self.db[post.id]['title'])

    def testExplicitIDViaInit(self):
        """An id passed to the constructor is used as the document key."""
        post = self.Post(id='foo_bar',title='foo bar')
        self.assertEqual(post.id, 'foo_bar')
        post.store(self.db)
        self.assertEqual('foo bar', self.db['foo_bar']['title'])

    def testExplicitIDViaSetter(self):
        """An id assigned before the first store is used as the key."""
        post = self.Post(title='foo bar')
        post.id = 'foo_bar'
        self.assertEqual(post.id, 'foo_bar')
        post.store(self.db)
        self.assertEqual('foo bar', self.db['foo_bar']['title'])

    def testChangeIDFailure(self):
        """Reassigning the id of a stored document raises AttributeError."""
        post = self.Post(title='foo bar')
        assert post.id is None
        post.store(self.db)
        assert post.id is not None
        try:
            post.id = 'new id'
            self.fail('Expected AttributeError')
        except AttributeError, e:
            self.assertEqual('id can only be set on new documents', e.args[0])

    def testBatchUpdate(self):
        """db.update stores multiple documents and reports per-doc success."""
        post1 = self.Post(title='foo bar')
        post2 = self.Post(title='foo baz')
        results = self.db.update([post1, post2])
        self.assertEqual(2, len(results))
        assert results[0][0] is True
        assert results[1][0] is True

    def testStoreExisting(self):
        """Storing the same document twice updates it rather than duplicating."""
        post = self.Post(title='Foo bar')
        post.store(self.db)
        post.store(self.db)
        self.assertEqual(len(list(self.db.view('_all_docs'))), 1)
class ModelTypeTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Tests for nested schematics models stored inside a Document."""

    class Post(mapping.Document):
        class Comment(Model):
            author = StringType()
            comment = StringType()
        title = StringType()
        # List of embedded Comment models, serialized as dicts.
        comments = ListType(ModelType(Comment))

    def testPost(self):
        """Embedded models serialize to plain dicts in the stored document."""
        post = self.Post(id='foo_bar', title="Foo bar")
        assert isinstance(post.comments, ListType.Proxy)
        post.comments.append(self.Post.Comment(author="myself", comment="blah blah blah"))
        post.store(self.db)
        self.assertEqual(self.db['foo_bar']['comments'], [{u'author': u'myself', u'comment': u'blah blah blah'}])
class UserModelTypeTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Tests for the User fixture: password hashing, validation, roles."""

    def testSessionCreation(self):
        """A new session gets a token and a creation timestamp."""
        token = User.Session()
        assert token.token is not None
        assert token.created_on is not None

    def testPasswordSetterWithKeywordOnInitialization(self):
        """Constructor hashes the password; challenge accepts the clear text."""
        user = User(email='user1@gmail.com',password='pass1')
        self.assertNotEqual(user.password, MD5Type.generate('pass1'))
        self.assertTrue(user.challenge_password('pass1'))

    def testSetPassword(self):
        """Attribute assignment hashes the password the same way."""
        user = User(email='user1@gmail.com')
        user.password = 'pass1'
        self.assertNotEqual(user.password, MD5Type.generate('pass1'))
        self.assertTrue(user.challenge_password('pass1'))

    def testInvalidUser(self):
        """Storing a user with a malformed email raises a validation error."""
        user = User()
        user.email = 'invalid@email'
        self.assertRaises( Exception, user.store, self.db)

    def testValidUser(self):
        """A valid user stores and round-trips its email."""
        user = User(email='ryan.olson@gmail.com', password=MD5Type.generate('secret'))
        self.assertTrue(user.challenge_password(MD5Type.generate('secret')))
        user.store(self.db)
        self.assertEqual(self.db[user.id][u'email'], u'ryan.olson@gmail.com')

    def testRoles(self):
        """Role-filtered JSON hides password/token but keeps public fields."""
        user = User(email='ryan.olson@gmail.com', password=MD5Type.generate('secret'))
        user.sessions.append(User.Session())
        user.store(self.db)
        self.assertEqual(self.db[user.id][u'email'], u'ryan.olson@gmail.com')
        json = make_safe_json(User,user,'mysessions')
        assert 'password' not in json
        assert 'token' not in json
        assert 'email' in json
        assert 'created_on' in json
        assert self.db[user.id]['password'] is not None
        u2 = User.load(self.db, user.id)
        self.assertTrue(u2.challenge_password(MD5Type.generate('secret')))
def suite():
    """Build the test suite: module doctests plus the active test cases.

    ListFieldTestCase and WrappingTestCase remain disabled, as before.
    """
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(mapping))
    for case in (DocumentTestCase, ModelTypeTestCase, UserModelTypeTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests
if __name__ == '__main__':
    # Run the suite() defined above when executed directly.
    unittest.main(defaultTest='suite')
| ryanolson/couchdb-python | couchdb/tests/mapping.py | Python | bsd-3-clause | 5,861 |
from pcs_test.tools.command_env.mock_get_local_corosync_conf import Call
from pcs_test.tools.misc import get_test_resource as rc
from pcs.lib.corosync.config_facade import ConfigFacade
from pcs.lib.corosync.config_parser import Parser, Section
class CorosyncConf:
    """Test-env helper that queues expected corosync.conf load calls."""

    def __init__(self, call_collection):
        # The shared call queue the test environment verifies against.
        self.__calls = call_collection

    def load_content(
        self,
        content,
        name="corosync_conf.load_content",
        instead=None,
        exception_msg=None,
    ):
        """Queue a call expectation that returns ``content`` (or raises)."""
        self.__calls.place(
            name, Call(content, exception_msg=exception_msg), instead=instead
        )

    def load(
        self,
        node_name_list=None,
        name="corosync_conf.load",
        filename="corosync.conf",
        auto_tie_breaker=None,
        instead=None,
    ):
        """Queue a load expectation built from a fixture file.

        The fixture ``filename`` is optionally rewritten: when
        ``node_name_list`` is given, its nodelist is replaced with nodes
        named after the list entries (nodeid = list index); when
        ``auto_tie_breaker`` is not None, the quorum section's
        auto_tie_breaker attribute is set to "1"/"0" accordingly.
        """
        # pylint: disable=too-many-locals
        with open(rc(filename)) as a_file:
            content = a_file.read()
        corosync_conf = None
        if node_name_list:
            corosync_conf = ConfigFacade(
                Parser.parse(content.encode("utf-8"))
            ).config
            # Drop any existing nodelist sections and build a fresh one.
            for nodelist in corosync_conf.get_sections(name="nodelist"):
                corosync_conf.del_section(nodelist)
            nodelist_section = Section("nodelist")
            corosync_conf.add_section(nodelist_section)
            for i, node_name in enumerate(node_name_list):
                node_section = Section("node")
                node_section.add_attribute("ring0_addr", node_name)
                node_section.add_attribute("nodeid", i)
                node_section.add_attribute("name", node_name)
                nodelist_section.add_section(node_section)
        if auto_tie_breaker is not None:
            # Reuse the parsed config if the nodelist step created one.
            corosync_conf = (
                corosync_conf
                if corosync_conf
                else ConfigFacade(Parser.parse(content.encode("utf-8"))).config
            )
            for quorum in corosync_conf.get_sections(name="quorum"):
                quorum.set_attribute(
                    "auto_tie_breaker", "1" if auto_tie_breaker else "0"
                )
        if corosync_conf:
            content = corosync_conf.export()
        self.load_content(content, name=name, instead=instead)
| feist/pcs | pcs_test/tools/command_env/config_corosync_conf.py | Python | gpl-2.0 | 2,245 |
import argparse
from random import shuffle
import json
import cPickle as pickle
import os
import numpy as np
import dataset as data
# Default rotation angles (degrees) used for augmentation, as a JSON string.
DFLT_ANGLES = '[0, 15, 30, 45, 60, 75, 90, 105, 120, 135, 150, 165, 180]'
# Default augmentation parameters (JSON) combining angles and pixel shift.
DFLT_AUGMENT_PRM = '{"angles":' + DFLT_ANGLES + ', "max_pixel": 58}'
HELP_AUGMENT_PRM = 'Parameters used to augment the training set'
# Default output directory, a sibling "aug" folder next to the train folder.
DFLT_OUTDIR = os.path.join(data.train_folder(), '..', 'aug')
HELP_OUTDIR = 'Output directory to allocate augmented dataset'
def main(data_outdir='', aug_prm=None, pctil=0.75, nfold=3.0, **kwargs):
    """Augment the training set, balance it, and write train/val lists.

    Writes ``<data_outdir>_train.txt``, ``<data_outdir>_val.txt`` and a
    pickle ``<data_outdir>.p`` with the augmented dataset bookkeeping.

    :param data_outdir: output path prefix for the generated files
    :param aug_prm: augmentation parameters dict (None -> {})
    :param pctil: percentile used to cap samples per category
    :param nfold: train/val split is 1/nfold for validation
    """
    # BUG FIX: aug_prm previously defaulted to a mutable {} shared across
    # calls; use the None sentinel instead.
    if aug_prm is None:
        aug_prm = {}
    dataset_info = data.train_list(data.train_folder())
    aug_dataset_info = data.save_augmented_dataset(dataset_info[0],
                                                   dataset_info[1],
                                                   data_outdir, aug_prm)
    idx_train, idx_test = data.stratified_partition(aug_dataset_info[1],
                                                    1/nfold)
    train_info = ([aug_dataset_info[0][i] for i in idx_train],
                  [aug_dataset_info[1][i] for i in idx_train])
    samples_per_cat = data.samples_per_categories(np.array(train_info[1]))
    max_samples = max(samples_per_cat)
    # NOTE(review): np.percentile expects a percentage in [0, 100];
    # pctil=0.75 requests the 0.75th percentile, not the 75th — confirm.
    min_samples = np.mean([max_samples, np.percentile(samples_per_cat, pctil)])
    idx_train = data.balanced_dataset(train_info[1], int(min_samples))
    shuffle(idx_train)
    # ROBUSTNESS FIX: use context managers so the list files are closed
    # even if a write fails part-way through.
    with open(data_outdir + '_train.txt', 'w') as fid_tr, \
            open(data_outdir + '_val.txt', 'w') as fid_ts:
        for i, v in enumerate(idx_train):
            if i < len(idx_test):
                new_line = '{0} {1}\n'.format(
                    aug_dataset_info[0][idx_test[i]],
                    str(aug_dataset_info[1][idx_test[i]]))
                fid_ts.write(new_line)
            new_line = '{0} {1}\n'.format(train_info[0][v],
                                          str(train_info[1][v]))
            fid_tr.write(new_line)
    with open(data_outdir + '.p', 'w') as fid:
        pickle.dump((aug_dataset_info, train_info, idx_train, idx_test), fid)
    return None
if __name__ == '__main__':
    # Command-line front end for main(); defaults shown in --help.
    p = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument('-do', '--data_outdir', type=str, default=DFLT_OUTDIR,
                   help=HELP_OUTDIR)
    # aug_prm is parsed from a JSON string into a dict.
    p.add_argument('-p', '--aug_prm', type=json.loads, default=DFLT_AUGMENT_PRM,
                   help=HELP_AUGMENT_PRM)
    main(**vars(p.parse_args()))
| escorciav/ndsb | src/s_augment_dataset.py | Python | mit | 2,397 |
# One of your friends has an awful writing style: he almost never starts a message with a capital letter, but adds
# uppercase letters in random places throughout the message. It makes chatting with him very difficult for you, so
# you decided to write a plugin that will change each message received from your friend into a more readable form.
#
# Implement a function that will change the very first symbol of the given message to uppercase, and make all the
# other letters lowercase.
#
# Example
# For message = "you'll NEVER believe what that 'FrIeNd' of mine did!!1",
# the output should be
# fixMessage(message) = "You'll never believe what that 'friend' of mine did!!1".
def fixMessage(message):
    """Return *message* with its first character uppercased and every
    other letter lowercased (the format the exercise requires)."""
    return message.capitalize()
| ntthuy11/CodeFights | Arcade/04_Python/02_SlitheringInStrings/fixMessage.py | Python | mit | 791 |
from branca.element import MacroElement
from folium.elements import JSCSSMixin
from folium.utilities import parse_options
from jinja2 import Template
class MousePosition(JSCSSMixin, MacroElement):
    """Add a field that shows the coordinates of the mouse position.

    Uses the Leaflet plugin by Ardhi Lukianto under MIT license.
    https://github.com/ardhi/Leaflet.MousePosition

    Parameters
    ----------
    position : str, default 'bottomright'
        The standard Control position parameter for the widget.
    separator : str, default ' : '
        Character used to separate latitude and longitude values.
    empty_string : str, default 'Unavailable'
        Initial text to display.
    lng_first : bool, default False
        Whether to put the longitude first or not.
        Set as True to display longitude before latitude.
    num_digits : int, default '5'
        Number of decimal places included in the displayed
        longitude and latitude decimal degree values.
    prefix : str, default ''
        A string to be prepended to the coordinates.
    lat_formatter : str, default None
        Custom Javascript function to format the latitude value.
    lng_formatter : str, default None
        Custom Javascript function to format the longitude value.

    Examples
    --------
    >>> fmtr = "function(num) {return L.Util.formatNum(num, 3) + ' º ';};"
    >>> MousePosition(position='topright', separator=' | ', prefix="Mouse:",
    ...               lat_formatter=fmtr, lng_formatter=fmtr)

    """
    # Jinja macro that instantiates the Leaflet control and attaches the
    # optional JS formatter functions before adding it to the parent map.
    _template = Template("""
        {% macro script(this, kwargs) %}
            var {{ this.get_name() }} = new L.Control.MousePosition(
                {{ this.options|tojson }}
            );
            {{ this.get_name() }}.options["latFormatter"] =
                {{ this.lat_formatter }};
            {{ this.get_name() }}.options["lngFormatter"] =
                {{ this.lng_formatter }};
            {{ this._parent.get_name() }}.addControl({{ this.get_name() }});
        {% endmacro %}
    """)

    # Plugin assets injected into the page by JSCSSMixin.
    default_js = [
        ('Control_MousePosition_js',
         'https://cdn.jsdelivr.net/gh/ardhi/Leaflet.MousePosition/src/L.Control.MousePosition.min.js')
    ]
    default_css = [
        ('Control_MousePosition_css',
         'https://cdn.jsdelivr.net/gh/ardhi/Leaflet.MousePosition/src/L.Control.MousePosition.min.css')
    ]

    def __init__(self, position='bottomright', separator=' : ',
                 empty_string='Unavailable', lng_first=False, num_digits=5,
                 prefix='', lat_formatter=None, lng_formatter=None, **kwargs):
        super(MousePosition, self).__init__()
        self._name = 'MousePosition'

        self.options = parse_options(
            position=position,
            separator=separator,
            empty_string=empty_string,
            lng_first=lng_first,
            num_digits=num_digits,
            prefix=prefix,
            **kwargs
        )
        # Formatters are raw JS snippets; 'undefined' lets the plugin use
        # its built-in formatting.
        self.lat_formatter = lat_formatter or 'undefined'
        self.lng_formatter = lng_formatter or 'undefined'
| python-visualization/folium | folium/plugins/mouse_position.py | Python | mit | 3,066 |
#Get dota text from soundboard.com
from bs4 import BeautifulSoup
import json
import re
import requests
#Website to scrape dota text from
website = "http://www.soundboard.com/sb/howboutthis"
#Get website data
data = requests.get(website).text
#Parse data using bs4
soup = BeautifulSoup(data, "html.parser")
#List of dota text
dataList = soup.find("div", {"ul": "playlist"})
texts = []
for i in range(25):
text = soup.find("a", {"id": "track_" + str(i)}).find("span").contents[0]
texts.append(text)
with open("dotaTexts", 'w') as f:
for text in texts:
f.write(text + "\n") | KingsleyBell/dotacelebbot | textList.py | Python | mit | 590 |
import esp
class DS:
    """Minimal DS18B20 1-wire temperature sensor driver (esp module)."""
    @staticmethod
    def calc_crc(indata):
        """Dallas/Maxim CRC-8 (reflected polynomial 0x8C) over a byte iterable."""
        crc = 0
        for byte in indata:
            crc ^= byte
            for _ in range(8):
                lsb = crc & 0x01
                crc >>= 1
                if lsb:
                    crc ^= 0x8C
        return crc
    @staticmethod
    def convert(LowByte, HighByte):
        """Turn a raw 16-bit scratch-pad reading into (whole, fraction) degrees."""
        raw = (HighByte << 8) + LowByte
        if raw & 0x8000:
            # Negative reading: undo the two's complement.
            raw = (raw ^ 0xffff) + 1
        # Scale by 6.25 (= 100 * 0.0625 degC per LSB) in integer maths.
        centi = (6 * raw) + raw // 4
        return centi // 100, centi % 100
    def __init__(self, port=5):
        """Open the 1-wire bus on GPIO *port*."""
        self.dev = esp.one_wire(port, edge=esp.gpio.INTR_ANYEDGE)
    def get_rom(self):
        """Read and print the 8-byte ROM id of the single attached device."""
        self.dev.reset()
        # suppress_skip: issue READ ROM without a preceding SKIP ROM command.
        self.dev.write([esp.one_wire.READ_ROM], suppress_skip=True)
        data = self.dev.read(8)
        print(data)
        print("CRC", self.calc_crc(data[:-1]), data[-1])
    def convert_temp(self):
        """Start a temperature conversion on the sensor."""
        self.dev.reset()
        self.dev.write([esp.one_wire.CONVERT_T])
    def get_temp(self):
        """Read the 9-byte scratch pad; print it, its CRC and the temperature."""
        self.dev.reset()
        self.dev.write([esp.one_wire.READ_SCRATCH_PAD])
        pad = self.dev.read(9)
        print(pad)
        print("CRC", self.calc_crc(pad[:-1]), pad[-1])
        print(self.convert(*pad[:2]))
| mianos/micropython | esp8266/examples/onewire.py | Python | mit | 1,458 |
#! /usr/bin/env python
# coding=utf-8
""" Grep-like tool for FITS headers
Call commandline: ``grepfitshdr --help`` for parameters info.
"""
from __future__ import print_function, division
import astropy.io.fits as pyfits
import re
from os.path import isfile
from sys import exit
from sys import stdout
from glob import glob
import astwro.tools.__commons as commons
def headers(filenames, hdus):
    """Yield ``(header, filename, hdu_index)`` for every existing file/HDU pair.

    Missing files are silently skipped; a missing HDU index prints a warning
    and is skipped.  Headers are verified with 'silentfix' before yielding.
    """
    for path in filenames:
        if not isfile(path):
            continue
        hdulist = pyfits.open(path)
        for idx in hdus:
            try:
                hdulist[idx].verify('silentfix')
                yield hdulist[idx].header, path, idx
            except IndexError:
                print('[WARN] HDU {} non exist in file {}'.format(idx, path))
        hdulist.close()
def printmatch(output, filename, hdu, line, withfile, fileonly):
    """Print one matched header line to *output*.

    With *withfile* the line is prefixed by ``"<filename> hdu:<n>: "``.
    With *fileonly* nothing is printed here at all (the caller reports
    matching file names itself).
    """
    if fileonly:
        return
    if withfile:
        prefix = filename + ' hdu:{}: '.format(hdu)
        print(prefix, end='', file=output)
    print(line, file=output)
def iter_fields(hdr, onlyvalues=False, fields=None):
    """Iterate over the content of header *hdr*.

    With *fields* only the values of the named fields are produced (field
    names are dropped, i.e. *fields* forces values-only output).  With
    *onlyvalues* every truthy value is produced as a string.  Otherwise
    whole textual header lines are produced, one per card.
    """
    if fields:
        for name in hdr:
            if name in fields:
                yield hdr[name]
    elif onlyvalues:
        for value in hdr.values():
            if value:
                yield str(value)
    else:
        text = repr(hdr).strip()
        for card_line in text.splitlines():
            yield card_line
def grep(pattern, filenames, output=stdout, invert=False, withfile=False, fileonly=False,
         fields=None, onlyvalues=False, ignorecase=True, hdu=None):
    """Search FITS headers for *pattern*, printing matches to *output*.

    :param pattern: regular expression applied to each header line or value
    :param filenames: list of paths, or a single string treated as a glob
    :param output: writable stream for the matches
    :param invert: report lines that do NOT match (like ``grep -v``)
    :param withfile: prefix each match with ``"<file> hdu:<n>: "``
    :param fileonly: print only names of files with a match
    :param fields: restrict the search to these header fields
    :param onlyvalues: match against values only, not field names
    :param ignorecase: case-insensitive matching
    :param hdu: list of HDU indices to scan; default ``[0]``
    :return: total number of matched lines over all files/HDUs
    """
    # "-f *" means: search every field, but only its value.
    if fields is not None and fields[0] == '*':
        fields = None
        onlyvalues = True
    if hdu is None:
        hdu = [0]
    # A bare string is treated as a glob pattern, not a literal file name.
    if isinstance(filenames, str):
        filenames = glob(filenames)
    regexp = re.compile(pattern, flags=re.IGNORECASE if ignorecase else 0)
    globmatched = 0
    for h, f, n in headers(filenames, hdus=hdu):
        matched = scanned = 0
        for line in iter_fields(h, onlyvalues=onlyvalues, fields=fields):
            match = regexp.search(str(line))
            if invert:
                match = not match
            scanned += 1
            if match:
                matched += 1
                printmatch(output, f, n, line, withfile, fileonly)
                if fileonly and not invert:
                    # One hit is enough to report this file; stop scanning it.
                    break
        globmatched += matched
        if fileonly:
            # With -v a file is reported only when *every* scanned line "matched".
            if (not invert and matched > 0) or (invert and matched == scanned):
                print (f, file=output)
    return globmatched
def __do(arg):
    """Translate a parsed argument namespace into a ``grep()`` call."""
    # "--hdu 0,2,5" arrives as a single string; turn it into a list of ints.
    if isinstance(arg.hdu, str):
        arg.hdu = [int(piece) for piece in arg.hdu.split(',')]
    return grep(
        arg.pattern,
        arg.file,
        invert=arg.v,
        withfile=arg.H,
        fileonly=arg.l,
        fields=arg.f,
        ignorecase=arg.i,
        hdu=arg.hdu,
    )
def __arg_parser():
    """Build the ArgumentParser shared by the CLI and the python ``main()``."""
    import argparse
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        fromfile_prefix_chars='@',
        epilog='exit code:\n'
               ' 0 if any header matched pattern\n'
               ' 1 if no match found\n\n' + commons.version_string(),
        description='grep-like utility for fits (main) headers\n '
                    'until -f specified, searches in keys, values and comments')
    parser.add_argument('pattern', type=str,
                        help='reg-exp, use single dot . to dump all header fields')
    parser.add_argument('file', type=str, nargs='+',
                        help='FITS file(s), catalog file containing file names prefixed by @ can be provided')
    parser.add_argument('-i', action='store_true',
                        help='ignore case')
    parser.add_argument('-v', action='store_true',
                        help='invert match')
    parser.add_argument('-H', action='store_true',
                        help='add filename to each found line')
    parser.add_argument('-l', action='store_true',
                        help='print filenames with matches only')
    parser.add_argument('-f', action='append', metavar='FIELD',
                        help='matches only specified FIELD\'s value; can be provided multiple '
                             'times to match several fields; -f* limits search to values but searches '
                             'in all fields')
    parser.add_argument('-n', '--hdu',  metavar='x,y,z,...',
                        help='HDU(s) to scan, comma-separated-list; default: 0')
    return parser
def main(pattern, file, **kwargs):
    """Entry point for python script calls. Parameters identical to command line"""
    # Build an argument namespace from the parser's defaults, with the given
    # positionals and kwargs applied on top (mirrors a command-line parse).
    args = commons.bunch_kwargs(__arg_parser(), positional=[pattern, file], **kwargs)
    # Run the shared worker used by both the CLI and python callers.
    return __do(args)
def info():
    """Prints commandline help message"""
    # Delegate printing to the shared commons helper, passing our parser.
    commons.info(__arg_parser())
def commandline_entry():
    """Command-line entry point; returns a shell exit code (0 = matches found)."""
    parsed = __arg_parser().parse_args()
    # Shared worker used by both the CLI and python callers.
    matches = __do(parsed)
    if matches > 0:
        return 0
    return 1
if __name__ == '__main__':
    # Script execution: run the CLI and propagate its exit status.
    code = commandline_entry()
    exit(code)
| majkelx/astwro | astwro/tools/grepfitshdr.py | Python | mit | 5,442 |
#!/usr/bin/env python3
import os
import sys
# Fail fast on unsupported interpreters.  An explicit check keeps working
# under ``python -O`` (which strips ``assert`` statements).
if sys.version_info < (3, 5):
    raise RuntimeError("This project requires Python 3.5 or newer")
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "courses.settings")
    # Imported lazily so a missing Django install only fails when the
    # script is actually executed, not on import.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| sfu-fas/coursys | manage.py | Python | gpl-3.0 | 285 |
from django.db import connection, transaction
from django.db.models import signals, get_model
from django.db.models.fields import AutoField, Field, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, get_ul_class
from django.db.models.fields import FieldDoesNotExist
from django.db.models.related import RelatedObject
from django.db.models.query_utils import QueryWrapper
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy, string_concat, ungettext, ugettext as _
from django.utils.functional import curry
from django.utils.encoding import smart_unicode
from django.core import validators
from django import oldforms
from django import newforms as forms
from django.dispatch import dispatcher
try:
    set
except NameError:
    from sets import Set as set # Python 2.3 fallback
# Values for Relation.edit_inline.
TABULAR, STACKED = 1, 2
# Placeholder string used to declare a relation that points back at the
# defining model itself: ForeignKey('self').
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
# Maps (app_label, model_name) -> [(cls, field), ...] for string-named
# relations whose target model is not loaded yet; entries are resolved by
# do_pending_lookups() when class_prepared fires for the target model.
pending_lookups = {}
def add_lazy_relation(cls, field, relation):
    """
    Adds a lookup on ``cls`` when a related field is defined using a string,
    i.e.::
        class MyModel(Model):
            fk = ForeignKey("AnotherModel")
    The string may be RECURSIVE_RELATIONSHIP_CONSTANT ("self") for a
    recursive relation, a bare model name for a model in the same app, or
    "app_label.ModelName" for a model in a different app.
    If the target model has not been loaded yet -- almost a given when
    using lazy relationships -- the relation is queued in pending_lookups
    and set up once the class_prepared signal fires for the target.
    """
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        # Recursive relation: the target is the defining model itself.
        app_label = cls._meta.app_label
        model_name = cls.__name__
    else:
        try:
            # "app.Model" style reference.
            app_label, model_name = relation.split(".")
        except ValueError:
            # Not exactly one dot: treat the whole string as a model
            # name within the current application.
            app_label = cls._meta.app_label
            model_name = relation
    # get_model returns None when the related model is not registered yet.
    model = get_model(app_label, model_name, False)
    if model is None:
        # Defer: do_pending_lookups() finishes the job on class_prepared.
        pending_lookups.setdefault((app_label, model_name), []).append((cls, field))
    else:
        field.rel.to = model
        field.do_related_class(model, cls)
def do_pending_lookups(sender):
    """
    Handle any pending relations to the sending model. Sent from class_prepared.
    """
    key = (sender._meta.app_label, sender.__name__)
    # Pop (not get): each queued relation must be resolved exactly once.
    for cls, field in pending_lookups.pop(key, []):
        field.rel.to = sender
        field.do_related_class(sender, cls)
# Resolve deferred string-named relations as soon as each model class is ready.
dispatcher.connect(do_pending_lookups, signal=signals.class_prepared)
def manipulator_valid_rel_key(f, self, field_data, all_data):
    "Validates that the value is a valid foreign key"
    klass = f.rel.to
    try:
        # Look the related object up by the field this relation targets
        # (usually the primary key); existence is all that matters here.
        klass._default_manager.get(**{f.rel.field_name: field_data})
    except klass.DoesNotExist:
        raise validators.ValidationError, _("Please enter a valid %s.") % f.verbose_name
# HACK: mixin placed before Field in subclass MROs (e.g. ForeignKey) so that
# relation wiring happens during contribute_to_class.
class RelatedField(object):
    """Mixin providing the machinery shared by all relation fields:
    class contribution, lazy (string) target resolution, and PK-based
    lookup value preparation."""
    def contribute_to_class(self, cls, name):
        sup = super(RelatedField, self)
        # Add an accessor to allow easy determination of the related query path for this field
        self.related_query_name = curry(self._get_related_query_name, cls._meta)
        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name)
        other = self.rel.to
        if isinstance(other, basestring):
            # String target: defer until the target model class is prepared.
            add_lazy_relation(cls, self, other)
        else:
            self.do_related_class(other, cls)
        if not cls._meta.abstract and self.rel.related_name:
            # Allow '%(class)s' style related_name templates on concrete models.
            self.rel.related_name = self.rel.related_name % {'class': cls.__name__.lower()}
    def set_attributes_from_rel(self):
        self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
        self.verbose_name = self.verbose_name or self.rel.to._meta.verbose_name
        self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
    def do_related_class(self, other, cls):
        self.set_attributes_from_rel()
        related = RelatedObject(other, cls, self)
        self.contribute_to_related_class(other, related)
    def get_db_prep_lookup(self, lookup_type, value):
        # If we are doing a lookup on a Related Field, we must be
        # comparing object instances. The value should be the PK of value,
        # not value itself.
        def pk_trace(value):
            # Value may be a primary key, or an object held in a relation.
            # If it is an object, then we need to get the primary key value for
            # that object. In certain conditions (especially one-to-one relations),
            # the primary key may itself be an object - so we need to keep drilling
            # down until we hit a value that can be used for a comparison.
            v = value
            try:
                while True:
                    v = getattr(v, v._meta.pk.name)
            except AttributeError:
                pass
            return v
        if hasattr(value, 'as_sql'):
            # Subquery-like value: wrap its SQL instead of extracting a PK.
            sql, params = value.as_sql()
            return QueryWrapper(('(%s)' % sql), params)
        if lookup_type == 'exact':
            return [pk_trace(value)]
        if lookup_type == 'in':
            return [pk_trace(v) for v in value]
        elif lookup_type == 'isnull':
            return []
        raise TypeError, "Related Field has invalid lookup: %s" % lookup_type
    def _get_related_query_name(self, opts):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name by default, but this can be overridden with the
        # "related_name" option.
        return self.rel.related_name or opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        self.related = related
        # Per-instance attribute used to memoize the fetched related object.
        self.cache_name = '_%s_cache' % related.field.name
    def __get__(self, instance, instance_type=None):
        if instance is None:
            raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
        try:
            # Fast path: related object already fetched and cached.
            return getattr(instance, self.cache_name)
        except AttributeError:
            params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
            rel_obj = self.related.model._default_manager.get(**params)
            setattr(instance, self.cache_name, rel_obj)
            return rel_obj
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
        # Set the value of the related field
        setattr(value, self.related.field.rel.get_related_field().attname, instance)
        # Clear the cache, if it exists
        try:
            delattr(value, self.related.field.get_cache_name())
        except AttributeError:
            pass
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.field.name
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except AttributeError:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
rel_obj = self.field.rel.to._default_manager.get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self._field.name
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except AttributeError:
val = None
setattr(instance, self.field.attname, val)
# Clear the cache, if it exists
try:
delattr(instance, self.field.get_cache_name())
except AttributeError:
pass
class ForeignRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ForeignKey pointed at them by
    # some other model. In the example "poll.choice_set", the choice_set
    # attribute is a ForeignRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related   # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        # Captured by the closures below so the manager methods can refer to
        # the field and the owning instance without storing them explicitly.
        rel_field = self.related.field
        rel_model = self.related.model
        # Dynamically create a class that subclasses the related
        # model's default manager.
        superclass = self.related.model._default_manager.__class__
        class RelatedManager(superclass):
            def get_query_set(self):
                return superclass.get_query_set(self).filter(**(self.core_filters))
            def add(self, *objs):
                for obj in objs:
                    setattr(obj, rel_field.name, instance)
                    obj.save()
            add.alters_data = True
            def create(self, **kwargs):
                new_obj = self.model(**kwargs)
                self.add(new_obj)
                return new_obj
            create.alters_data = True
            # remove() and clear() are only provided if the ForeignKey can have a value of null.
            if rel_field.null:
                def remove(self, *objs):
                    val = getattr(instance, rel_field.rel.get_related_field().attname)
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        if getattr(obj, rel_field.attname) == val:
                            setattr(obj, rel_field.name, None)
                            obj.save()
                        else:
                            raise rel_field.rel.to.DoesNotExist, "%r is not related to %r." % (obj, instance)
                remove.alters_data = True
                def clear(self):
                    for obj in self.all():
                        setattr(obj, rel_field.name, None)
                        obj.save()
                clear.alters_data = True
        manager = RelatedManager()
        # Restrict the manager's base queryset to rows pointing at *instance*.
        manager.core_filters = {'%s__pk' % rel_field.name: getattr(instance, rel_field.rel.get_related_field().attname)}
        manager.model = self.related.model
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        if self.related.field.null:
            manager.clear()
        manager.add(*value)
def create_many_related_manager(superclass):
    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for many-to-many related objects."""
    class ManyRelatedManager(superclass):
        def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
                join_table=None, source_col_name=None, target_col_name=None):
            super(ManyRelatedManager, self).__init__()
            self.core_filters = core_filters
            self.model = model
            self.symmetrical = symmetrical
            self.instance = instance
            self.join_table = join_table
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            # The owning instance's PK anchors every row in the join table.
            self._pk_val = self.instance._get_pk_val()
            if self._pk_val is None:
                raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % model)
        def get_query_set(self):
            return superclass.get_query_set(self).filter(**(self.core_filters))
        def add(self, *objs):
            self._add_items(self.source_col_name, self.target_col_name, *objs)
            # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
            if self.symmetrical:
                self._add_items(self.target_col_name, self.source_col_name, *objs)
        add.alters_data = True
        def remove(self, *objs):
            self._remove_items(self.source_col_name, self.target_col_name, *objs)
            # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
            if self.symmetrical:
                self._remove_items(self.target_col_name, self.source_col_name, *objs)
        remove.alters_data = True
        def clear(self):
            self._clear_items(self.source_col_name)
            # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
            if self.symmetrical:
                self._clear_items(self.target_col_name)
        clear.alters_data = True
        def create(self, **kwargs):
            new_obj = self.model(**kwargs)
            new_obj.save()
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def _add_items(self, source_col_name, target_col_name, *objs):
            # join_table: name of the m2m link table
            # source_col_name: the PK colname in join_table for the source object
            # target_col_name: the PK colname in join_table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        new_ids.add(obj._get_pk_val())
                    else:
                        # Assume obj is already a primary key value.
                        new_ids.add(obj)
                # Add the newly created or already existing objects to the join table.
                # First find out which items are already added, to avoid adding them twice
                cursor = connection.cursor()
                cursor.execute("SELECT %s FROM %s WHERE %s = %%s AND %s IN (%s)" % \
                    (target_col_name, self.join_table, source_col_name,
                    target_col_name, ",".join(['%s'] * len(new_ids))),
                    [self._pk_val] + list(new_ids))
                existing_ids = set([row[0] for row in cursor.fetchall()])
                # Add the ones that aren't there already
                for obj_id in (new_ids - existing_ids):
                    cursor.execute("INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % \
                        (self.join_table, source_col_name, target_col_name),
                        [self._pk_val, obj_id])
                transaction.commit_unless_managed()
        def _remove_items(self, source_col_name, target_col_name, *objs):
            # source_col_name: the PK colname in join_table for the source object
            # target_col_name: the PK colname in join_table for the target object
            # *objs - objects to remove
            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        old_ids.add(obj._get_pk_val())
                    else:
                        old_ids.add(obj)
                # Remove the specified objects from the join table
                cursor = connection.cursor()
                cursor.execute("DELETE FROM %s WHERE %s = %%s AND %s IN (%s)" % \
                    (self.join_table, source_col_name,
                    target_col_name, ",".join(['%s'] * len(old_ids))),
                    [self._pk_val] + list(old_ids))
                transaction.commit_unless_managed()
        def _clear_items(self, source_col_name):
            # source_col_name: the PK colname in join_table for the source object
            cursor = connection.cursor()
            cursor.execute("DELETE FROM %s WHERE %s = %%s" % \
                (self.join_table, source_col_name),
                [self._pk_val])
            transaction.commit_unless_managed()
    return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField pointed at them by
    # some other model (rather than having a ManyToManyField themselves).
    # In the example "publication.article_set", the article_set attribute is a
    # ManyRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model = self.related.model
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass)
        qn = connection.ops.quote_name
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
            instance=instance,
            symmetrical=False,
            # This is the *reverse* side of the m2m, so source/target column
            # names are swapped relative to ReverseManyRelatedObjectsDescriptor.
            join_table=qn(self.related.field.m2m_db_table()),
            source_col_name=qn(self.related.field.m2m_reverse_name()),
            target_col_name=qn(self.related.field.m2m_column_name())
        )
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        manager = self.__get__(instance)
        # Assignment replaces the whole related set.
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField defined in their
    # model (rather than having another model pointed *at* them).
    # In the example "article.publications", the publications attribute is a
    # ReverseManyRelatedObjectsDescriptor instance.
    def __init__(self, m2m_field):
        self.field = m2m_field
    def __get__(self, instance, instance_type=None):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        # Dynamically create a class that subclasses the related
        # model's default manager.
        rel_model=self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_many_related_manager(superclass)
        qn = connection.ops.quote_name
        manager = RelatedManager(
            model=rel_model,
            core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
            instance=instance,
            # Mirror rows are only maintained for symmetrical self-relations.
            symmetrical=(self.field.rel.symmetrical and instance.__class__ == rel_model),
            join_table=qn(self.field.m2m_db_table()),
            source_col_name=qn(self.field.m2m_column_name()),
            target_col_name=qn(self.field.m2m_reverse_name())
        )
        return manager
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError, "Manager must be accessed via instance"
        manager = self.__get__(instance)
        # Assignment replaces the whole related set.
        manager.clear()
        manager.add(*value)
class ManyToOneRel(object):
    """Descriptor of a many-to-one relation: the target model/field plus the
    admin/inline options carried by a ForeignKey."""
    def __init__(self, to, field_name, num_in_admin=3, min_num_in_admin=None,
        max_num_in_admin=None, num_extra_on_change=1, edit_inline=False,
        related_name=None, limit_choices_to=None, lookup_overrides=None,
        raw_id_admin=False, parent_link=False):
        try:
            to._meta
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.to, self.field_name = to, field_name
        self.num_in_admin, self.edit_inline = num_in_admin, edit_inline
        self.min_num_in_admin, self.max_num_in_admin = min_num_in_admin, max_num_in_admin
        self.num_extra_on_change, self.related_name = num_extra_on_change, related_name
        if limit_choices_to is None:
            limit_choices_to = {}
        self.limit_choices_to = limit_choices_to
        self.lookup_overrides = lookup_overrides or {}
        self.raw_id_admin = raw_id_admin
        # True: the reverse relation yields many objects (vs. OneToOneRel).
        self.multiple = True
        self.parent_link = parent_link
    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.

        Raises FieldDoesNotExist (from django.db.models.fields) when the
        named field exists only as a reverse relation, not a direct field.
        """
        data = self.to._meta.get_field_by_name(self.field_name)
        if not data[2]:
            raise FieldDoesNotExist("No related field named '%s'" %
                    self.field_name)
        return data[0]
class OneToOneRel(ManyToOneRel):
    """ManyToOneRel variant for one-to-one relations (``multiple = False``)."""
    def __init__(self, to, field_name, num_in_admin=0, min_num_in_admin=None,
        max_num_in_admin=None, num_extra_on_change=None, edit_inline=False,
        related_name=None, limit_choices_to=None, lookup_overrides=None,
        raw_id_admin=False, parent_link=False):
        # NOTE: *_num_in_admin and num_extra_on_change are intentionally
        # ignored here. We accept them as parameters only to match the calling
        # signature of ManyToOneRel.__init__().
        super(OneToOneRel, self).__init__(to, field_name, num_in_admin,
                edit_inline=edit_inline, related_name=related_name,
                limit_choices_to=limit_choices_to,
                lookup_overrides=lookup_overrides, raw_id_admin=raw_id_admin,
                parent_link=parent_link)
        # At most one related object exists on the reverse side.
        self.multiple = False
class ManyToManyRel(object):
    """Descriptor of a many-to-many relation: the target model plus the
    admin/interface options carried by a ManyToManyField."""
    def __init__(self, to, num_in_admin=0, related_name=None,
        filter_interface=None, limit_choices_to=None, raw_id_admin=False, symmetrical=True):
        self.to = to
        self.num_in_admin = num_in_admin
        self.related_name = related_name
        self.filter_interface = filter_interface
        if limit_choices_to is None:
            limit_choices_to = {}
        self.limit_choices_to = limit_choices_to
        # M2M relations are never edited inline.
        self.edit_inline = False
        self.raw_id_admin = raw_id_admin
        self.symmetrical = symmetrical
        self.multiple = True
        # The two admin widgets are mutually exclusive.
        assert not (self.raw_id_admin and self.filter_interface), "ManyToManyRels may not use both raw_id_admin and filter_interface"
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
to_field = to_field or to._meta.pk.name
kwargs['verbose_name'] = kwargs.get('verbose_name', '')
if 'edit_inline_type' in kwargs:
import warnings
warnings.warn("edit_inline_type is deprecated. Use edit_inline instead.", DeprecationWarning)
kwargs['edit_inline'] = kwargs.pop('edit_inline_type')
kwargs['rel'] = rel_class(to, to_field,
num_in_admin=kwargs.pop('num_in_admin', 3),
min_num_in_admin=kwargs.pop('min_num_in_admin', None),
max_num_in_admin=kwargs.pop('max_num_in_admin', None),
num_extra_on_change=kwargs.pop('num_extra_on_change', 1),
edit_inline=kwargs.pop('edit_inline', False),
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
lookup_overrides=kwargs.pop('lookup_overrides', None),
raw_id_admin=kwargs.pop('raw_id_admin', False),
parent_link=kwargs.pop('parent_link', False))
Field.__init__(self, **kwargs)
self.db_index = True
def get_attname(self):
return '%s_id' % self.name
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
def prepare_field_objs_and_params(self, manipulator, name_prefix):
params = {'validator_list': self.validator_list[:], 'member_name': name_prefix + self.attname}
if self.rel.raw_id_admin:
field_objs = self.get_manipulator_field_objs()
params['validator_list'].append(curry(manipulator_valid_rel_key, self, manipulator))
else:
if self.radio_admin:
field_objs = [oldforms.RadioSelectField]
params['ul_class'] = get_ul_class(self.radio_admin)
else:
if self.null:
field_objs = [oldforms.NullSelectField]
else:
field_objs = [oldforms.SelectField]
params['choices'] = self.get_choices_default()
return field_objs, params
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.rel.get_related_field().attname)
return field_default
def get_manipulator_field_objs(self):
rel_field = self.rel.get_related_field()
if self.rel.raw_id_admin and not isinstance(rel_field, AutoField):
return rel_field.get_manipulator_field_objs()
else:
return [oldforms.IntegerField]
def get_db_prep_save(self, value):
if value == '' or value == None:
return None
else:
return self.rel.get_related_field().get_db_prep_save(value)
def flatten_data(self, follow, obj=None):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# (radio_admin=False), we have to check that the length of choices
# is *2*, not 1, because SelectFields always have an initial
# "blank" value. Otherwise (radio_admin=True), we check that the
# length is 1.
if not self.blank and (not self.rel.raw_id_admin or self.choices):
choice_list = self.get_choices_default()
if self.radio_admin and len(choice_list) == 1:
return {self.attname: choice_list[0][0]}
if not self.radio_admin and len(choice_list) == 2:
return {self.attname: choice_list[1][0]}
return Field.flatten_data(self, follow, obj)
def contribute_to_class(self, cls, name):
super(ForeignKey, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
def formfield(self, **kwargs):
defaults = {'form_class': forms.ModelChoiceField, 'queryset': self.rel.to._default_manager.all()}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
    def db_type(self):
        """Return the column type of the field this ForeignKey points at."""
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        rel_field = self.rel.get_related_field()
        if isinstance(rel_field, (AutoField, PositiveIntegerField, PositiveSmallIntegerField)):
            return IntegerField().db_type()
        return rel_field.db_type()
class OneToOneField(ForeignKey):
    """
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that always carries a "unique" constraint with it and the reverse relation
    always returns the object pointed to (since there will only ever be one),
    rather than returning a list.
    """
    def __init__(self, to, to_field=None, **kwargs):
        kwargs['unique'] = True  # the defining property of one-to-one
        if 'num_in_admin' not in kwargs:
            kwargs['num_in_admin'] = 0
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
    def contribute_to_related_class(self, cls, related):
        # The reverse accessor yields the single related object rather than
        # a manager of many.
        setattr(cls, related.get_accessor_name(),
            SingleRelatedObjectDescriptor(related))
        # Record the first one-to-one field on the related model's options.
        if not cls._meta.one_to_one_field:
            cls._meta.one_to_one_field = self
class ManyToManyField(RelatedField, Field):
    """A many-to-many relation, backed by a separate join table."""
    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        # Pop all relation/admin-specific options into the rel object so that
        # Field.__init__ only receives the kwargs it understands.
        kwargs['rel'] = ManyToManyRel(to,
            num_in_admin=kwargs.pop('num_in_admin', 0),
            related_name=kwargs.pop('related_name', None),
            filter_interface=kwargs.pop('filter_interface', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            raw_id_admin=kwargs.pop('raw_id_admin', False),
            symmetrical=kwargs.pop('symmetrical', True))
        self.db_table = kwargs.pop('db_table', None)
        if kwargs["rel"].raw_id_admin:
            # Raw-id admin input is a comma-separated ID list; validate it.
            kwargs.setdefault("validator_list", []).append(self.isValidIDList)
        Field.__init__(self, **kwargs)
        # Append a widget-appropriate usage hint to any existing help text.
        if self.rel.raw_id_admin:
            msg = ugettext_lazy('Separate multiple IDs with commas.')
        else:
            msg = ugettext_lazy('Hold down "Control", or "Command" on a Mac, to select more than one.')
        self.help_text = string_concat(self.help_text, ' ', msg)
    def get_manipulator_field_objs(self):
        """Pick the oldforms field: a raw-id box or a sized multi-select."""
        if self.rel.raw_id_admin:
            return [oldforms.RawIdAdminField]
        else:
            choices = self.get_choices_default()
            # Clamp the select box between 5 and 15 visible rows.
            return [curry(oldforms.SelectMultipleField, size=min(max(len(choices), 5), 15), choices=choices)]
    def get_choices_default(self):
        # A multi-select widget needs no extra "blank" entry.
        return Field.get_choices(self, include_blank=False)
    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.db_table:
            return self.db_table
        else:
            return '%s_%s' % (opts.db_table, self.name)
    def _get_m2m_column_name(self, related):
        "Function that can be curried to provide the source column name for the m2m table"
        # If this is an m2m relation to self, avoid the inevitable name clash
        if related.model == related.parent_model:
            return 'from_' + related.model._meta.object_name.lower() + '_id'
        else:
            return related.model._meta.object_name.lower() + '_id'
    def _get_m2m_reverse_name(self, related):
        "Function that can be curried to provide the related column name for the m2m table"
        # If this is an m2m relation to self, avoid the inevitable name clash
        if related.model == related.parent_model:
            return 'to_' + related.parent_model._meta.object_name.lower() + '_id'
        else:
            return related.parent_model._meta.object_name.lower() + '_id'
    def isValidIDList(self, field_data, all_data):
        "Validates that the value is a valid list of foreign keys"
        mod = self.rel.to
        try:
            pks = map(int, field_data.split(','))
        except ValueError:
            # the CommaSeparatedIntegerField validator will catch this error
            return
        objects = mod._default_manager.in_bulk(pks)
        if len(objects) != len(pks):
            # Report exactly the keys that resolved to no object.
            badkeys = [k for k in pks if k not in objects]
            raise validators.ValidationError, ungettext("Please enter valid %(self)s IDs. The value %(value)r is invalid.",
                "Please enter valid %(self)s IDs. The values %(value)r are invalid.", len(badkeys)) % {
                'self': self.verbose_name,
                'value': len(badkeys) == 1 and badkeys[0] or tuple(badkeys),
            }
    def flatten_data(self, follow, obj = None):
        """Return {name: ids} initial data for a manipulator."""
        new_data = {}
        if obj:
            instance_ids = [instance._get_pk_val() for instance in getattr(obj, self.name).all()]
            if self.rel.raw_id_admin:
                # The raw-id widget wants a single comma-separated string.
                new_data[self.name] = u",".join([smart_unicode(id) for id in instance_ids])
            else:
                new_data[self.name] = instance_ids
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank and not self.rel.edit_inline and not self.rel.raw_id_admin:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    new_data[self.name] = [choices_list[0][0]]
        return new_data
    def contribute_to_class(self, cls, name):
        super(ManyToManyField, self).contribute_to_class(cls, name)
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
    def contribute_to_related_class(self, cls, related):
        # m2m relations to self do not have a ManyRelatedObjectsDescriptor,
        # as it would be redundant - unless the field is non-symmetrical.
        if related.model != related.parent_model or not self.rel.symmetrical:
            # Add the descriptor for the m2m relation
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_column_name, related)
        self.m2m_reverse_name = curry(self._get_m2m_reverse_name, related)
    def set_attributes_from_rel(self):
        # Unlike ForeignKey, an m2m field does not mirror attributes from
        # the related field.
        pass
    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()
    def save_form_data(self, instance, data):
        # Assigning through the descriptor updates the join table.
        setattr(instance, self.attname, data)
    def formfield(self, **kwargs):
        """Return a newforms ModelMultipleChoiceField for this relation."""
        defaults = {'form_class': forms.ModelMultipleChoiceField, 'queryset': self.rel.to._default_manager.all()}
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            defaults['initial'] = [i._get_pk_val() for i in defaults['initial']]
        return super(ManyToManyField, self).formfield(**defaults)
    def db_type(self):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None
| pelle/talk.org | django/db/models/fields/related.py | Python | gpl-3.0 | 37,425 |
from pledger.account import Account, AccountFactory
from pledger.parser import Parser
from pledger.directive import *
from pledger.ledger_processor import LedgerProcessor
import pytest
class ProcessorStub(object):
    """Test double standing in for LedgerProcessor.

    Tracks the current account prefix via a real AccountFactory tree and
    records include requests instead of touching the filesystem.
    """

    def __init__(self):
        repo = AccountFactory()
        self.repo = repo
        self.account = repo.root()
        self.included = []

    def add_account_prefix(self, prefix):
        """Descend into the child account named *prefix*."""
        self.account = self.account[prefix]

    def remove_account_prefix(self):
        """Move back up to the parent of the current account."""
        self.account = self.account.parent

    def include(self, filename):
        """Record the requested include instead of parsing any file."""
        self.included.append(filename)
@pytest.fixture
def processor(parser):
    """Provide a fresh ProcessorStub for each test."""
    # NOTE(review): the `parser` argument requests a fixture (presumably
    # defined in conftest.py) that is never used here -- confirm whether it
    # is needed for side effects or can be dropped.
    return ProcessorStub()
def test_directive_registry():
    """The directive registry maps known names to classes, unknown to None."""
    assert Directive.directives['account'] == AccountDirective
    assert Directive.directives.get('non-existing-directive') is None
def test_unsupported_directive(parser):
    """Parsing an unknown !directive raises, naming the offending directive."""
    with pytest.raises(UnsupportedDirective) as e:
        parser.parse_directive("!nonexisting")
    assert 'nonexisting' == str(e.value)
def test_account_directive(processor):
    """Executing an account directive pushes the account prefix."""
    directive = AccountDirective("Assets")
    assert processor.account.name == ""  # starts at the root account
    directive.execute(processor)
    assert processor.account.name == "Assets"
def test_end_account_directive(processor):
    """Executing an end-account directive pops the current account prefix."""
    directive = EndAccountDirective()
    processor.add_account_prefix("Assets")
    directive.execute(processor)
    assert processor.account.name == ""  # back at the root
def test_include_directive(processor):
    """Executing an include directive asks the processor to include the file."""
    directive = IncludeDirective("test.dat")
    assert processor.included == []
    directive.execute(processor)
    assert processor.included == ["test.dat"]
def test_directive_parsing(parser):
    """The parser turns '!include <file>' into a directive holding the path."""
    directive = parser.parse_directive("!include test.dat")
    assert directive.filename == "test.dat"
| pcapriotti/pledger | tests/test_directive.py | Python | mit | 1,746 |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FeatureValue(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    def __init__(self, id=None, id_feature=None, custom=None, value=None):
        """
        FeatureValue - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> declared Swagger type (drives to_dict traversal).
        self.swagger_types = {
            'id': 'int',
            'id_feature': 'int',
            'custom': 'bool',
            'value': 'list[I18nField]'
        }

        # Attribute name -> JSON key in the API payload.
        self.attribute_map = {
            'id': 'id',
            'id_feature': 'id_feature',
            'custom': 'custom',
            'value': 'value'
        }

        self._id = id
        self._id_feature = id_feature
        self._custom = custom
        self._value = value

    @property
    def id(self):
        """
        Gets the id of this FeatureValue.

        :return: The id of this FeatureValue.
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this FeatureValue.

        :param id: The id of this FeatureValue.
        :type: int
        """
        self._id = id

    @property
    def id_feature(self):
        """
        Gets the id_feature of this FeatureValue.

        :return: The id_feature of this FeatureValue.
        :rtype: int
        """
        return self._id_feature

    @id_feature.setter
    def id_feature(self, id_feature):
        """
        Sets the id_feature of this FeatureValue.

        :param id_feature: The id_feature of this FeatureValue.
        :type: int
        """
        self._id_feature = id_feature

    @property
    def custom(self):
        """
        Gets the custom of this FeatureValue.

        :return: The custom of this FeatureValue.
        :rtype: bool
        """
        return self._custom

    @custom.setter
    def custom(self, custom):
        """
        Sets the custom of this FeatureValue.

        :param custom: The custom of this FeatureValue.
        :type: bool
        """
        self._custom = custom

    @property
    def value(self):
        """
        Gets the value of this FeatureValue.

        :return: The value of this FeatureValue.
        :rtype: list[I18nField]
        """
        return self._value

    @value.setter
    def value(self, value):
        """
        Sets the value of this FeatureValue.

        :param value: The value of this FeatureValue.
        :type: list[I18nField]
        """
        self._value = value

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # .items() works identically on Python 2 and 3 here, so the six
        # iteritems helper is unnecessary.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against comparing with unrelated types: the previous
        # implementation accessed other.__dict__ unconditionally and raised
        # AttributeError for objects without one (e.g. str, int).
        if not isinstance(other, FeatureValue):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| kinow-io/kinow-python-sdk | kinow_client/models/feature_value.py | Python | apache-2.0 | 4,404 |
"""
Tests for OEShape color utilities.
"""
import numpy as np
import unittest
from openeye.oechem import *
from openeye.oeshape import *
from ..color import ColorForceField
class TestColorForceField(unittest.TestCase):
    """
    Tests for ColorForceField.
    """
    def setUp(self):
        """
        Set up tests.
        """
        # Initialize from the built-in implicit Mills-Dean color scheme.
        self.color_ff = ColorForceField()
        self.color_ff.Init(OEColorFFType_ImplicitMillsDean)

    def test_get_interactions(self):
        """
        Test ColorForceField.get_interactions.
        """
        interactions = self.color_ff.get_interactions()
        # This scheme is expected to define six interactions, each between
        # a color type and itself, with gaussian decay, a negative weight,
        # and a positive radius.
        assert len(interactions) == 6
        for (a_type, b_type, decay, weight, radius) in interactions:
            assert a_type == b_type
            assert decay == 'gaussian'
            assert weight < 0
            assert radius > 0

    def test_get_string(self):
        """
        Test ColorForceField.get_string.
        """
        # Round-trip: a force field re-initialized from the serialized
        # string must reproduce the same interactions.
        ifs = oeisstream(self.color_ff.get_string())
        color_ff = ColorForceField()
        color_ff.Init(ifs)
        for a_interaction, b_interaction in zip(
                color_ff.get_interactions(), self.color_ff.get_interactions()):
            assert np.array_equal(a_interaction, b_interaction)

    def test_isolate_interactions(self):
        """
        Test ColorForceField.isolate_interactions.
        """
        # Each isolated force field carries exactly one interaction, and the
        # union over all of them equals the original interaction set.
        interactions = set()
        for color_ff in self.color_ff.isolate_interactions():
            assert len(color_ff.get_interactions()) == 1
            for interaction in color_ff.get_interactions():
                interactions.add(interaction)
        assert interactions == set(self.color_ff.get_interactions())
| skearnes/color-features | oe_utils/shape/tests/test_color.py | Python | bsd-3-clause | 1,693 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.