code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
from DistributedObjectUD import DistributedObjectUD
from direct.directnotify.DirectNotifyGlobal import directNotify
import sys
class DistributedObjectGlobalUD(DistributedObjectUD):
    """A UD-side distributed object with a fixed, globally-known doId.

    Global objects listen for messages on their own doId channel and never
    return that channel to the allocator.
    """
    notify = directNotify.newCategory('DistributedObjectGlobalUD')

    # Global objects keep their channel for the lifetime of the process.
    doNotDeallocateChannel = 1
    isGlobalDistObj = 1

    def __init__(self, air):
        DistributedObjectUD.__init__(self, air)
        # Namespace shared by execCommand()/__execMessage(); "self" is always bound.
        self.ExecNamespace = {"self": self}

    def announceGenerate(self):
        """Begin receiving messages addressed directly to our doId."""
        self.air.registerForChannel(self.doId)
        DistributedObjectUD.announceGenerate(self)

    def delete(self):
        """Stop listening on our doId channel before tearing down."""
        self.air.unregisterForChannel(self.doId)
        ## self.air.removeDOFromTables(self)
        DistributedObjectUD.delete(self)

    def execCommand(self, command, mwMgrId, avId, zoneId):
        """Run a magic-word command and send the (truncated) result back.

        NOTE(review): executes arbitrary code via eval/exec -- this path must
        only be reachable by trusted/staff users.
        """
        limit = config.GetInt("ai-debug-length", 300)
        text = str(self.__execMessage(command))[:limit]
        dclass = uber.air.dclassesByName.get("PiratesMagicWordManagerAI")
        dg = dclass.aiFormatUpdate(
            "setMagicWordResponse", mwMgrId, (1 << 32) + avId, uber.air.ourChannel, [text])
        uber.air.send(dg)

    def __execMessage(self, message):
        """Evaluate *message* as an expression, falling back to exec().

        Returns the stringified result, 'ok' for a successful statement, or
        the stringified exception on failure.
        """
        if not self.ExecNamespace:
            # Import some useful variables into the ExecNamespace initially.
            exec('from pandac.PandaModules import *', globals(), self.ExecNamespace)
            #self.importExecNamespace()
        # First try the text as an expression so we can report its value.
        try:
            return str(eval(message, globals(), self.ExecNamespace))
        except SyntaxError:
            # Maybe it's only a statement, like "x = 1" or "import math".
            # Those aren't expressions, so eval() fails, but exec() works.
            try:
                exec(message, globals(), self.ExecNamespace)
                return 'ok'
            except:
                excType, excValue = sys.exc_info()[:2]
                return str(excValue) if excValue else str(excType)
        except:
            excType, excValue = sys.exc_info()[:2]
            return str(excValue) if excValue else str(excType)
|
mgracer48/panda3d
|
direct/src/distributed/DistributedObjectGlobalUD.py
|
Python
|
bsd-3-clause
| 2,339
|
# RNA Splicing
# rosalind.info/problems/splc/
import sys
class splc:
    """Solver for the Rosalind 'RNA Splicing' (SPLC) problem.

    Given a DNA string followed by intron substrings (one per line), remove
    every intron occurrence and translate the remaining exon sequence into a
    protein string, stopping at the first stop codon.
    """
    # DNA codon -> single-letter amino acid ('stop' marks stop codons).
    codons = {
        'TTT': 'F', 'CTT': 'L', 'ATT': 'I', 'GTT': 'V',
        'TTC': 'F', 'CTC': 'L', 'ATC': 'I', 'GTC': 'V',
        'TTA': 'L', 'CTA': 'L', 'ATA': 'I', 'GTA': 'V',
        'TTG': 'L', 'CTG': 'L', 'ATG': 'M', 'GTG': 'V',
        'TCT': 'S', 'CCT': 'P', 'ACT': 'T', 'GCT': 'A',
        'TCC': 'S', 'CCC': 'P', 'ACC': 'T', 'GCC': 'A',
        'TCA': 'S', 'CCA': 'P', 'ACA': 'T', 'GCA': 'A',
        'TCG': 'S', 'CCG': 'P', 'ACG': 'T', 'GCG': 'A',
        'TAT': 'Y', 'CAT': 'H', 'AAT': 'N', 'GAT': 'D',
        'TAC': 'Y', 'CAC': 'H', 'AAC': 'N', 'GAC': 'D',
        'TAA': 'stop', 'CAA': 'Q', 'AAA': 'K', 'GAA': 'E',
        'TAG': 'stop', 'CAG': 'Q', 'AAG': 'K', 'GAG': 'E',
        'TGT': 'C', 'CGT': 'R', 'AGT': 'S', 'GGT': 'G',
        'TGC': 'C', 'CGC': 'R', 'AGC': 'S', 'GGC': 'G',
        'TGA': 'stop', 'CGA': 'R', 'AGA': 'R', 'GGA': 'G',
        'TGG': 'W', 'CGG': 'R', 'AGG': 'R', 'GGG': 'G'
    }

    def main(self, dna_seq):
        """Translate the spliced DNA into a protein string.

        @param dna_seq: iterable of lines; first line is the DNA string,
            the remaining lines are introns to remove.
        @return: the protein string.
        @raise Exception: if the input is empty.
        """
        if not dna_seq:
            raise Exception('ERROR: File is empty.')
        data = [line.strip() for line in dna_seq]
        if not data:
            raise Exception('ERROR: File is empty.')
        dna = data[0]
        introns = data[1:]
        for intron in introns:
            dna = dna.replace(intron, '')
        protein = []
        for i in range(0, len(dna), 3):
            codon = dna[i:i + 3]
            if len(codon) < 3:
                # Bug fix: a trailing partial codon (sequence without a stop
                # codon, length not a multiple of 3) used to raise KeyError.
                break
            if self.codons[codon] == 'stop':
                break
            protein.append(self.codons[codon])
        return ''.join(protein)
if __name__ == '__main__':
    # Bug fix: the original read sys.argv[1] before checking it, so a missing
    # argument raised IndexError instead of the intended error message.
    if len(sys.argv) < 2 or not sys.argv[1]:
        raise Exception('ERROR: File name should not be empty!')
    filename = sys.argv[1]
    with open(filename, 'r') as seq_file:
        result = splc().main(seq_file)
        print(result)
|
yuriyshapovalov/Prototypes
|
Rosalind/splc.py
|
Python
|
apache-2.0
| 1,917
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG demonstrating the usage of the TaskFlow API to execute Python functions natively and within a
virtual environment.
"""
import logging
import shutil
import time
from datetime import datetime
from pprint import pprint
from airflow import DAG
from airflow.decorators import task
log = logging.getLogger(__name__)
# Example DAG: one context-printing task, five staggered sleep tasks, and an
# optional virtualenv task (only registered when virtualenv is installed).
with DAG(
    dag_id='example_python_operator',
    schedule_interval=None,
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=['example'],
) as dag:
    # [START howto_operator_python]
    @task(task_id="print_the_context")
    def print_context(ds=None, **kwargs):
        """Print the Airflow context and ds variable from the context."""
        pprint(kwargs)
        print(ds)
        return 'Whatever you return gets printed in the logs'

    run_this = print_context()
    # [END howto_operator_python]

    # [START howto_operator_python_kwargs]
    # Generate 5 sleeping tasks, sleeping from 0.0 to 0.4 seconds respectively
    for i in range(5):

        @task(task_id=f'sleep_for_{i}')
        def my_sleeping_function(random_base):
            """This is a function that will run within the DAG execution"""
            time.sleep(random_base)

        # Calling the decorated function binds the argument immediately,
        # so each task gets its own sleep duration.
        sleeping_task = my_sleeping_function(random_base=float(i) / 10)

        run_this >> sleeping_task
    # [END howto_operator_python_kwargs]

    if not shutil.which("virtualenv"):
        # Bug fix: corrected typo in the task name ("virtalenv_python").
        log.warning("The virtualenv_python example task requires virtualenv, please install it.")
    else:
        # [START howto_operator_python_venv]
        @task.virtualenv(
            task_id="virtualenv_python", requirements=["colorama==0.4.0"], system_site_packages=False
        )
        def callable_virtualenv():
            """
            Example function that will be performed in a virtual environment.

            Importing at the module level ensures that it will not attempt to import the
            library before it is installed.
            """
            from time import sleep

            from colorama import Back, Fore, Style

            print(Fore.RED + 'some red text')
            print(Back.GREEN + 'and with a green background')
            print(Style.DIM + 'and in dim text')
            print(Style.RESET_ALL)
            for _ in range(10):
                print(Style.DIM + 'Please wait...', flush=True)
                sleep(10)
            print('Finished')

        virtualenv_task = callable_virtualenv()
        # [END howto_operator_python_venv]
|
Acehaidrey/incubator-airflow
|
airflow/example_dags/example_python_operator.py
|
Python
|
apache-2.0
| 3,269
|
# Generated by Django 2.0.5 on 2018-06-11 11:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Invitation`` model."""

    # First migration of this app.
    initial = True

    dependencies = [
        # The user model is swappable, so depend on whichever one is configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Invitation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(blank=True, help_text='Provide a message for the invited user', max_length=300, verbose_name='Optional message')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sent_invitations', to=settings.AUTH_USER_MODEL)),
                ('to_user', models.ForeignKey(help_text='Please select a user you want to invite', on_delete=django.db.models.deletion.CASCADE, related_name='received_invitations', to=settings.AUTH_USER_MODEL, verbose_name='User to invite')),
            ],
        ),
    ]
|
matija94/show-me-the-code
|
learning_django/player/migrations/0001_initial.py
|
Python
|
mit
| 1,195
|
"""
Demo platform that has a couple of fake sensors.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
from blumate.const import ATTR_BATTERY_LEVEL, TEMP_CELSIUS
from blumate.helpers.entity import Entity
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Demo sensors."""
    sensors = [
        DemoSensor('Outside Temperature', 15.6, TEMP_CELSIUS, 12),
        DemoSensor('Outside Humidity', 54, '%', None),
    ]
    add_devices(sensors)
class DemoSensor(Entity):
    """Representation of a Demo sensor."""

    def __init__(self, name, state, unit_of_measurement, battery):
        """Initialize the sensor.

        ``battery`` is a battery level, or None when the sensor has no
        battery (see the humidity sensor in setup_platform).
        """
        self._name = name
        self._state = state
        self._unit_of_measurement = unit_of_measurement
        self._battery = battery

    @property
    def should_poll(self):
        """No polling needed for a demo sensor."""
        return False

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return self._unit_of_measurement

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # Bug fix: compare against None explicitly so a legitimate battery
        # level of 0 is still reported (the old truthiness test dropped it).
        if self._battery is not None:
            return {
                ATTR_BATTERY_LEVEL: self._battery,
            }
|
bdfoster/blumate
|
blumate/components/sensor/demo.py
|
Python
|
mit
| 1,564
|
import os
import glob

# Flatten Stanford-Dogs-style directories: "Images/<wnid>-<breed>/<file>"
# becomes "Images/<breed>/<breed>_<index>.jpg".
subdir = os.listdir('Images')
for d in subdir:
    if d != 'Images':
        path = os.path.join('Images', d)
        if os.path.isdir(path):
            # Directory names look like "<wnid>-<breed>"; keep only the breed
            # part (joining handles breeds that themselves contain '-').
            dogname = ''.join(d.split('-')[1:])
            if dogname != '':
                dogdir = os.listdir(path)
                # Bug fix: the original computed ``imgpath`` and then never
                # used it, recomputing the same join inline; use enumerate
                # and a single source path instead.
                for i, fname in enumerate(dogdir):
                    # NOTE(review): the extension is forced to ".jpg"
                    # regardless of the original file's extension -- confirm
                    # all inputs are JPEGs.
                    newimgpath = os.path.join(path, "%s_%s.jpg" % (dogname, i))
                    os.rename(os.path.join(path, fname), newimgpath)
                os.rename(path, os.path.join('Images', dogname))
|
nilrogen/UML-ML-MDR
|
scripts/rename-images.py
|
Python
|
gpl-2.0
| 618
|
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import sys
from wlauto import ResultProcessor
from wlauto.core.result import IterationResult
from wlauto.exceptions import ResultProcessorError
try:
import notify2
except ImportError:
notify2 = None
class NotifyProcessor(ResultProcessor):

    name = 'notify'
    description = '''Display a desktop notification when the run finishes
    Notifications only work in linux systems. It uses the generic
    freedesktop notification specification. For this results processor
    to work, you need to have python-notify installed in your system.
    '''

    def initialize(self, context):
        """Fail early unless we are on linux with notify2 available."""
        if sys.platform != 'linux2':
            raise ResultProcessorError('Notifications are only supported in linux')
        if not notify2:
            raise ResultProcessorError('notify2 not installed. Please install the notify2 package')
        notify2.init("Workload Automation")

    def process_run_result(self, result, context):
        """Show a desktop notification summarising iteration statuses."""
        num_iterations = sum(context.job_iteration_counts.values())
        counter = collections.Counter()
        # Use a distinct loop variable; the original shadowed the ``result``
        # parameter, which worked but was easy to misread.
        for iteration_result in result.iteration_results:
            counter[iteration_result.status] += 1
        score_board = []
        for status in IterationResult.values:
            if status in counter:
                score_board.append('{} {}'.format(counter[status], status))
        # Bug fix: corrected typo in the user-visible summary ("finised").
        summary = 'Workload Automation run finished'
        body = 'Ran a total of {} iterations: '.format(num_iterations)
        body += ', '.join(score_board)
        notification = notify2.Notification(summary, body)
        if not notification.show():
            self.logger.warning('Notification failed to show')
|
freedomtan/workload-automation
|
wlauto/result_processors/notify.py
|
Python
|
apache-2.0
| 2,244
|
#! /bin/env python
# -*- coding: utf-8 -*-
import sys
import datetime
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
############################################################################################
## Message
############################################################################################
class Message:
    """One SMS record parsed from a tab-separated backup line."""

    def __init__(self, listeInfo):
        # Fields: [date, time, direction, number, sender, text].
        date_part, time_part = listeInfo[0], listeInfo[1]
        self.dateheure = datetime.datetime.strptime(
            '{} {}'.format(date_part, time_part), '%Y-%m-%d %H:%M:%S')
        self.inout = listeInfo[2]
        self.numero = listeInfo[3]
        self.exp = listeInfo[4]
        # Drop the trailing newline from the raw line.
        self.message = _fromUtf8(listeInfo[5][:-1])
############################################################################################
## Conversation Window of 'name'
############################################################################################
class ConvWindow(QtGui.QScrollArea):
    """Scrollable window showing every message exchanged with ``name``."""

    def __init__(self, name, msgs):
        # msgs: dict mapping sender name -> list of Message objects.
        self.msgs = msgs
        self.name = _fromUtf8(name)
        QtGui.QScrollArea.__init__(self)
        self.setWindowTitle("Conversation {}".format(self.name))
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self.setSizePolicy(sizePolicy)
        self.setWidgetResizable(True)
        self.scrollAreaWidgetContents = QtGui.QWidget()
        self.gridLayout_2 = QtGui.QGridLayout(self.scrollAreaWidgetContents)
        self.gridLayout = QtGui.QGridLayout()
        # One row per message, in list order.
        for num, msg in enumerate(self.msgs[name]):
            self.dispmsg(msg, num)
        # Middle column stretches the most, so bubbles hug the edges.
        self.gridLayout.setColumnStretch(0, 1)
        self.gridLayout.setColumnStretch(1, 5)
        self.gridLayout.setColumnStretch(2, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
        # Vertical spacer pushes the messages to the top of the scroll area.
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout_2.addItem(spacerItem, 1, 0, 1, 1)
        self.setWidget(self.scrollAreaWidgetContents)

    def dispmsg(self, msg, num):
        """Add one message bubble at grid row ``num``.

        Incoming messages ('in') go left in orange; outgoing ('out') go
        right-aligned in green.
        """
        self.lab = QtGui.QLabel()
        self.lab.setText(msg.message)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
        self.lab.setSizePolicy(sizePolicy)
        self.lab.setWordWrap(True)
        if msg.inout == 'in':
            self.gridLayout.addWidget(self.lab, num, 0, 1, 2)
            self.lab.setStyleSheet("QLabel {border:2px solid grey; border-radius: 5px; background-color : orange; color : white;}")
        elif msg.inout == 'out':
            self.lab.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
            self.lab.setStyleSheet("QLabel {border:2px solid grey; border-radius: 5px; background-color : green; color : white;}")
            self.gridLayout.addWidget(self.lab, num, 1, 1, 2)
############################################################################################
# MainWindow select the person you want to see the conversation of
############################################################################################
class MainWindow(QtGui.QMainWindow):
    """Main window: a table of conversations; double-click, Enter, or the
    button opens the selected conversation in a ConvWindow."""

    def __init__(self):
        """ Show all conversations """
        QtGui.QMainWindow.__init__(self)
        self.setWindowTitle("Consultation SMS")
        self.resize(500, 600)
        self.centralwidget = QtGui.QWidget(self)
        self.gridLayout = QtGui.QGridLayout(self.centralwidget)
        self.verticalLayout = QtGui.QVBoxLayout()
        self.scrollArea = QtGui.QScrollArea(self.centralwidget)
        self.scrollArea.setWidgetResizable(True)
        self.gridoftable = QtGui.QGridLayout(self.scrollArea)
        self.tableView = QtGui.QTableWidget(self.scrollArea)
        # Load messages from the 'backup' file, then populate the table.
        self.lecture('backup')
        self.settable()
        self.tableView.itemDoubleClicked.connect(self.openconv)
        self.gridoftable.addWidget(self.tableView, 0, 0, 1, 1)
        self.verticalLayout.addWidget(self.scrollArea)
        self.pushButton = QtGui.QPushButton("Ok Go !", self.centralwidget)
        self.pushButton.released.connect(self.openconv)
        self.verticalLayout.addWidget(self.pushButton)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        self.setCentralWidget(self.centralwidget)
        self.marandspa([self.gridLayout, self.gridoftable, self.verticalLayout])

    def keyPressEvent(self, e):
        """Open the selected conversation when Return is pressed."""
        if e.key() == QtCore.Qt.Key_Return:
            self.openconv()

    def marandspa(self, layouts):
        """Zero out margins and spacing for the given layouts."""
        for layout in layouts:
            layout.setMargin(0)
            layout.setSpacing(0)

    def settable(self):
        """ All setters for the table """
        self.tableView.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.tableView.setAlternatingRowColors(True)
        self.tableView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tableView.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tableView.setSortingEnabled(True)
        self.tableView.verticalHeader().setVisible(False)
        self.tableView.horizontalHeader().setStretchLastSection(True)  # last column sticks to the window edge
        self.filltable()
        self.setheadertable()
        self.tableView.verticalHeader().resizeSections(QtGui.QHeaderView.ResizeToContents)
        self.tableView.verticalHeader().setDefaultSectionSize(40)  # row height
        self.tableView.resizeColumnToContents(0)  # auto-size the first column

    def openconv(self):
        """Open a ConvWindow for the contact in the currently selected row."""
        # Column 0 holds "name\n(count)"; keep only the name part.
        name = self.tableView.item(self.tableView.currentRow(), 0).text().split('\n')[0]
        self.conv = ConvWindow(str(name), self.msgs)
        self.conv.show()

    def filltable(self):
        """ Fill the table with the list created by self.conversations() """
        self.conversations()
        self.tableView.setRowCount(len(self.conv))
        self.tableView.setColumnCount(len(self.conv[0]))
        for i, row in enumerate(self.conv):
            for j, col in enumerate(row):
                item = QtGui.QTableWidgetItem()
                item.setData(QtCore.Qt.EditRole, col)
                self.tableView.setItem(i, j, item)

    def setheadertable(self):
        """ Create columns headers """
        head = ["Name", "Last"]  # 2 columns (TODO: define elsewhere / differently)
        for i in range(len(head)):
            self.tableView.setHorizontalHeaderItem(i, QtGui.QTableWidgetItem(head[i]))

    def lecture(self, fileName):
        """ Create self.msgs a dictionnary {names:[<Message object>]} """
        fichier = open(fileName, 'r')
        self.msgs = {}
        for lignes in fichier:
            items = lignes.split('\t')
            msg = Message(items)
            if msg.exp not in self.msgs.keys():
                self.msgs[msg.exp] = []
            self.msgs[msg.exp].append(msg)

    def conversations(self):
        """ Create self.conv a list of last msgs by names [name (#), last date & msg ] """
        self.conv = []
        for name in self.msgs.keys():
            try:
                # Senders whose "name" parses as an integer are skipped
                # (presumably numeric short-codes -- confirm).
                int(name.split(';')[0])
            except ValueError:
                self.conv.append([_fromUtf8(name) + '\n({})'.format(str(len(self.msgs[name]))), self.get_last_date(self.msgs[name]) + '\n' + self.get_last_msg(self.msgs[name])])

    def get_last_date(self, name_msgs):
        """Return the most recent message timestamp as 'YYYY-MM-DD HH:MM'."""
        return _fromUtf8(sorted(map(lambda x: x.dateheure.strftime('%Y-%m-%d %H:%M'), name_msgs))[-1])

    def get_last_msg(self, name_msgs):
        # NOTE(review): sorts by message *text*, not by date, so "last" is
        # lexicographically last -- confirm this is intended.
        return sorted(map(lambda x: x.message, name_msgs))[-1][:-1]
############################################################################################
## Main
############################################################################################
if __name__ == "__main__":
    # Create the Qt application, show the main window, and run the event loop.
    application = QtGui.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(application.exec_())
|
Fritzip/SMS-Viewer
|
sms-viewer.py
|
Python
|
gpl-2.0
| 7,977
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.IdentifiedObject import IdentifiedObject
class Unit(IdentifiedObject):
    """Quantity being measured. The Unit.name shall be unique among all specified quantities and describe the quantity. The Unit.aliasName is meant to be used for localization.
    """

    def __init__(self, Controls=None, Measurements=None, MetaBlockConOutput=None, MetaBlockConInput=None, ProtectionEquipments=None, *args, **kw_args):
        """Initialises a new 'Unit' instance.

        @param Controls: The Controls having the Unit.
        @param Measurements: The Measurements having the Unit
        @param MetaBlockConOutput:
        @param MetaBlockConInput:
        @param ProtectionEquipments: The Protection Equipments having the Unit.
        """
        # Each backing list starts empty and is then assigned through the
        # public property so the reverse (obj.Unit) links are established.
        self._Controls = []
        self.Controls = [] if Controls is None else Controls

        self._Measurements = []
        self.Measurements = [] if Measurements is None else Measurements

        self._MetaBlockConOutput = []
        self.MetaBlockConOutput = [] if MetaBlockConOutput is None else MetaBlockConOutput

        self._MetaBlockConInput = []
        self.MetaBlockConInput = [] if MetaBlockConInput is None else MetaBlockConInput

        self._ProtectionEquipments = []
        self.ProtectionEquipments = [] if ProtectionEquipments is None else ProtectionEquipments

        super(Unit, self).__init__(*args, **kw_args)

    # CIM attribute/reference metadata (presumably consumed by the PyCIM
    # serialisation framework -- confirm).
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["Controls", "Measurements", "MetaBlockConOutput", "MetaBlockConInput", "ProtectionEquipments"]
    _many_refs = ["Controls", "Measurements", "MetaBlockConOutput", "MetaBlockConInput", "ProtectionEquipments"]

    def getControls(self):
        """The Controls having the Unit.
        """
        return self._Controls

    def setControls(self, value):
        # Detach the current members, then attach the new ones by writing
        # their private reverse reference directly.
        for x in self._Controls:
            x.Unit = None
        for y in value:
            y._Unit = self
        self._Controls = value

    Controls = property(getControls, setControls)

    def addControls(self, *Controls):
        for obj in Controls:
            obj.Unit = self

    def removeControls(self, *Controls):
        for obj in Controls:
            obj.Unit = None

    def getMeasurements(self):
        """The Measurements having the Unit
        """
        return self._Measurements

    def setMeasurements(self, value):
        # Same detach/attach pattern as setControls.
        for x in self._Measurements:
            x.Unit = None
        for y in value:
            y._Unit = self
        self._Measurements = value

    Measurements = property(getMeasurements, setMeasurements)

    def addMeasurements(self, *Measurements):
        for obj in Measurements:
            obj.Unit = self

    def removeMeasurements(self, *Measurements):
        for obj in Measurements:
            obj.Unit = None

    def getMetaBlockConOutput(self):
        """Return the MetaBlockConOutput references."""
        return self._MetaBlockConOutput

    def setMetaBlockConOutput(self, value):
        # Same detach/attach pattern as setControls.
        for x in self._MetaBlockConOutput:
            x.Unit = None
        for y in value:
            y._Unit = self
        self._MetaBlockConOutput = value

    MetaBlockConOutput = property(getMetaBlockConOutput, setMetaBlockConOutput)

    def addMetaBlockConOutput(self, *MetaBlockConOutput):
        for obj in MetaBlockConOutput:
            obj.Unit = self

    def removeMetaBlockConOutput(self, *MetaBlockConOutput):
        for obj in MetaBlockConOutput:
            obj.Unit = None

    def getMetaBlockConInput(self):
        """Return the MetaBlockConInput references."""
        return self._MetaBlockConInput

    def setMetaBlockConInput(self, value):
        # Same detach/attach pattern as setControls.
        for x in self._MetaBlockConInput:
            x.Unit = None
        for y in value:
            y._Unit = self
        self._MetaBlockConInput = value

    MetaBlockConInput = property(getMetaBlockConInput, setMetaBlockConInput)

    def addMetaBlockConInput(self, *MetaBlockConInput):
        for obj in MetaBlockConInput:
            obj.Unit = self

    def removeMetaBlockConInput(self, *MetaBlockConInput):
        for obj in MetaBlockConInput:
            obj.Unit = None

    def getProtectionEquipments(self):
        """The Protection Equipments having the Unit.
        """
        return self._ProtectionEquipments

    def setProtectionEquipments(self, value):
        # Same detach/attach pattern as setControls.
        for x in self._ProtectionEquipments:
            x.Unit = None
        for y in value:
            y._Unit = self
        self._ProtectionEquipments = value

    ProtectionEquipments = property(getProtectionEquipments, setProtectionEquipments)

    def addProtectionEquipments(self, *ProtectionEquipments):
        for obj in ProtectionEquipments:
            obj.Unit = self

    def removeProtectionEquipments(self, *ProtectionEquipments):
        for obj in ProtectionEquipments:
            obj.Unit = None
|
rwl/PyCIM
|
CIM14/IEC61970/Core/Unit.py
|
Python
|
mit
| 5,881
|
def extractQuillofkarnikaWordpressCom(item):
    '''
    Parser for 'quillofkarnika.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Skip items that carry no chapter/volume info, and preview posts.
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    # (tag to match, release name, translation type) -- first match wins.
    tagmap = [
        ('Top Giants: Rebirth of the Black-Bellied Wife', 'Top Giants: Rebirth of the Black-Bellied Wife', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]

    for tag, series_name, translation_type in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=translation_type)

    return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractQuillofkarnikaWordpressCom.py
|
Python
|
bsd-3-clause
| 708
|
from .autocomplete import bp as autocomplete_bp
from .tables import bp as tables_bp
from .transactions import bp as transactions_bp

# Bug fix: __all__ must contain *names* (strings), not the objects
# themselves -- ``from app.blueprints import *`` raises a TypeError when
# __all__ holds non-string entries.
__all__ = [
    "autocomplete_bp",
    "tables_bp",
    "transactions_bp",
]
|
atomberg/transaction-organizer
|
app/blueprints/__init__.py
|
Python
|
mit
| 203
|
"""
The LDBDClient module provides an API for connecting to and making requests of
a LDBDServer.
This module requires U{pyGlobus<http://www-itg.lbl.gov/gtg/projects/pyGlobus/>}.
This file is part of the Grid LSC User Environment (GLUE)
GLUE is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
from pycbc_glue import git_version
__date__ = git_version.date
__version__ = git_version.id
import sys
import os
import exceptions
import types
import re
import cPickle
import xml.parsers.expat
from pyGlobus import io
from pyGlobus import security
def version():
    """Return this module's version id string (set from glue's git_version)."""
    return __version__
class SimpleLWXMLParser:
    """
    A very simple LIGO_LW XML parser class that reads the only keeps
    tables that do not contain the strings sngl_ or multi_

    The class is not very robust as can have problems if the line
    breaks do not appear in the standard places in the XML file.
    """
    def __init__(self):
        """
        Constructs an instance.

        The private variable ignore_pat determines what tables we ignore.
        """
        self.__p = xml.parsers.expat.ParserCreate()
        self.__in_table = 0
        self.__silent = 0
        self.__ignore_pat = re.compile(r'.*(sngl_|multi_).*', re.IGNORECASE)
        self.__p.StartElementHandler = self.start_element
        self.__p.EndElementHandler = self.end_element

    def __del__(self):
        """
        Destroys an instance by shutting down and deleting the parser.
        """
        # Bug fix: the original called ``self.__p("", 1)``, which raises a
        # TypeError because the expat parser object is not callable; the
        # end-of-document parse is done with Parse(data, isfinal).  Errors
        # are suppressed because a partially-fed document legitimately
        # fails this final call, and __del__ must not raise.
        try:
            self.__p.Parse("", 1)
        except Exception:
            pass
        del self.__p

    def start_element(self, name, attrs):
        """
        Callback for start of an XML element. Checks to see if we are
        about to start a table that matches the ignore pattern.

        @param name: the name of the tag being opened
        @type name: string
        @param attrs: a dictionary of the attributes for the tag being opened
        @type attrs: dictionary
        """
        if name.lower() == "table":
            for attr in attrs.keys():
                if attr.lower() == "name":
                    if self.__ignore_pat.search(attrs[attr]):
                        self.__in_table = 1

    def end_element(self, name):
        """
        Callback for the end of an XML element. If the ignore flag is
        set, reset it so we start outputing the table again.

        @param name: the name of the tag being closed
        @type name: string
        """
        if name.lower() == "table":
            if self.__in_table:
                self.__in_table = 0

    def parse_line(self, line):
        """
        For each line we are passed, call the XML parser. Returns the
        line if we are outside one of the ignored tables, otherwise
        returns the empty string.

        @param line: the line of the LIGO_LW XML file to be parsed
        @type line: string

        @return: the line of XML passed in or the null string
        @rtype: string
        """
        self.__p.Parse(line)
        if self.__in_table:
            self.__silent = 1
        if not self.__silent:
            ret = line
        else:
            ret = ""
        if not self.__in_table:
            self.__silent = 0
        return ret
class LDBDClientException(Exception):
    """Exceptions returned by server"""
    def __init__(self, args=None):
        # NOTE(review): stores the raw value (possibly None) in ``args``;
        # BaseException.args conventionally holds a tuple.  This is
        # Python-2-era behaviour -- confirm callers rely on it before
        # changing.
        self.args = args
class LDBDClient(object):
def __init__(self, host, port, identity):
"""
Open a connection to a LDBD Server and return an instance of
class LDBDClient. One of the public methods can then be
called to send a request to the server.
@param host: the host on which the LDBD Server runs
@type host: string
@param port: port on which the LDBD Server listens
@type port: integer
@param identity: string which the LDBD Server identifies itself
@type identity: string
@return: Instance of LDBDClient
"""
try:
self.__connect__(host,port,identity)
except Exception, e:
raise
def __del__(self):
"""
Disconnect from the LDBD server.
@return: None
"""
self.__disconnect__()
def __connect__(self,host,port,identity):
"""
Attempt to open a connection to the LDBD Server
using the 'host' and 'port' and expecting the server
to identify itself with a corresponding host certificate.
A IOException is raised if the connection cannot be made,
but this is caught by the __init__ method above and
turned into a LDBDClient exception.
@param host: the host on which the LDBD Server runs
@type host: string
@param port: port on which the LDBD Server listens
@type port: integer
@param identity: string which the LDBD Server identifies itself
@type identity: string
@return: None
"""
# remove the globus tcp port range environment variable if set
try:
port_range = os.environ["GLOBUS_TCP_PORT_RANGE"]
os.environ["GLOBUS_TCP_PORT_RANGE"] = ""
except:
pass
self.host = host
self.port = port
self.identity = identity
# redirect stdout and stderror for now
try:
f = open("/dev/null", "w")
sys.stdout = f
sys.stderr = f
except:
pass
try:
# create TCPIOAttr instance
clientAttr = io.TCPIOAttr()
authData = io.AuthData()
soc = io.GSITCPSocket()
if identity is None:
# try an unauthenticated connection
clientAttr.set_authentication_mode(
io.ioc.GLOBUS_IO_SECURE_AUTHENTICATION_MODE_NONE)
clientAttr.set_authorization_mode(
io.ioc.GLOBUS_IO_SECURE_AUTHORIZATION_MODE_NONE, authData)
clientAttr.set_channel_mode(
io.ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_CLEAR)
clientAttr.set_delegation_mode(
io.ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_NONE)
else:
# set authentication mode to be GSSAPI
clientAttr.set_authentication_mode(
io.ioc.GLOBUS_IO_SECURE_AUTHENTICATION_MODE_GSSAPI)
# set expected identity
authData.set_identity(identity)
# set authorization, channel, and delegation modes
clientAttr.set_authorization_mode(
io.ioc.GLOBUS_IO_SECURE_AUTHORIZATION_MODE_IDENTITY, authData)
clientAttr.set_channel_mode(
io.ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_CLEAR)
clientAttr.set_delegation_mode(
io.ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_LIMITED_PROXY)
soc.connect(host, port, clientAttr)
self.socket = soc
self.sfile = soc.makefile("rw")
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
f.close()
def __disconnect__(self):
"""
Disconnect from the LDBD Server.
@return: None
"""
try:
self.socket.shutdown(2)
except:
pass
def __response__(self):
"""
Read the response sent back by the LDBD Server. Parse out the
return code with 0 for success and non-zero for error, and then
the list of strings representing the returned result(s).
@return: tuple containing the integer error code and the list of
strings representing the output from the server
"""
f = self.sfile
response = ""
# Read in 512 byte chunks until there is nothing left to read.
# This blocks until the socket is ready for reading and until
# 512 bytes are received. If the message is less then 512 bytes
# this will block until the server closes the socket. Since
# the server always shuts down the socket after sending its
# reply this should continue to work for now.
while 1:
input = f.read(size = 512, waitForBytes = 512)
response += input
if len(input) < 512: break
# the response from the server must always end in a null byte
try:
if response[-1] != '\0':
msg = "Bad server reponse format. Contact server administrator."
raise LDBDClientException, msg
except:
msg = "Connection refused. The server may be down or you may not have" + \
"authorization to access this server. Contact server administrator."
raise LDBDClientException, msg
# delete the last \0 before splitting into strings
response = response[0:-1]
try:
stringList = response.split('\0')
code = int(stringList[0])
output = stringList[1:]
except Exception, e:
msg = "Error parsing response from server : %s" % e
try:
f.close()
except:
pass
raise LDBDClientException, msg
f.close()
return code, output
def ping(self):
    """
    Ping the LDBD Server and return any message received back as a string.

    @return: message received (may be empty) from LDBD Server as a string
    @raise LDBDClientException: if the server returns a non-zero status
    """
    # Protocol commands are null-terminated strings.
    msg = "PING\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error pinging server %d:%s" % (ret, reply)
        # call-style raise: valid on both Python 2 and Python 3
        # (the old "raise E, msg" form is a SyntaxError on Python 3)
        raise LDBDClientException(msg)
    return reply
def query(self,sql):
    """
    Execute an SQL query on the server and fetch the resulting XML file
    back.

    @param sql: SQL statement to execute on the server
    @return: message received (may be empty) from LDBD Server as a string
    @raise LDBDClientException: if the server returns a non-zero status
    """
    msg = "QUERY\0" + sql + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error executing query on server %d:%s" % (ret, reply)
        # call-style raise works under both Python 2 and Python 3
        raise LDBDClientException(msg)
    return reply
def insert(self,xmltext):
    """
    Insert the LIGO_LW metadata in the xmltext string into the database.

    @param xmltext: LIGO_LW XML document as a string
    @return: message received (may be empty) from LDBD Server as a string
    @raise LDBDClientException: if the server returns a non-zero status
    """
    msg = "INSERT\0" + xmltext + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error executing insert on server %d:%s" % (ret, reply)
        # call-style raise works under both Python 2 and Python 3
        raise LDBDClientException(msg)
    return reply
def insertmap(self,xmltext,lfnpfn_dict):
    """
    Insert the LIGO_LW metadata in the xmltext string into the database,
    together with a pickled LFN->PFN mapping.

    @param xmltext: LIGO_LW XML document as a string
    @param lfnpfn_dict: mapping of logical to physical file names;
        serialized with cPickle before being sent to the server
    @return: message received (may be empty) from LDBD Server as a string
    @raise LDBDClientException: if the server returns a non-zero status
    """
    pmsg = cPickle.dumps(lfnpfn_dict)
    msg = "INSERTMAP\0" + xmltext + "\0" + pmsg + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error executing insert on server %d:%s" % (ret, reply)
        # call-style raise works under both Python 2 and Python 3
        raise LDBDClientException(msg)
    return reply
def insertdmt(self,xmltext):
    """
    Insert the LIGO_LW metadata in the xmltext string into the database
    via the INSERTDMT command.

    @param xmltext: LIGO_LW XML document as a string
    @return: message received (may be empty) from LDBD Server as a string
    @raise LDBDClientException: if the server returns a non-zero status
    """
    msg = "INSERTDMT\0" + xmltext + "\0"
    self.sfile.write(msg)
    ret, output = self.__response__()
    reply = str(output[0])
    if ret:
        msg = "Error executing insert on server %d:%s" % (ret, reply)
        # call-style raise works under both Python 2 and Python 3
        raise LDBDClientException(msg)
    return reply
|
ligo-cbc/pycbc-glue
|
pycbc_glue/LDBDClient.py
|
Python
|
gpl-3.0
| 11,113
|
#!/home/pi/.virtualenvs/cv2/bin/python
from picamera.array import PiRGBArray
from picamera import PiCamera
import picamera
from time import sleep
import time
import cv2
import numpy as np
import sys
import datetime
import boto3
import subprocess
import os
import pyowm
import commands
import multiprocessing
import threading
import json
import shlex
import csv
#AWS Rekognition variables
# S3 bucket that receives the freshly captured frames (comparison targets).
bucket_target_var = "tw37-opencv"
#bucket_source_var = "new_image_name.jpg"
# Reference image of the known face, and the bucket that stores it.
key_source_var = "orignal_trevor_1706.jpg"
bucket_source_var = "tw37-original"
#AWS Rekognition Code - Face Comparison
def compare_faces(bucket, key, bucket_target, key_target, threshold=80, region="us-west-2"):
    """Compare a reference face against a captured image with AWS Rekognition.

    Calls the Rekognition CompareFaces API with the source image
    (bucket/key) and the target image (bucket_target/key_target). For each
    reported match it logs a timestamp, greets the user via espeak and
    announces the weather — but only if the last sighting was more than a
    minute ago. Unmatched faces are just reported.
    NOTE(review): indentation was reconstructed from syntax; structure of
    the per-match block should be confirmed against the original file.
    """
    def WeatherProcessing():
        # Fetch current Melbourne weather from OpenWeatherMap and speak a
        # summary (wind speed, humidity, temperature) via espeak.
        #OWM Weather Data Functions
        owm = pyowm.OWM('xxxxxxxxxxxxxxxxxxx') # You MUST provide a valid API key
        #Search for current weather in Melbourne (Australia)
        observation = owm.weather_at_place('Melbourne,au')
        w = observation.get_weather()
        #Get Weather details
        Wind = w.get_wind() # {'speed': 4.6, 'deg': 330}
        WindText = "espeak -g 10 \" Current wind Speed and Direction is " + format(Wind) + " \" "
        #print (WindText)
        SWind = w.get_wind()['speed'] # 4
        SWindText = "espeak -g 10 \" Current wind Speed is " + format(SWind) + " knots \" "
        Humidity = w.get_humidity() # 87
        HumidityText = "espeak -g 10 \" Current humidity is " + format(Humidity) + " percent \" "
        Temperature = w.get_temperature('celsius') # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}
        TemperatureText = "espeak -g 10 \" Current temperature is " + format(Temperature) + " degrees \" "
        TemperatureAvg = w.get_temperature('celsius')['temp'] # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}
        TemperatureAvgText = "espeak -g 10 \" Current temperature is " + format(TemperatureAvg) + " degrees \" "
        Clouds = w.get_clouds()
        Rainfall = w.get_rain()
        Pressure = w.get_pressure()
        # Only wind speed, humidity and average temperature are spoken.
        #subprocess.call(shlex.split(WindText))
        subprocess.call(shlex.split(SWindText))
        subprocess.call(shlex.split(HumidityText))
        subprocess.call(shlex.split(TemperatureAvgText))
        #Output for debugging purpose
        #print (" ")
        #print (" ")
        #print ("****************************************************************************************************")
        #print ("Current wind Speed and Direction right now in Melbourne is = %s " %Wind)
        #print ("Current Temperature in Melbourne is = %s" %Temperature)
        #print ("Current Humidity in Melbourne is = %s Percent" %Humidity)
        #print ("Cloud ceiling across Melbourne is %s thousand feet" %Clouds)
        #print ("Current Rainfall across Melbourne is %s " %Rainfall)
        #print ("Barometric Pressure across Melbourne is %s " %Pressure)
        #print ("****************************************************************************************************")
        #print (" ")
        #print (" ")
    #Face Matching Code Starts Here
    rekognition = boto3.client("rekognition", region)
    response = rekognition.compare_faces(
        SourceImage={
            "S3Object": {
                "Bucket": bucket,
                "Name": key,
            }
        },
        TargetImage={
            "S3Object": {
                "Bucket": bucket_target,
                "Name": key_target,
            }
        },
        SimilarityThreshold=threshold,
    )
    #Reading JSON and converting into workable format
    # round-trip through json to turn the boto3 response into plain dicts
    #print(response)
    temp1 = json.dumps(response)
    temp2 = json.loads(temp1)
    #print(temp2['FaceMatches'])
    print "Source Face Confidence in %s " %format(temp2['SourceImageFace']['Confidence'])
    for match in temp2['FaceMatches']:
        # NOTE(review): only the first match's similarity is printed, even
        # when iterating over several matches.
        print "*******************************************"
        print " "
        print "Similarity between compared faces is %s " %format(temp2['FaceMatches'][0]['Similarity'])
        print " "
        print "*******************************************"
        #Writing timestamp to log file
        now = datetime.datetime.now()
        outputFile = open('/opt/data/face_capture_time_log.txt', 'a')
        outputWriter = csv.writer(outputFile)
        tempArray = [now]
        outputWriter.writerow(tempArray)
        outputFile.close()
        #Reading older timestamp from log file
        # shell pipeline pulls the minutes field of the last logged timestamp
        proc = subprocess.Popen(["tail -n 1 /opt/data/face_capture_time_log.txt | cut -d : -f 2"], stdout=subprocess.PIPE, shell=True)
        (out, err) = proc.communicate()
        temp = out.strip()
        oldtimestampminute = int(temp)
        #Subtracting seconds to find the difference
        diff = oldtimestampminute - now.minute
        if abs(diff) > 1: #abs takes care of negative values and provides a positive number as the result
            print "*******************************************"
            print " "
            print " !!! Speech To Text happens here!!!! "
            print " "
            print "*******************************************"
            subprocess.call('espeak \" Hi Trevor Welcome back \" ', shell=True)
            WeatherProcessing()
        else:
            print "****************************************************************************"
            print " "
            print ("Ain't bothering you because we just spotted you less than a a min ago")
            print " "
            print "****************************************************************************"
    for nomatch in temp2['UnmatchedFaces']:
        print "Faces either don't match or are a poor match"
    return
#Main Code Section Starts Here
# Capture frames from the Pi camera, detect faces with a Haar cascade,
# draw/annotate them, archive any frame with faces to S3 and hand it to
# compare_faces() on a background thread. Press "q" in the preview window
# to quit.
face_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml')
#nose_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.0.0/data/haarcascades/Nariz.xml')
camera = PiCamera()
camera.resolution = (640,480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640,480))
s3 = boto3.client('s3')
# give the camera sensor time to warm up
time.sleep(2)
#Clearing the buffer before loading the first image
rawCapture.truncate(0)
while True:
    #time.sleep(1)
    camera.capture(rawCapture, format="bgr")
    img = rawCapture.array
    # grayscale + blur before cascade detection
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags = cv2.CASCADE_SCALE_IMAGE)
    # iterate over all identified faces and try to find eyes
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        #The code on the next three lines works and has been tested out
        #Disabling it because it's not required for purposes of identification of faces
        #eyes = eye_cascade.detectMultiScale(roi_gray, minSize=(30, 30))
        #for (ex,ey,ew,eh) in eyes:
        #cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,0,0),2)
        #Detection of code for noses has not been validated or tested
        #noses = nose_cascade.detectMultiScale(roi_gray, minSize=(100, 30))
        #for (ex,ey,ew,eh) in noses:
        #    cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)
    #printing messages to the screen
    print "At time "+time.strftime("%d/%m/%y-%H:%M:%S")+", found {0} faces in the picture!!!".format(len(faces))
    #writing the image to the screen
    font = cv2.FONT_HERSHEY_SIMPLEX
    #cv2.putText(img, str(datetime.datetime.now().strftime("%d/%m/%y-%H/%M/%S")), (100,500), font, 4,(255,255,255),2)
    cv2.putText(img, "DateTime - "+str(datetime.datetime.now().strftime("%d/%m/%y %H:%M:%S")), (5,25), font, 0.5,(255,255,255))
    cv2.imshow('Mapping Faces within the Image', img)
    #writing the image to a file
    if len(faces) > 0:
        #Older versions of cv2.imwrite
        #cv2.imwrite("temp"+str(time.strftime("%d/%m/%y-%H%M%S"))+".jpg",img)
        #cv2.imwrite("temp"+str(datetime.datetime.now())+".jpg",img)
        #cv2.imwrite("temp"+str(datetime.datetime.now().strftime("%d/%m/%y-%H/%M/%S"))+".jpg",img)
        #cv2.imwrite("FaceCaptureWarrenPi-"+str(datetime.datetime.now())+".jpg",img)
        #current version of cv2.imwrite
        #imagename = "FaceCaptureWarrenPi-" + format(str(datetime.datetime.now())) + ".jpg" #This also works
        imagename = "FaceCaptureWarrenPi-" + format(str(time.strftime("%d%m%y-%H%M%S"))) + ".jpg"
        writepath = "/home/pi/Downloads/TW_Experiments/Python_Projects/RaspiPythonProjects/OpenCV/CaptureVideoStream/imagecapture/" + imagename
        cv2.imwrite(writepath, img)
        print "Captured image to file !!!"
        #Uploading files to AWS S3
        with open(writepath, 'rb') as data:
            s3.upload_fileobj(data, "tw37-opencv", imagename)
        #Comparing images using AWS Rekognition
        # NOTE: these shadow the module-level constants of the same names
        bucket_target_var = "tw37-opencv"
        #key_target_var = "new_image_name.jpg"
        key_source_var = "orignal_trevor_1706.jpg"
        bucket_source_var = "tw37-original"
        #source_face, matches = compare_faces(bucket_source_var, key_source_var, bucket_target_var, imagename)
        #print "Source Face ({Confidence}%)".format(**source_face)
        #one match for each target face
        #for match in matches:
        #    print "Target Face ({Confidence}%)".format(**match['Face'])
        #    print "  Similarity : {}%".format(match['Similarity'])
        #    if (match['Similarity'] > 80):
        #        print "Hi Trevor, Welcome back."
        #        subprocess.call("espeak \" Hi Trevor Welcome back \" ", shell=True)
        #        WeatherProcessing()
        #Forking a thread to perform the AWS Rekognition Comparison
        # threads list is never joined; comparison is fire-and-forget
        threads = []
        t = threading.Thread(target=compare_faces, args=(bucket_source_var, key_source_var, bucket_target_var, imagename))
        threads.append(t)
        t.start()
    #looking for escape sequence
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        print "Quitting....hold on"
        break
    #Clearing the buffer before loading the next image
    rawCapture.truncate(0)
#Closing the capture, releasing all resources
#rawCapture.release()
cv2.destroyAllWindows()
|
tangowhisky37/RaspiPythonProjects
|
OpenCV/CaptureVideoStream/CaptureVideoStream_v0.21.py
|
Python
|
gpl-3.0
| 10,532
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.data_processing import base as dp_base
from tempest.common.utils import data_utils
from tempest import test
class JobBinaryInternalTest(dp_base.BaseDataProcessingTest):
    """Smoke tests for Sahara job binary internals.

    API reference: http://docs.openstack.org/developer/
    sahara/restapi/rest_api_v1.1_EDP.html#job-binary-internals
    """
    @classmethod
    def resource_setup(cls):
        super(JobBinaryInternalTest, cls).resource_setup()
        # payload stored for every job binary internal created below
        cls.job_binary_internal_data = 'Some script may be data'

    def _create_job_binary_internal(self, binary_name=None):
        """Create a job binary internal, optionally with a given name.

        Stores the data in the Sahara database and checks that the
        service echoes the requested name back.
        Returns the (id, name) pair of the created object.
        """
        name = binary_name or data_utils.rand_name('sahara-job-binary-internal')
        body = self.create_job_binary_internal(name,
                                               self.job_binary_internal_data)
        self.assertEqual(name, body['name'])
        return body['id'], name

    @test.attr(type='smoke')
    def test_job_binary_internal_create(self):
        self._create_job_binary_internal()

    @test.attr(type='smoke')
    def test_job_binary_internal_list(self):
        created = self._create_job_binary_internal()
        # the created (id, name) pair must appear in the listing
        _, listing = self.client.list_job_binary_internals()
        listed = [(item['id'], item['name']) for item in listing]
        self.assertIn(created, listed)

    @test.attr(type='smoke')
    def test_job_binary_internal_get(self):
        binary_id, name = self._create_job_binary_internal()
        # fetch by id and verify the name round-trips
        _, fetched = self.client.get_job_binary_internal(binary_id)
        self.assertEqual(name, fetched['name'])

    @test.attr(type='smoke')
    def test_job_binary_internal_delete(self):
        binary_id = self._create_job_binary_internal()[0]
        self.client.delete_job_binary_internal(binary_id)

    @test.attr(type='smoke')
    def test_job_binary_internal_get_data(self):
        binary_id = self._create_job_binary_internal()[0]
        # stored payload must match what resource_setup defined
        _, payload = self.client.get_job_binary_internal_data(binary_id)
        self.assertEqual(payload, self.job_binary_internal_data)
|
ebagdasa/tempest
|
tempest/api/data_processing/test_job_binary_internals.py
|
Python
|
apache-2.0
| 3,285
|
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2009 John Iacona
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import re, os
import cgi
import urllib
import xml.dom.minidom as dom
from mako.template import Template
import rb
import LastFM
from gi.repository import WebKit
from gi.repository import GObject, Gtk
from gi.repository import RB
import gettext
gettext.install('rhythmbox', RB.locale_dir())
class ArtistTab (GObject.GObject):
    """Context-pane tab showing biography info for the playing artist.

    Owns a toggle button packed into the shared button row; clicking it
    emits 'switch-tab' so the pane controller can make this tab current.
    """
    # signal payload is the tab identifier string ('artist')
    __gsignals__ = {
        'switch-tab' : (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE,
                        (GObject.TYPE_STRING,))
    }

    def __init__ (self, shell, buttons, ds, view):
        # shell: RB shell; buttons: container for tab buttons;
        # ds: ArtistDataSource; view: ArtistView rendering the HTML
        GObject.GObject.__init__ (self)
        self.shell = shell
        self.sp = shell.props.shell_player
        self.db = shell.props.db
        self.buttons = buttons
        self.button = Gtk.ToggleButton (label=_("Artist"))
        self.datasource = ds
        self.view = view
        self.artist = None
        self.active = False
        self.button.show()
        self.button.set_relief (Gtk.ReliefStyle.NONE)
        self.button.set_focus_on_click(False)
        self.button.connect ('clicked',
            lambda button : self.emit('switch-tab', 'artist'))
        buttons.pack_start (self.button, True, True, 0)

    def activate (self):
        """Mark this tab current, press its button and refresh content."""
        print "activating Artist Tab"
        self.button.set_active(True)
        self.active = True
        self.reload ()

    def deactivate (self):
        """Release the button and stop reacting to track changes."""
        print "deactivating Artist Tab"
        self.button.set_active(False)
        self.active = False

    def reload (self):
        """Refresh the view for the currently playing entry, if any."""
        entry = self.sp.get_playing_entry ()
        if entry is None:
            print "Nothing playing"
            return None
        artist = entry.get_string (RB.RhythmDBPropType.ARTIST)
        if self.active and self.artist != artist:
            # artist changed while the tab is visible: start an async
            # fetch and show the loading page until data arrives
            self.datasource.fetch_artist_data (artist)
            self.view.loading (artist)
        else:
            # same artist (or tab inactive): just re-render cached HTML
            self.view.load_view()
        self.artist = artist
class ArtistView (GObject.GObject):
    """Renders artist biography HTML into a WebKit view.

    Builds pages from Mako templates and reloads whenever the data
    source signals that artist info is ready.
    """
    def __init__ (self, shell, plugin, webview, ds):
        # webview: WebKit view to render into; ds: ArtistDataSource
        GObject.GObject.__init__ (self)
        self.webview = webview
        self.ds = ds
        self.shell = shell
        self.plugin = plugin
        # last rendered HTML page (empty until artist_info_ready fires)
        self.file = ""
        plugindir = plugin.plugin_info.get_data_dir()
        # base file:// URL so templates can reference bundled assets
        self.basepath = "file://" + urllib.pathname2url (plugindir)
        self.load_tmpl ()
        self.connect_signals ()

    def load_view (self):
        """Push the most recently rendered page into the webview."""
        self.webview.load_string (self.file, 'text/html', 'utf-8', self.basepath)

    def loading (self, current_artist):
        """Show a transient 'loading biography' page for the artist."""
        self.loading_file = self.loading_template.render (
            artist = current_artist,
            info = _("Loading biography for %s") % current_artist,
            song = "",
            basepath = self.basepath)
        self.webview.load_string (self.loading_file, 'text/html', 'utf-8', self.basepath)

    def load_tmpl (self):
        """Compile the Mako templates and remember the stylesheet path."""
        self.path = rb.find_plugin_file(self.plugin, 'tmpl/artist-tmpl.html')
        self.loading_path = rb.find_plugin_file (self.plugin, 'tmpl/loading.html')
        self.template = Template (filename = self.path, module_directory = self.plugin.tempdir)
        self.loading_template = Template (filename = self.loading_path, module_directory = self.plugin.tempdir)
        self.styles = self.basepath + '/tmpl/main.css'

    def connect_signals (self):
        # re-render whenever the data source finishes fetching
        self.air_id = self.ds.connect ('artist-info-ready', self.artist_info_ready)

    def artist_info_ready (self, ds):
        # Can only be called after the artist-info-ready signal has fired.
        # If called any other time, the behavior is undefined
        try:
            info = ds.get_artist_info ()
            small, med, big = info['images'] or (None, None, None)
            summary, full_bio = info['bio'] or (None, None)
            self.file = self.template.render (artist = ds.get_current_artist (),
                                              error = ds.get_error (),
                                              image = med,
                                              fullbio = full_bio,
                                              shortbio = summary,
                                              datasource = LastFM.datasource_link (self.basepath),
                                              stylesheet = self.styles )
            self.load_view ()
        except Exception, e:
            print "Problem in info ready: %s" % e
class ArtistDataSource (GObject.GObject):
    """Fetches and caches Last.fm data (bio, similar artists, top albums
    and tracks) for the current artist.

    Each category in self.artist holds the raw parsed DOM in 'data' until
    the corresponding get_* accessor converts it; 'parsed' records whether
    that conversion has happened, after which 'data' holds the converted
    Python structure instead of the DOM.
    """
    # one "*-ready" signal per category, emitted when its XML has arrived
    __gsignals__ = {
        'artist-info-ready' : (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ()),
        'artist-similar-ready' : (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ()),
        'artist-top-tracks-ready' : (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ()),
        'artist-top-albums-ready' : (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, ()),
    }

    def __init__ (self, info_cache, ranking_cache):
        GObject.GObject.__init__ (self)
        self.current_artist = None
        self.error = None
        # per-category fetch state: Last.fm API method name, cache to use,
        # signal to emit on arrival, and the data/parsed fields above
        self.artist = {
            'info' : {
                'data' : None,
                'signal' : 'artist-info-ready',
                'function' : 'getinfo',
                'cache' : info_cache,
                'parsed' : False,
            },
            'similar' : {
                'data' : None,
                'signal' : 'artist-similar-ready',
                'function' : 'getsimilar',
                'cache' : info_cache,
                'parsed' : False,
            },
            'top_albums' : {
                'data' : None,
                'signal' : 'artist-top-albums-ready',
                'function' : 'gettopalbums',
                'cache' : ranking_cache,
                'parsed' : False,
            },
            'top_tracks' : {
                'data' : None,
                'signal' : 'artist-top-tracks-ready',
                'function' : 'gettoptracks',
                'cache' : ranking_cache,
                'parsed' : False,
            },
        }

    def extract (self, data, position):
        """
        Safely extract the data from an xml node. Returns data
        at position or None if position does not exist
        """
        try:
            return data[position].firstChild.data
        except Exception, e:
            return None

    def fetch_top_tracks (self, artist):
        """Fetch only the top-tracks XML for one artist (no-op without a
        Last.fm account)."""
        if LastFM.user_has_account() is False:
            return
        artist = urllib.quote_plus (artist)
        function = self.artist['top_tracks']['function']
        cache = self.artist['top_tracks']['cache']
        cachekey = "lastfm:artist:%s:%s" % (function, artist)
        url = '%sartist.%s&artist=%s&api_key=%s' % (LastFM.URL_PREFIX,
            function, artist, LastFM.API_KEY)
        cache.fetch(cachekey, url, self.fetch_artist_data_cb, self.artist['top_tracks'])

    def fetch_artist_data (self, artist):
        """
        Initiate the fetching of all artist data. Fetches artist info, similar
        artists, artist top albums and top tracks. Downloads XML files from last.fm
        and saves as parsed DOM documents in self.artist dictionary. Must be called
        before any of the get_* methods.
        """
        self.current_artist = artist
        if LastFM.user_has_account() is False:
            # surface the missing-account error through the info signal
            self.error = LastFM.NO_ACCOUNT_ERROR
            self.emit ('artist-info-ready')
            return
        self.error = None
        artist = urllib.quote_plus (artist)
        for key, value in self.artist.items():
            cachekey = "lastfm:artist:%s:%s" % (value['function'], artist)
            url = '%sartist.%s&artist=%s&api_key=%s' % (LastFM.URL_PREFIX,
                value['function'], artist, LastFM.API_KEY)
            value['cache'].fetch(cachekey, url, self.fetch_artist_data_cb, value)

    def fetch_artist_data_cb (self, data, category):
        """Cache callback: parse the fetched XML and emit the category's
        ready signal."""
        if data is None:
            print "no data fetched for artist %s" % category['function']
            return
        try:
            category['data'] = dom.parseString (data)
            # fresh DOM: force the get_* accessors to re-convert
            category['parsed'] = False
            self.emit (category['signal'])
        except Exception, e:
            print "Error parsing artist %s: %s" % (category['function'], e)
            return False

    def get_current_artist (self):
        return self.current_artist

    def get_error (self):
        return self.error

    def get_top_albums (self):
        """Return [(album_name, (img_small, img_med, img_large)), ...],
        converting the DOM on first call."""
        if not self.artist['top_albums']['parsed']:
            albums = []
            for album in self.artist['top_albums']['data'].getElementsByTagName ('album'):
                album_name = self.extract(album.getElementsByTagName ('name'), 0)
                imgs = album.getElementsByTagName ('image')
                images = self.extract(imgs, 0), self.extract(imgs, 1), self.extract(imgs,2)
                albums.append ((album_name, images))
            self.artist['top_albums']['data'] = albums
            self.artist['top_albums']['parsed'] = True
        return self.artist['top_albums']['data']

    def get_similar_artists (self):
        """
        Returns a list of similar artists
        """
        data = self.artist['similar']['data']
        if data is None:
            return None
        if not self.artist['similar']['parsed']:
            lst = []
            for node in data.getElementsByTagName ('artist'):
                artist = self.extract(node.getElementsByTagName('name'), 0)
                similar = self.extract(node.getElementsByTagName('match') ,0)
                image = self.extract(node.getElementsByTagName('image'), 0)
                lst.append ((artist, similar, image))
            data = lst
            self.artist['similar']['parsed'] = True
            self.artist['similar']['data'] = data
        return data

    def get_artist_images (self):
        """
        Returns tuple of image url's for small, medium, and large images.
        """
        data = self.artist['info']['data']
        if data is None:
            return None
        images = data.getElementsByTagName ('image')
        return self.extract(images,0), self.extract(images,1), self.extract(images,2)

    def get_artist_bio (self):
        """
        Returns tuple of summary and full bio
        """
        data = self.artist['info']['data']
        if data is None:
            return None
        if not self.artist['info']['parsed']:
            content = self.extract(data.getElementsByTagName ('content'), 0)
            summary = self.extract(data.getElementsByTagName ('summary'), 0)
            return summary, content
        # NOTE(review): this path runs only after get_artist_info() has
        # replaced 'data' with the {'images', 'bio'} dict, so the 'bio'
        # subscript works then — verify no other code sets 'parsed' first.
        return self.artist['info']['data']['bio']

    def get_artist_info (self):
        """
        Returns the dictionary { 'images', 'bio' }
        """
        if not self.artist['info']['parsed']:
            # convert once, then replace the DOM with the final dict
            images = self.get_artist_images()
            bio = self.get_artist_bio()
            self.artist['info']['data'] = { 'images' : images,
                                            'bio' : bio }
            self.artist['info']['parsed'] = True
        return self.artist['info']['data']

    def get_top_tracks (self):
        """
        Returns a list of the top track titles
        """
        data = self.artist['top_tracks']['data']
        if data is None:
            return None
        if not self.artist['top_tracks']['parsed']:
            tracks = []
            for track in data.getElementsByTagName ('track'):
                name = self.extract(track.getElementsByTagName('name'), 0)
                tracks.append (name)
            self.artist['top_tracks']['data'] = tracks
            self.artist['top_tracks']['parsed'] = True
        return self.artist['top_tracks']['data']
|
dardevelin/rhythmbox-shuffle
|
plugins/context/ArtistTab.py
|
Python
|
gpl-2.0
| 13,087
|
#!/usr/bin/python
import argparse
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from column import getColumn
def createParser ():
    """Build the command-line parser for the distance-plot tool.

    Flags: --dist (compute inter-file distance), --apr (linear fit),
    --dif (annotate with derived velocity). --file takes one or more CSV
    paths; -t/-o/-x/-y/-l set title, output image, axis labels and the
    legend label.

    @return: configured argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser()
    parser.add_argument ('--dist', action='store_const', const=True)
    parser.add_argument ('--apr', action='store_const', const=True)
    parser.add_argument ('--dif', action='store_const', const=True)
    # BUG FIX: with nargs='+' the parsed value is a list, but the old
    # default was the bare string "graph.csv", so namespace.file[0]
    # yielded the character 'g' whenever --file was omitted. The default
    # must be a list to match the explicit-argument shape.
    parser.add_argument ('--file', nargs='+', default=['graph.csv'], action="store")
    parser.add_argument ('-t', '--title', default='title', action="store")
    parser.add_argument ('-o', '--output', default='graph.png', action="store")
    parser.add_argument ('-x', '--xlabel', default='x', action="store")
    parser.add_argument ('-y', '--ylabel', default='y', action="store")
    # typo fix: default legend label was 'leght'
    parser.add_argument ('-l', '--label', default='length', action="store")
    return parser
def computeDist (xfile0, xfile1):
    """Euclidean distance between matching points of two trajectory files.

    Columns 1-3 of each CSV are the x, y, z coordinates; the result is an
    array of per-row distances.
    """
    # pull the three coordinate columns of each file as float arrays
    coords0 = [np.array(getColumn(xfile0, axis)).astype(float) for axis in (1, 2, 3)]
    coords1 = [np.array(getColumn(xfile1, axis)).astype(float) for axis in (1, 2, 3)]
    squared = sum((a - b) ** 2 for a, b in zip(coords0, coords1))
    return np.sqrt(squared)
if __name__ == '__main__':
    # Parse CLI options, optionally compute and plot the distance between
    # two trajectory files (with optional linear fit / velocity label),
    # then save the figure. NOTE(review): indentation reconstructed; the
    # plt.title/xlabel/ylabel/savefig calls are assumed to run
    # unconditionally after the optional --dist branch.
    parser = createParser()
    namespace = parser.parse_args(sys.argv[1:])
    title = '{}'.format(namespace.title)
    output = '{}'.format(namespace.output)
    xlabel = '{}'.format(namespace.xlabel)
    ylabel = '{}'.format(namespace.ylabel)
    label = '{}'.format(namespace.label)
    if namespace.dist:
        # column 0 of the first file provides the time/step axis
        stepf = '{}'.format(namespace.file[0])
        step = np.array(getColumn(stepf,0)).astype(float)
        xfile0 = '{}'.format(namespace.file[0])
        xfile1 = '{}'.format(namespace.file[1])
        lenght = computeDist(xfile0,xfile1)
        plt.figure(title)
        if namespace.apr:
            # first-order polynomial fit of distance vs step
            p = np.polyfit(step,lenght,1)
            if namespace.dif:
                # slope -> velocity; 0.001 and 0.1 look like timestep and
                # unit factors — TODO confirm against the simulation setup
                vel=p[0]/0.001*0.1
                plt.plot(step, lenght, 'g', label=vel)
            else:
                plt.plot(step, lenght, 'g')
            plt.plot(step,p[0]*step+p[1],'k-', label=p[1])
            plt.legend()
        else:
            plt.plot(step, lenght, 'g')
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid(True)
    plt.savefig(output, dpi=150)
|
zhekan/MDrun
|
src/distance.py
|
Python
|
agpl-3.0
| 2,447
|
'''
Import command for Winthrop team's spreadsheet. It can be invoked using::
python manage.py import_nysql [--justsammel] /path/to/csv
The ``--justsammel`` flag skips import of records to avoid
reproducing duplicates, but rebuilds the ``is_sammelband`` flag set and
produces an output list.
The expect behavior is designed for a once-off import and will produce
duplicate book entries (but not duplicates of any entries created
as part of book creation).
All persons created attempt to have a VIAF uri associated and all places
have a Geonames ID assigned if possible.
'''
from collections import defaultdict
from itertools import chain
import csv
import re
from django.core.management.base import BaseCommand, CommandError
from winthrop.books.models import Book, Publisher, OwningInstitution, \
Catalogue
from winthrop.people.models import Person
from winthrop.people.viaf import ViafAPI
from winthrop.places.models import Place
from winthrop.places.geonames import GeoNamesAPI
class Command(BaseCommand):
    '''Import NYSL book data into the database from a CSV file'''
    help = __doc__
    #: mapping of book model fields that can be filled in exactly as is
    #: from corresponding columns in the spreadsheet data
    fields_exact = {
        'title': 'Title',
        'short_title': 'Short Title',
        'red_catalog_number': 'RED catalogue number at the front',
        'ink_catalog_number': 'INK catalogue number at the front',
        'pencil_catalog_number': 'PENCIL catalogue number at the front',
        'original_pub_info': 'PUB INFO - Original',
        'notes': 'Notes'
    }
    #: fields that require cleanup, related model lookup, or other logic
    fields = {
        'pub_year': 'Year of Publication',
        'is_annotated': 'Annotated?',
        'flagged_info': 'FLAGGED PAGES FOR REPRODUCTION',
        'pub_place': 'Modern Place of Publication',
        'publisher': 'Standardized Name of Publisher',
        # NYSL cataloguing information
        'nysl_call_number': 'NYSL CALL NUMBER',
        'nysl_notes': 'NYSL -- NOTES'
    }
    # creator type and corresponding column in the spreadsheet
    creators = {
        'Author': 'AUTHOR, Standarized',
        'Translator': 'Translator',
        'Editor': 'Editor',
    }
    # currently unused
    # spreadsheet columns that are recognized but not yet imported
    other_fields = [
        'Number of Pages',
        'Type of Volume',
        'Subject Tagging (separate with semicolons)',
        'EDITION',
        'Books with important relationships to this text (separate with semicolons)',
        'NYSL DESCRIPTION',
        'Other documents that demonstrate this relationship (separate with semicolon)',
        'Provenance',
        'Physical Size'
    ]
def add_arguments(self, parser):
    """Register the positional CSV path and the --justsammel flag."""
    parser.add_argument('input_file')
    # --justsammel skips record import and only rebuilds sammelband flags
    parser.add_argument(
        '--justsammel',
        action='store_true',
        default=False,
        dest='just_sammel',
        help='Just make sammelband connections')
def handle(self, *args, **kwargs):
    """Entry point: import each CSV row as a book, then rebuild the
    sammelband flags.

    NOTE(review): indentation reconstructed from syntax; the summary
    write is assumed to belong to the import branch, and the literal
    whitespace inside the summary string may have been lost in transit.
    """
    input_file = kwargs['input_file']
    self.mocking = False
    # TODO: create fixture for NYSL & NYC ?
    # all books will be catalogued with NYSL, so look for
    # owning instution object first
    # (no need to check because NYSL is preloaded by migrations)
    self.nysl = OwningInstitution.objects.get(short_name='NYSL')
    # per-model counters (book/place/person/publisher/err)
    self.stats = defaultdict(int)
    if not kwargs['just_sammel']:
        with open(input_file) as csvfile:
            csvreader = csv.DictReader(csvfile)
            # each row in the CSV corresponds to a book record
            for row in csvreader:
                try:
                    self.create_book(row)
                except Exception as err:
                    # report and count, but keep importing remaining rows
                    print('Error on import for %s: %s' %
                          (row['Short Title'][:30], err))
                    self.stats['err'] += 1
        # summarize what content was imported/created
        self.stdout.write('''Imported content:
%(book)d books
%(place)d places
%(person)d people
%(publisher)d publishers
%(err)d errors''' % self.stats)
    # Now look for is_sammelband and set the flag
    self.build_sammelband()
def viaf_lookup(self, name):
    """Look up a name in VIAF and return its URI, or None.

    Only the top suggestion is considered, and only when VIAF marks it
    as a personal name.
    """
    viaf = ViafAPI()
    suggestions = viaf.suggest(name)
    if not suggestions:
        # Handle no results
        return None
    top = suggestions[0]
    # Check for a 'nametype' and make sure it's personal
    if 'nametype' in top and top['nametype'] == 'personal':
        return viaf.uri_from_id(top['viafid'])
    return None
def geonames_lookup(self, place_name):
    '''Function to wrap a GeoNames lookup and assign info.
    Returns a dict for Place generator or None'''
    geo = GeoNamesAPI()
    # Get the top hit and presume the API guessed correctly
    hits = geo.search(place_name, max_rows=1)
    if not hits:
        return None
    top = hits[0]
    return {
        'latitude': float(top['lat']),
        'longitude': float(top['lng']),
        'geonames_id': geo.uri_from_id(top['geonameId']),
    }
def create_book(self, data):
    """Create a Book plus related Place/Publisher/Person/Catalogue records
    from one CSV row (*data* is a csv.DictReader row keyed by column header).
    """
    # create a new book and all related models from
    # a row of data in the spreadsheet
    # nysl books, therefore assuming all are extant
    newbook = Book(is_extant=True)
    # set fields that can be mapped directly from the spreadsheet
    # aside from removing periods
    for model_field, csv_field in self.fields_exact.items():
        value = data[csv_field]
        # special case: some of the catalog numbers have
        # been entered as "NA" in the spreadsheet; skip those
        if model_field.endswith('catalog_number') and \
           value == 'NA':
            continue
        # special case: some books are missing a short title
        # supply those with first three words of title
        if model_field == 'short_title' and not value:
            words = data['Title'].strip('. ').split()
            value = (' '.join(words[0:3])).strip('.')
        # special case: strip periods for title and short_title
        if model_field == 'title':
            value = data['Title'].strip('. ')
        setattr(newbook, model_field, value)
    # handle book fields that require some logic
    # - publication year might have brackets, e.g. [1566],
    # but model stores it as an integer
    stripped_spaces_only = data[self.fields['pub_year']].strip()
    pub_year = data[self.fields['pub_year']].strip('[]?.nd ')
    # Ranges ("1560-1570") or corrections ("1560 i.e. 1561") can't be stored
    # as an integer: preserve the raw text in notes and keep the first number.
    if re.search(r'-|i\.e\.', pub_year):
        if newbook.notes:
            newbook.notes += '\n\nAdditional Publication Year Info: %s' %\
                stripped_spaces_only
        else:
            newbook.notes = 'Additional Publication Year Info: %s' %\
                stripped_spaces_only
        # First run of digits up to the first non-digit character.
        pub_year = (re.match(r'\d+?(?=\D)', pub_year)).group(0)
    if pub_year:
        newbook.pub_year = pub_year
    # - is annotated; spreadsheet has variants in upper/lower case
    # and trailing periods; in some cases there are notes;
    # for now, assuming that anything ambiguous should be false here
    annotated = data[self.fields['is_annotated']].lower().strip('. ')
    newbook.is_annotated = (annotated == 'yes')
    # - flagged_info; pull info for flagged pages and add if it exists
    if annotated == 'yes':
        flagged_info = data[self.fields['flagged_info']].strip()
        if flagged_info:
            if newbook.notes:
                newbook.notes += '\n\nReproduction Recommendation: %s' %\
                    flagged_info
            else:
                newbook.notes = 'Reproduction Recommendation: %s' %\
                    flagged_info
    # add required relationships before saving the new book
    # - place
    placename = data[self.fields['pub_place']].strip(' ?[]()')
    # Discard place "names" shorter than 3 chars once punctuation is
    # removed -- catches placeholders like "n.p.".
    if placename and len((re.sub(r'[.,]', '', placename))) < 3:
        placename = None
    if placename:
        try:
            place = Place.objects.get(name=placename)
        except Place.DoesNotExist:
            place_dict = self.geonames_lookup(placename)
            if place_dict:
                place = Place.objects.create(name=placename, **place_dict)
            else:
                # GeoNames had no match; store a stub at (0, 0).
                place = Place.objects.create(
                    name=placename,
                    latitude=0.0,
                    longitude=0.0,
                )
            self.stats['place'] += 1
        newbook.pub_place = place
    # - publisher
    publisher_name = data[self.fields['publisher']].strip("?. ")
    # Catch np/sn
    if publisher_name and len(publisher_name) < 4:
        publisher_name = None
    if publisher_name:
        try:
            publisher = Publisher.objects.get(name=publisher_name)
        except Publisher.DoesNotExist:
            publisher = Publisher.objects.create(name=publisher_name)
            self.stats['publisher'] += 1
        newbook.publisher = publisher
    newbook.save()
    # TODO: do we need to handle multiple creators here?
    for creator_type, csv_field in self.creators.items():
        # name could be empty (e.g. for translator, editor)
        name = data[csv_field]
        # Get rid of any last stray periods, if they exist
        name = name.strip('?. []')
        # Get various versions of 'Not sure' and remove name if they exist
        # NOTE(review): [A|a] and [N|n] are character classes that also
        # match a literal '|'; probably intended as [Aa] / [Nn] -- confirm.
        if re.search(r'[Vv]arious|[A|a]nonymous|[N|n]one [G|g]iven', name):
            name = None
        # Use four characters as a dumb filter to toss stray 'np'/'sn'
        if name and len(name) <= 4:
            name = None
        if name:
            try:
                person = Person.objects.get(authorized_name=name)
            except Person.DoesNotExist:
                viafid = self.viaf_lookup(name)
                person = Person.objects.create(authorized_name=name,
                                               viaf_id=viafid)
                self.stats['person'] += 1
            newbook.add_creator(person, creator_type)
    # catalogue as a current NYSL book
    Catalogue.objects.create(institution=self.nysl, book=newbook,
                             is_current=True,
                             call_number=data[self.fields['nysl_call_number']],
                             notes=data[self.fields['nysl_notes']])
    self.stats['book'] += 1
def build_sammelband(self):
    '''Create sammelband flag for books with same/similar NYSL catalog numbers'''
    # All the catalogues just created
    catalogue_set = Catalogue.objects.all()
    # Call number list, not yet made unique
    call_nos = []
    self.stdout.write('Now checking for bound volumes:')
    for catalogue in catalogue_set:
        # Remove letters that obscure sammelbands
        call_search = (catalogue.call_number).strip('abcdefgh')
        # Compile once per outer catalogue instead of once per comparison:
        # the pattern depends only on the outer loop's call number.
        search_re = re.compile(r'%s$' % call_search)
        match_count = 0
        for entry in catalogue_set:
            if search_re.match((entry.call_number).strip('abcdefgh')):
                match_count += 1
        # If match happened more than once, assume sammelband
        # (the catalogue always matches itself, hence > 1).
        if match_count > 1:
            call_nos.append(catalogue.call_number)
            catalogue.is_sammelband = True
            catalogue.save()
    # A sorted unique vol list
    sorted_vols = sorted(set(call_nos))
    # Get a list of books that are associated with a sammelband entry
    cat_list = []
    for number in sorted_vols:
        q = Catalogue.objects.filter(call_number=number)
        cat_list = chain(cat_list, q)
    self.stdout.write(' Number of call numbers that seem to have '
                      'multiple bound titles: %s' % len(sorted_vols))
    self.stdout.write('The following titles are marked as sammelband:')
    # Numbered report of the flagged titles for the project team.
    for i, cat in enumerate(cat_list, 1):
        self.stdout.write(' %s. Short Title: %s - NYSL Call Number: %s'
                          % (i, cat.book.short_title, cat.call_number))
|
Princeton-CDH/winthrop-django
|
winthrop/books/management/commands/import_nysl.py
|
Python
|
apache-2.0
| 12,753
|
# coding=utf-8
# Copyright 2016 Flowdas Inc. <prospero@flowdas.com>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Resolve this package's installed version at import time.
from pkg_resources import get_distribution
__author__ = u'오동권(Dong-gweon Oh) <prospero@flowdas.com>'
# 'version' attribute of the installed distribution, or None if absent.
__version__ = getattr(get_distribution('flowdas-meta'), 'version', None)
# Keep the helper out of the public module namespace.
del get_distribution
# Re-export the public API of the package submodules.
from .type import *
from .property import *
from .entity import *
from .stdtypes import *
|
flowdas/meta
|
flowdas/meta/__init__.py
|
Python
|
mpl-2.0
| 563
|
import json
import pytest
from awx.main.models.credential import CredentialType, Credential
from awx.api.versioning import reverse
@pytest.mark.django_db
def test_list_as_unauthorized_xfail(get):
    # Anonymous requests may not list credential types.
    response = get(reverse('api:credential_type_list'))
    assert response.status_code == 401


@pytest.mark.django_db
@pytest.mark.parametrize('method, valid', [
    ('GET', sorted(dict(CredentialType.KIND_CHOICES).keys())),
    ('POST', ['cloud', 'net']),
])
def test_options_valid_kinds(method, valid, options, admin):
    # OPTIONS advertises every kind for GET, but only the
    # user-creatable kinds (cloud/net) for POST.
    response = options(reverse('api:credential_type_list'), admin)
    choices = sorted(dict(response.data['actions'][method]['kind']['choices']).keys())
    assert valid == choices


@pytest.mark.django_db
def test_options_valid_put_kinds(options, admin):
    # PUT on the detail view likewise only offers user-creatable kinds.
    ssh = CredentialType.defaults['ssh']()
    ssh.save()
    response = options(reverse('api:credential_type_detail', kwargs={'pk': ssh.pk}), admin)
    choices = sorted(dict(response.data['actions']['PUT']['kind']['choices']).keys())
    assert ['cloud', 'net'] == choices


@pytest.mark.django_db
def test_list_as_normal_user(get, alice):
    # Any authenticated user can read the credential type list.
    ssh = CredentialType.defaults['ssh']()
    ssh.save()
    response = get(reverse('api:credential_type_list'), alice)
    assert response.status_code == 200
    assert response.data['count'] == 1


@pytest.mark.django_db
def test_list_as_admin(get, admin):
    # Admins can read the credential type list.
    ssh = CredentialType.defaults['ssh']()
    ssh.save()
    response = get(reverse('api:credential_type_list'), admin)
    assert response.status_code == 200
    assert response.data['count'] == 1
@pytest.mark.django_db
def test_create_as_unauthorized_xfail(get, post):
    # Anonymous requests may not create credential types.
    response = post(reverse('api:credential_type_list'), {
        'name': 'Custom Credential Type',
    })
    assert response.status_code == 401


@pytest.mark.django_db
def test_update_as_unauthorized_xfail(patch, delete):
    # Anonymous requests may neither modify nor delete.
    ssh = CredentialType.defaults['ssh']()
    ssh.save()
    url = reverse('api:credential_type_detail', kwargs={'pk': ssh.pk})
    response = patch(url, {'name': 'Some Other Name'})
    assert response.status_code == 401
    assert delete(url).status_code == 401


@pytest.mark.django_db
def test_update_managed_by_tower_xfail(patch, delete, admin):
    # Built-in (managed) types are read-only even for admins.
    ssh = CredentialType.defaults['ssh']()
    ssh.save()
    url = reverse('api:credential_type_detail', kwargs={'pk': ssh.pk})
    response = patch(url, {'name': 'Some Other Name'}, admin)
    assert response.status_code == 403
    assert delete(url, admin).status_code == 403


@pytest.mark.django_db
def test_update_credential_type_in_use_xfail(patch, delete, admin):
    # A type with existing credentials may be renamed, but its
    # inputs schema can no longer be changed, nor the type deleted.
    _type = CredentialType(kind='cloud', inputs={'fields': []})
    _type.save()
    Credential(credential_type=_type, name='My Custom Cred').save()

    url = reverse('api:credential_type_detail', kwargs={'pk': _type.pk})
    response = patch(url, {'name': 'Some Other Name'}, admin)
    assert response.status_code == 200

    url = reverse('api:credential_type_detail', kwargs={'pk': _type.pk})
    response = patch(url, {'inputs': {}}, admin)
    assert response.status_code == 403
    assert delete(url, admin).status_code == 403


@pytest.mark.django_db
def test_update_credential_type_success(get, patch, delete, admin):
    # An unused custom type is fully editable and deletable by an admin.
    _type = CredentialType(kind='cloud')
    _type.save()

    url = reverse('api:credential_type_detail', kwargs={'pk': _type.pk})
    response = patch(url, {'name': 'Some Other Name'}, admin)
    assert response.status_code == 200

    assert get(url, admin).data.get('name') == 'Some Other Name'
    assert delete(url, admin).status_code == 204
@pytest.mark.django_db
def test_delete_as_unauthorized_xfail(delete):
    # Anonymous requests may not delete credential types.
    ssh = CredentialType.defaults['ssh']()
    ssh.save()
    response = delete(
        reverse('api:credential_type_detail', kwargs={'pk': ssh.pk}),
    )
    assert response.status_code == 401


@pytest.mark.django_db
def test_create_as_normal_user_xfail(get, post, alice):
    # Non-admin users cannot create types; nothing is persisted.
    response = post(reverse('api:credential_type_list'), {
        'name': 'Custom Credential Type',
    }, alice)
    assert response.status_code == 403
    assert get(reverse('api:credential_type_list'), alice).data['count'] == 0


@pytest.mark.django_db
def test_create_as_admin(get, post, admin):
    # Admins can create a custom cloud type; it is stored verbatim
    # and is not marked as managed by Tower.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'Custom Credential Type',
        'inputs': {},
        'injectors': {}
    }, admin)
    assert response.status_code == 201

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 1
    assert response.data['results'][0]['name'] == 'Custom Credential Type'
    assert response.data['results'][0]['inputs'] == {}
    assert response.data['results'][0]['injectors'] == {}
    assert response.data['results'][0]['managed_by_tower'] is False


@pytest.mark.django_db
def test_create_managed_by_tower_readonly(get, post, admin):
    # managed_by_tower is read-only: a client-supplied True is ignored.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'Custom Credential Type',
        'inputs': {},
        'injectors': {},
        'managed_by_tower': True
    }, admin)
    assert response.status_code == 201

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 1
    assert response.data['results'][0]['managed_by_tower'] is False


@pytest.mark.django_db
def test_create_dependencies_not_supported(get, post, admin):
    # The 'dependencies' inputs key is rejected for custom types.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'Custom Credential Type',
        'inputs': {'dependencies': {'foo': ['bar']}},
        'injectors': {},
    }, admin)
    assert response.status_code == 400
    assert response.data['inputs'] == ["'dependencies' is not supported for custom credentials."]

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 0
@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['cloud', 'net'])
def test_create_valid_kind(kind, get, post, admin):
    # Only 'cloud' and 'net' kinds are valid for custom types.
    response = post(reverse('api:credential_type_list'), {
        'kind': kind,
        'name': 'My Custom Type',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True
            }]
        },
        'injectors': {}
    }, admin)
    assert response.status_code == 201

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 1


@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['ssh', 'vault', 'scm', 'insights'])
def test_create_invalid_kind(kind, get, post, admin):
    # Built-in-only kinds are rejected for custom types.
    response = post(reverse('api:credential_type_list'), {
        'kind': kind,
        'name': 'My Custom Type',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True
            }]
        },
        'injectors': {}
    }, admin)
    assert response.status_code == 400

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 0


@pytest.mark.django_db
def test_create_with_valid_inputs(get, post, admin):
    # A well-formed inputs schema is persisted field-for-field.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True
            }]
        },
        'injectors': {}
    }, admin)
    assert response.status_code == 201

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 1
    fields = response.data['results'][0]['inputs']['fields']
    assert len(fields) == 1
    assert fields[0]['id'] == 'api_token'
    assert fields[0]['label'] == 'API Token'
    assert fields[0]['secret'] is True
    assert fields[0]['type'] == 'string'


@pytest.mark.django_db
def test_create_with_required_inputs(get, post, admin):
    # The 'required' list in the inputs schema is persisted as given.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True
            }],
            'required': ['api_token'],
        },
        'injectors': {}
    }, admin)
    assert response.status_code == 201

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 1
    required = response.data['results'][0]['inputs']['required']
    assert required == ['api_token']
@pytest.mark.django_db
@pytest.mark.parametrize('inputs', [
    True,
    100,
    [1, 2, 3, 4],
    'malformed',
    {'feelds': {}},
    {'fields': [123, 234, 345]},
    {'fields': [{'id':'one', 'label':'One'}, 234]},
    {'feelds': {}, 'fields': [{'id':'one', 'label':'One'}, 234]}
])
def test_create_with_invalid_inputs_xfail(post, admin, inputs):
    # Non-dict, mis-keyed, or partially malformed inputs are all rejected.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': inputs,
        'injectors': {}
    }, admin)
    assert response.status_code == 400


@pytest.mark.django_db
@pytest.mark.parametrize('injectors', [
    True,
    100,
    [1, 2, 3, 4],
    'malformed',
    {'mal': 'formed'},
    {'env': {'ENV_VAR': 123}, 'mal': 'formed'},
    {'env': True},
    {'env': [1, 2, 3]},
    {'file': True},
    {'file': [1, 2, 3]},
    {'extra_vars': True},
    {'extra_vars': [1, 2, 3]},
])
def test_create_with_invalid_injectors_xfail(post, admin, injectors):
    # Injectors must be a dict of env/file/extra_vars mappings.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': {},
        'injectors': injectors,
    }, admin)
    assert response.status_code == 400


@pytest.mark.django_db
def test_ask_at_runtime_xfail(get, post, admin):
    # ask_at_runtime is only supported by the built-in SSH and Vault types
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True,
                'ask_at_runtime': True
            }]
        },
        'injectors': {
            'env': {
                'ANSIBLE_MY_CLOUD_TOKEN': '{{api_token}}'
            }
        }
    }, admin)
    assert response.status_code == 400

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 0


@pytest.mark.django_db
def test_create_with_valid_injectors(get, post, admin):
    # An env injector templated on a defined input field is persisted.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True
            }]
        },
        'injectors': {
            'env': {
                'AWX_MY_CLOUD_TOKEN': '{{api_token}}'
            }
        }
    }, admin, expect=201)

    response = get(reverse('api:credential_type_list'), admin)
    assert response.data['count'] == 1
    injectors = response.data['results'][0]['injectors']
    assert len(injectors) == 1
    assert injectors['env'] == {
        'AWX_MY_CLOUD_TOKEN': '{{api_token}}'
    }


@pytest.mark.django_db
def test_create_with_undefined_template_variable_xfail(post, admin):
    # Injector templates referencing an undefined input field are rejected.
    response = post(reverse('api:credential_type_list'), {
        'kind': 'cloud',
        'name': 'MyCloud',
        'inputs': {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True
            }]
        },
        'injectors': {
            'env': {'AWX_MY_CLOUD_TOKEN': '{{api_tolkien}}'}
        }
    }, admin)
    assert response.status_code == 400
    assert "'api_tolkien' is undefined" in json.dumps(response.data)
|
wwitzel3/awx
|
awx/main/tests/functional/api/test_credential_type.py
|
Python
|
apache-2.0
| 12,160
|
import json
from pathlib import Path
from i3configger import config
from i3configger.build import persist_results
def test_initialization(tmp_path, monkeypatch):
    """Given empty sources directory a new config is created from defaults"""
    # Point config discovery at an empty temp dir instead of the real i3 path.
    monkeypatch.setattr(config, "get_i3wm_config_path", lambda: tmp_path)
    assert not (tmp_path / "config.d").exists()
    config.ensure_i3_configger_sanity()
    cnf = config.I3configgerConfig()
    # A default config file must have been written ...
    assert cnf.configPath.exists()
    assert cnf.configPath.is_file()
    assert cnf.configPath.name == config.I3configgerConfig.CONFIG_NAME
    # ... containing only the default top-level sections.
    payload = json.loads(cnf.configPath.read_text())
    assert "main" in payload
    assert "bars" in payload
    assert "targets" in payload["bars"]
    assert "set" not in payload
    assert "select" not in payload
    assert "shadow" not in payload
def test_config_backup_is_not_overwritten(tmp_path):
    """Given an existing backup it is not overwritten by subsequent builds."""
    firstThing = "first thing"
    somePath = tmp_path / "some-path.txt"
    backupPath = Path(str(somePath) + ".bak")
    somePath.write_text(firstThing)
    # First build creates the .bak with the original content.
    persist_results({somePath: firstThing})
    assert backupPath.read_text() == firstThing
    otherThing = "other thing"
    # A second build overwrites the target but must leave the backup alone.
    persist_results({somePath: otherThing})
    assert somePath.read_text() == otherThing
    assert backupPath.read_text() == firstThing
|
obestwalter/i3configger
|
tests/test_config.py
|
Python
|
mit
| 1,387
|
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from modules.libraries import client
def resolve(url):
    """Resolve a novamov embed/page URL to a direct stream URL.

    Best-effort: returns the stream URL string, or None on any failure.
    Regex patterns are raw strings -- '\\w' / '\\s' in plain literals are
    invalid escape sequences in modern Python.
    """
    try:
        # Extract the video id from path-style or ?v= style URLs.
        video_id = re.compile(r'//.+?/.+?/([\w]+)').findall(url)
        video_id += re.compile(r'//.+?/.+?v=([\w]+)').findall(url)
        video_id = video_id[0]

        url = 'http://embed.novamov.com/embed.php?v=%s' % video_id
        result = client.request(url)

        # The player page stores the file key in flashvars.filekey,
        # sometimes indirected through another variable assignment.
        key = re.compile(r'flashvars.filekey=(.+?);').findall(result)[-1]
        try: key = re.compile(r'\s+%s="(.+?)"' % key).findall(result)[-1]
        except: pass

        url = 'http://www.novamov.com/api/player.api.php?key=%s&file=%s' % (key, video_id)
        result = client.request(url)

        # API response is querystring-shaped: url=<stream>&...
        url = re.compile(r'url=(.+?)&').findall(result)[0]

        return url
    except:
        # Deliberate best-effort: any failure yields None.
        return
|
marnnie/Cable-buenaventura
|
plugin.video.genesis/modules/resolvers/novamov.py
|
Python
|
gpl-2.0
| 1,447
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-memcache
# [START memcache_v1beta2_generated_CloudMemcache_UpdateInstance_async]
from google.cloud import memcache_v1beta2
async def sample_update_instance():
    """Generated sample: update a Memcached instance via the async client
    and wait for the long-running operation to complete."""
    # Create a client
    client = memcache_v1beta2.CloudMemcacheAsyncClient()

    # Initialize request argument(s)
    resource = memcache_v1beta2.Instance()
    resource.name = "name_value"
    resource.node_count = 1070
    resource.node_config.cpu_count = 976
    resource.node_config.memory_size_mb = 1505

    request = memcache_v1beta2.UpdateInstanceRequest(
        resource=resource,
    )

    # Make the request
    # NOTE(review): on the async client this call returns an awaitable;
    # confirm whether it should be `await client.update_instance(...)`.
    # This file is generated -- fix the generator, not the sample.
    operation = client.update_instance(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)
# [END memcache_v1beta2_generated_CloudMemcache_UpdateInstance_async]
|
googleapis/python-memcache
|
samples/generated_samples/memcache_v1beta2_generated_cloud_memcache_update_instance_async.py
|
Python
|
apache-2.0
| 1,779
|
#!/usr/bin/python
#: ----------------------------------------------------------------------------
#: Copyright (C) 2017 Verizon. All Rights Reserved.
#: All Rights Reserved
#:
#: file: transform.py
#: details: memsql pipline transform python script
#: author: Mehrdad Arshad Rad
#: date: 04/27/2017
#:
#: Licensed under the Apache License, Version 2.0 (the "License");
#: you may not use this file except in compliance with the License.
#: You may obtain a copy of the License at
#:
#: http://www.apache.org/licenses/LICENSE-2.0
#:
#: Unless required by applicable law or agreed to in writing, software
#: distributed under the License is distributed on an "AS IS" BASIS,
#: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#: See the License for the specific language governing permissions and
#: limitations under the License.
#: ----------------------------------------------------------------------------
import json
import struct
import sys
import time
def transform_records():
    """Yield one raw record payload at a time from stdin.

    Wire format: an 8-byte native unsigned-long ("L") length prefix
    followed by that many bytes of payload, repeated until EOF.
    NOTE(review): reads sys.stdin directly and struct-unpacks the result,
    which assumes a Python 2 style byte stream -- confirm before porting
    (Python 3 would need sys.stdin.buffer).
    """
    while True:
        byte_len = sys.stdin.read(8)
        if len(byte_len) == 8:
            byte_len = struct.unpack("L", byte_len)[0]
            result = sys.stdin.read(byte_len)
            yield result
        else:
            # Anything other than a clean EOF (0 bytes read) is a framing
            # error; fail loudly instead of yielding a truncated record.
            assert len(byte_len) == 0, byte_len
            return
# Main pipeline loop: decode each JSON-encoded vFlow message from stdin and
# emit one tab-separated row per flow for the MemSQL pipeline.
for records in transform_records():
    flows = json.loads(records)
    # Header.ExportTime is a Unix timestamp; render it for the DB column.
    exported_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                  time.localtime(flows["Header"]["ExportTime"]))
    try:
        for flow in flows["DataSets"]:
            # Defaults for information elements a record may not carry.
            sourceIPAddress = "unknown"
            destinationIPAddress = "unknown"
            bgpSourceAsNumber = "unknown"
            bgpDestinationAsNumber = "unknown"
            protocolIdentifier = 0
            sourceTransportPort = 0
            destinationTransportPort = 0
            tcpControlBits = "unknown"
            ipNextHopIPAddress = "unknown"
            octetDeltaCount = 0
            ingressInterface = 0
            egressInterface = 0
            # Map IANA IPFIX information element IDs onto row fields.
            for field in flow:
                if field["I"] in [214]:
                    # Skip this whole data set (caught by the outer except).
                    raise
                elif field["I"] in [8, 27]:
                    sourceIPAddress = field["V"]
                elif field["I"] in [12, 28]:
                    destinationIPAddress = field["V"]
                elif field["I"] in [15, 62]:
                    ipNextHopIPAddress = field["V"]
                elif field["I"] == 16:
                    bgpSourceAsNumber = field["V"]
                elif field["I"] == 17:
                    bgpDestinationAsNumber = field["V"]
                elif field["I"] == 10:
                    # IANA IPFIX IE 10 is ingressInterface (the original
                    # code had IE 10 and IE 14 swapped).
                    ingressInterface = field["V"]
                elif field["I"] == 14:
                    # IANA IPFIX IE 14 is egressInterface.
                    egressInterface = field["V"]
                elif field["I"] == 7:
                    sourceTransportPort = field["V"]
                elif field["I"] == 11:
                    destinationTransportPort = field["V"]
                elif field["I"] == 4:
                    protocolIdentifier = field["V"]
                elif field["I"] == 6:
                    tcpControlBits = field["V"]
                elif field["I"] == 1:
                    octetDeltaCount = field["V"]
            # Column order must match the MemSQL pipeline's table schema.
            out = b"%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" \
                % (
                    flows["AgentID"],
                    sourceIPAddress,
                    destinationIPAddress,
                    ipNextHopIPAddress,
                    bgpSourceAsNumber,
                    bgpDestinationAsNumber,
                    protocolIdentifier,
                    sourceTransportPort,
                    destinationTransportPort,
                    tcpControlBits,
                    ingressInterface,
                    egressInterface,
                    octetDeltaCount,
                    exported_time,
                )
            sys.stdout.write(out)
    except:
        # Best-effort transform: drop the malformed/skipped message.
        continue
|
VerizonDigital/vflow
|
consumers/memsql/transform.py
|
Python
|
apache-2.0
| 4,027
|
# -*- coding: utf-8 -*-
import datetime
from factory import Sequence, Faker, SubFactory
from factory.alchemy import SQLAlchemyModelFactory
from liebraryrest.database import db
from liebraryrest.models import Author, Book, User
class BaseFactory(SQLAlchemyModelFactory):
    """Base factory."""

    class Meta:
        """Factory configuration."""
        # Abstract: never instantiated directly; all subclasses persist
        # through the application's shared SQLAlchemy session.
        abstract = True
        sqlalchemy_session = db.session
class UserFactory(BaseFactory):
    """User factory"""

    class Meta:
        """Factory configuration."""
        model = User

    # Faker generates a plausible unique-ish login per instance.
    nickname = Faker('user_name')
class AuthorFactory(BaseFactory):
    """Author factory."""

    class Meta:
        """Factory configuration."""
        model = Author
        # Author's constructor takes these two as positional arguments.
        inline_args = ('first_name', 'last_name')

    first_name = Faker('first_name')
    last_name = Faker('last_name')
    # Deterministic distinct birth dates: one day later per instance.
    birth_date = Sequence(lambda n: datetime.date(1975, 1, 1) + datetime.timedelta(days=n))
class BookFactory(BaseFactory):
    """Book factory."""

    class Meta:
        """Factory configuration."""
        model = Book

    # EAN-13 doubles as a syntactically valid ISBN-13.
    isbn = Faker('ean13')
    title = Faker('sentence', nb_words=6, variable_nb_words=True)
    # Each book gets its own generated Author.
    author = SubFactory(AuthorFactory)
    abstract = Faker('text', max_nb_chars=350)
    pages = Sequence(lambda n: n)
    publisher = Faker('word')
    # Always at least one copy in stock.
    quantity = Sequence(lambda n: 1 + n)
|
gekorob/liebraryrest
|
tests/factories.py
|
Python
|
bsd-3-clause
| 1,354
|
from . import idnadata
import bisect
import unicodedata
import re
import sys
from .intranges import intranges_contain
# Canonical combining class of virama characters (Unicode ccc=9), used by
# the CONTEXTJ joiner rules.
_virama_combining_class = 9
# ACE prefix that marks a Punycode-encoded (A-) label.
_alabel_prefix = b'xn--'
# Label separators: '.', ideographic full stop, fullwidth and halfwidth dots.
_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
# Exception hierarchy: everything derives from IDNAError, which itself
# subclasses UnicodeError so callers catching UnicodeError keep working.
class IDNAError(UnicodeError):
    """ Base exception for all IDNA-encoding related problems """
    pass


class IDNABidiError(IDNAError):
    """ Exception when bidirectional requirements are not satisfied """
    pass


class InvalidCodepoint(IDNAError):
    """ Exception when a disallowed or unallocated codepoint is used """
    pass


class InvalidCodepointContext(IDNAError):
    """ Exception when the codepoint is not valid in the context it is used """
    pass
def _combining_class(cp):
v = unicodedata.combining(chr(cp))
if v == 0:
if not unicodedata.name(chr(cp)):
raise ValueError('Unknown character in unicodedata')
return v
def _is_script(cp, script):
    # True if character *cp* belongs to *script* per the idnadata tables.
    return intranges_contain(ord(cp), idnadata.scripts[script])
def _punycode(s):
return s.encode('punycode')
def _unot(s):
return 'U+{:04X}'.format(s)
def valid_label_length(label):
    """True if *label* fits the 63-octet DNS label limit (RFC 1035)."""
    return len(label) <= 63
def valid_string_length(label, trailing_dot):
    """True if the whole domain fits the DNS name limit: 254 octets when a
    trailing dot is present, 253 otherwise."""
    limit = 254 if trailing_dot else 253
    return len(label) <= limit
def check_bidi(label, check_ltr=False):
    """Enforce the Bidi Rule of RFC 5893 on *label*.

    Returns True when the label passes; raises IDNABidiError otherwise.
    Labels containing no RTL codepoints are exempt unless *check_ltr*
    forces validation of LTR labels as well.
    """
    # Bidi rules should only be applied if string contains RTL characters
    bidi_label = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)
        if direction == '':
            # String likely comes from a newer version of Unicode
            raise IDNABidiError('Unknown directionality in label {} at position {}'.format(repr(label), idx))
        if direction in ['R', 'AL', 'AN']:
            bidi_label = True
    if not bidi_label and not check_ltr:
        return True

    # Bidi rule 1
    direction = unicodedata.bidirectional(label[0])
    if direction in ['R', 'AL']:
        rtl = True
    elif direction == 'L':
        rtl = False
    else:
        raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label)))

    valid_ending = False
    # Records the first numeral class seen ('AN' or 'EN'); mixing the two
    # in one RTL label is forbidden (rule 4).
    number_type = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)

        if rtl:
            # Bidi rule 2
            if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {} in a right-to-left label'.format(idx))
            # Bidi rule 3
            if direction in ['R', 'AL', 'EN', 'AN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False
            # Bidi rule 4
            if direction in ['AN', 'EN']:
                if not number_type:
                    number_type = direction
                else:
                    if number_type != direction:
                        raise IDNABidiError('Can not mix numeral types in a right-to-left label')
        else:
            # Bidi rule 5
            if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {} in a left-to-right label'.format(idx))
            # Bidi rule 6
            if direction in ['L', 'EN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False

    if not valid_ending:
        raise IDNABidiError('Label ends with illegal codepoint directionality')

    return True
def check_initial_combiner(label):
    """Reject labels whose first character is a combining mark (RFC 5891)."""
    # Combining marks have a General_Category beginning with 'M' (Mn/Mc/Me).
    first_category = unicodedata.category(label[0])
    if first_category[0] == 'M':
        raise IDNAError('Label begins with an illegal combining character')
    return True
def check_hyphen_ok(label):
    """Enforce RFC 5891 hyphen restrictions on a label."""
    # Positions 3-4 are reserved for the ACE prefix ('xn--').
    if label[2:4] == '--':
        raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
    # A label may not begin or end with a hyphen.
    if '-' in (label[0], label[-1]):
        raise IDNAError('Label must not start or end with a hyphen')
    return True
def check_nfc(label):
    """Raise IDNAError unless *label* is already in Unicode NFC form."""
    normalized = unicodedata.normalize('NFC', label)
    if normalized != label:
        raise IDNAError('Label must be in Normalization Form C')
def valid_contextj(label, pos):
    """Apply the CONTEXTJ rules (RFC 5892 Appendix A) to the joiner at
    label[pos]; return True when its context makes it permissible."""
    cp_value = ord(label[pos])

    if cp_value == 0x200c:
        # ZERO WIDTH NON-JOINER: allowed directly after a virama ...
        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True

        # ... or between joining characters: scan left for a joining type
        # of L or D, skipping transparent (T) characters.
        ok = False
        for i in range(pos-1, -1, -1):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('L'), ord('D')]:
                ok = True
                break
        if not ok:
            return False

        # Then scan right for R or D, again skipping T characters.
        ok = False
        for i in range(pos+1, len(label)):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('R'), ord('D')]:
                ok = True
                break
        return ok

    if cp_value == 0x200d:
        # ZERO WIDTH JOINER: only allowed directly after a virama.
        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True
        return False

    else:
        # Not a joiner this function knows about.
        return False
def valid_contexto(label, pos, exception=False):
    """Apply the CONTEXTO rules (RFC 5892 Appendix A) to label[pos].

    Covers MIDDLE DOT, GREEK LOWER NUMERAL SIGN, HEBREW GERESH/GERSHAYIM,
    KATAKANA MIDDLE DOT, and the two Arabic-Indic digit ranges.
    NOTE(review): implicitly returns None for any other codepoint; callers
    only invoke this for registered CONTEXTO codepoints.
    """
    cp_value = ord(label[pos])

    if cp_value == 0x00b7:
        # MIDDLE DOT: must sit between two U+006C ('l'), as in Catalan.
        if 0 < pos < len(label)-1:
            if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
                return True
        return False

    elif cp_value == 0x0375:
        # GREEK LOWER NUMERAL SIGN: must precede a Greek character.
        if pos < len(label)-1 and len(label) > 1:
            return _is_script(label[pos + 1], 'Greek')
        return False

    elif cp_value == 0x05f3 or cp_value == 0x05f4:
        # HEBREW GERESH / GERSHAYIM: must follow a Hebrew character.
        if pos > 0:
            return _is_script(label[pos - 1], 'Hebrew')
        return False

    elif cp_value == 0x30fb:
        # KATAKANA MIDDLE DOT: label must contain Hiragana/Katakana/Han.
        for cp in label:
            if cp == '\u30fb':
                continue
            if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
                return True
        return False

    elif 0x660 <= cp_value <= 0x669:
        # ARABIC-INDIC digits may not be mixed with extended Arabic-Indic.
        for cp in label:
            if 0x6f0 <= ord(cp) <= 0x06f9:
                return False
        return True

    elif 0x6f0 <= cp_value <= 0x6f9:
        # EXTENDED ARABIC-INDIC digits may not be mixed with Arabic-Indic.
        for cp in label:
            if 0x660 <= ord(cp) <= 0x0669:
                return False
        return True
def check_label(label):
    """Validate one label: non-empty, NFC, hyphen rules, no leading
    combining mark, per-codepoint IDNA status, and the Bidi rule.

    Raises IDNAError (or a subclass) on the first violation.
    """
    if isinstance(label, (bytes, bytearray)):
        label = label.decode('utf-8')
    if len(label) == 0:
        raise IDNAError('Empty Label')

    check_nfc(label)
    check_hyphen_ok(label)
    check_initial_combiner(label)

    for (pos, cp) in enumerate(label):
        cp_value = ord(cp)
        if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
            continue
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
            # Joiners need contextual checks; _combining_class may raise
            # ValueError for codepoints unknown to unicodedata.
            try:
                if not valid_contextj(label, pos):
                    raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format(
                        _unot(cp_value), pos+1, repr(label)))
            except ValueError:
                raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format(
                    _unot(cp_value), pos+1, repr(label)))
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
            if not valid_contexto(label, pos):
                raise InvalidCodepointContext('Codepoint {} not allowed at position {} in {}'.format(_unot(cp_value), pos+1, repr(label)))
        else:
            raise InvalidCodepoint('Codepoint {} at position {} of {} not allowed'.format(_unot(cp_value), pos+1, repr(label)))

    check_bidi(label)
def alabel(label):
    """Convert a label to its A-label (ASCII, 'xn--'-prefixed) form as bytes.

    Already-ASCII input is validated by round-tripping through ulabel();
    otherwise the label is checked and Punycode-encoded. Raises IDNAError
    for empty input or when the result exceeds the 63-octet label limit.
    """
    try:
        # Fast path: pure-ASCII labels need validation but no encoding.
        label = label.encode('ascii')
        ulabel(label)
        if not valid_label_length(label):
            raise IDNAError('Label too long')
        return label
    except UnicodeEncodeError:
        # Non-ASCII: fall through to the Punycode path below.
        pass

    if not label:
        raise IDNAError('No Input')

    label = str(label)
    check_label(label)
    label = _punycode(label)
    label = _alabel_prefix + label

    if not valid_label_length(label):
        raise IDNAError('Label too long')

    return label
def ulabel(label):
    """
    Return the U-label (Unicode text) form of *label*, validating it.
    """
    if not isinstance(label, (bytes, bytearray)):
        try:
            label = label.encode('ascii')
        except UnicodeEncodeError:
            # Non-ASCII input is already a U-label; just validate.
            check_label(label)
            return label

    label = label.lower()
    if not label.startswith(_alabel_prefix):
        # Plain ASCII label without the xn-- prefix.
        check_label(label)
        return label.decode('ascii')

    stripped = label[len(_alabel_prefix):]
    if not stripped:
        raise IDNAError('Malformed A-label, no Punycode eligible content found')
    if stripped.decode('ascii')[-1] == '-':
        raise IDNAError('A-label must not end with a hyphen')

    decoded = stripped.decode('punycode')
    check_label(decoded)
    return decoded
def uts46_remap(domain, std3_rules=True, transitional=False):
    """Re-map the characters in the string according to UTS46 processing."""
    # Imported lazily: the mapping table is large.
    from .uts46data import uts46data
    output = ''
    try:
        for pos, char in enumerate(domain):
            code_point = ord(char)
            # Rows are (codepoint, status[, replacement]).  The first 256
            # code points appear to be stored at their own index; higher
            # ones are found by bisecting on (code_point, 'Z') and taking
            # the preceding row, which covers the containing range.
            uts46row = uts46data[code_point if code_point < 256 else
                bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]
            status = uts46row[1]
            replacement = uts46row[2] if len(uts46row) == 3 else None
            # Status codes: 'V' valid, 'M' mapped, 'D' deviation,
            # 'I' ignored, '3' valid/mapped only when STD3 rules are off.
            if (status == 'V' or
                    (status == 'D' and not transitional) or
                    (status == '3' and not std3_rules and replacement is None)):
                output += char
            elif replacement is not None and (status == 'M' or
                    (status == '3' and not std3_rules) or
                    (status == 'D' and transitional)):
                output += replacement
            elif status != 'I':
                # Disallowed codepoint: route through the except clause
                # below so both failure modes share one error message.
                raise IndexError()
        return unicodedata.normalize('NFC', output)
    except IndexError:
        raise InvalidCodepoint(
            'Codepoint {} not allowed at position {} in {}'.format(
                _unot(code_point), pos + 1, repr(domain)))
def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
    """
    Encode a domain name into its IDNA ASCII form, returned as bytes.
    """
    if isinstance(s, (bytes, bytearray)):
        s = s.decode('ascii')
    if uts46:
        s = uts46_remap(s, std3_rules, transitional)

    labels = s.split('.') if strict else _unicode_dots_re.split(s)
    if not labels or labels == ['']:
        raise IDNAError('Empty domain')

    trailing_dot = (labels[-1] == '')
    if trailing_dot:
        labels = labels[:-1]

    result = []
    for label in labels:
        encoded = alabel(label)
        if not encoded:
            raise IDNAError('Empty label')
        result.append(encoded)
    if trailing_dot:
        result.append(b'')

    joined = b'.'.join(result)
    if not valid_string_length(joined, trailing_dot):
        raise IDNAError('Domain too long')
    return joined
def decode(s, strict=False, uts46=False, std3_rules=False):
    """
    Decode an IDNA-encoded domain name into its Unicode form.
    """
    if isinstance(s, (bytes, bytearray)):
        s = s.decode('ascii')
    if uts46:
        s = uts46_remap(s, std3_rules, False)

    labels = s.split('.') if strict else _unicode_dots_re.split(s)
    if not labels or labels == ['']:
        raise IDNAError('Empty domain')

    trailing_dot = not labels[-1]
    if trailing_dot:
        labels = labels[:-1]

    result = []
    for label in labels:
        decoded = ulabel(label)
        if not decoded:
            raise IDNAError('Empty label')
        result.append(decoded)
    if trailing_dot:
        result.append('')

    return '.'.join(result)
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/pip/_vendor/idna/core.py
|
Python
|
apache-2.0
| 11,849
|
"""
This scipt generates a noisy activation image image
and applies the bayesian structural analysis on it
Author : Bertrand Thirion, 2009
"""
#autoindent
print __doc__
import numpy as np
import scipy.stats as st
import matplotlib.pylab as mp
import nipy.neurospin.graph.field as ff
import nipy.neurospin.utils.simul_2d_multisubject_fmri_dataset as simul
import nipy.neurospin.spatial_models.bayesian_structural_analysis as bsa
import nipy.neurospin.spatial_models.structural_bfls as sbf
def make_bsa_2d(betas, theta=3., dmax=5., ths=0, thq=0.5, smin=0,
                method='simple', verbose=0):
    """
    Function for performing Bayesian structural analysis
    on a set of images.

    Parameters
    ----------
    betas, array of shape (nsubj, dimx, dimy) the data used
           Note that it is assumed to be a t- or z-variate
    theta=3., float,
              first level threshold of betas
    dmax=5., float, expected between subject variability
    ths=0, float,
           null hypothesis for the prevalence statistic
    thq=0.5, float,
             p-value of the null rejection
    smin=0, int,
           threshold on the number of contiguous voxels
           to make regions meaningful structures
    method='simple', string,
           estimation method used; to be chosen among
           'simple', 'dev', 'loo', 'ipmi', 'simple_quick', 'sbf'
    verbose=0, verbosity mode

    Returns
    -------
    AF the landmark_regions instance describing the result
    BF: list of hroi instances describing the individual data
    (for method='loo' the pair (mll, ll0) of log-likelihoods is
    returned instead)

    Raises
    ------
    ValueError if method is not one of the supported estimators
    """
    # Fail fast on an unknown method, before any expensive computation
    # (the original only checked after attempting the dispatch).
    if method not in ['loo', 'dev', 'simple', 'ipmi', 'simple_quick', 'sbf']:
        raise ValueError('method is not correctly defined')

    ref_dim = np.shape(betas[0])
    nsubj = betas.shape[0]
    # builtin int/float instead of the deprecated np.int/np.float aliases
    xyz = np.array(np.where(betas[:1])).T.astype(int)
    nvox = np.size(xyz, 0)

    # create the field structure that encodes image topology
    Fbeta = ff.Field(nvox)
    Fbeta.from_3d_grid(xyz, 18)

    # Get coordinates in mm
    coord = xyz.astype(float)

    # get the functional information
    lbeta = np.array([np.ravel(betas[k]) for k in range(nsubj)]).T

    # the voxel volume is 1.0
    g0 = 1.0/(1.0*nvox)*1./np.sqrt(2*np.pi*dmax**2)
    affine = np.eye(4)
    shape = (1, ref_dim[0], ref_dim[1])
    lmax = 0
    bdensity = 1

    if method == 'ipmi':
        group_map, AF, BF, likelihood = \
                   bsa.compute_BSA_ipmi(Fbeta, lbeta, coord, dmax, xyz,
                                        affine, shape, thq,
                                        smin, ths, theta, g0, bdensity)
    elif method == 'simple':
        group_map, AF, BF, likelihood = \
                   bsa.compute_BSA_simple(Fbeta, lbeta, coord, dmax, xyz,
                                          affine, shape, thq, smin, ths,
                                          theta, g0)
    elif method == 'loo':
        # leave-one-out: returns log-likelihoods, not (AF, BF)
        mll, ll0 = bsa.compute_BSA_loo(Fbeta, lbeta, coord, dmax, xyz,
                                       affine, shape, thq, smin, ths,
                                       theta, g0)
        return mll, ll0
    elif method == 'dev':
        group_map, AF, BF, likelihood = \
                   bsa.compute_BSA_dev(Fbeta, lbeta, coord, dmax, xyz,
                                       affine, shape, thq,
                                       smin, ths, theta, g0, bdensity)
    elif method == 'simple_quick':
        likelihood = np.zeros(ref_dim)
        group_map, AF, BF, coclustering = \
                   bsa.compute_BSA_simple_quick(Fbeta, lbeta, coord, dmax, xyz,
                                                affine, shape, thq, smin, ths,
                                                theta, g0)
    elif method == 'sbf':
        likelihood = np.zeros(ref_dim)
        group_map, AF, BF = sbf.Compute_Amers(Fbeta, lbeta, xyz, affine, shape,
                                              coord, dmax=dmax, thr=theta,
                                              ths=ths, pval=thq)

    if verbose == 0:
        return AF, BF

    # ------------------------------------------------------------------
    # display of the results
    # ------------------------------------------------------------------
    if AF is not None:
        lmax = AF.k + 2
        AF.show()
    group_map.shape = ref_dim
    mp.figure()
    mp.subplot(1, 3, 1)
    mp.imshow(group_map, interpolation='nearest', vmin=-1, vmax=lmax)
    mp.title('Blob separation map')
    mp.colorbar()

    if AF is not None:
        group_map = AF.map_label(coord, 0.95, dmax)
        group_map.shape = ref_dim
        mp.subplot(1, 3, 2)
        mp.imshow(group_map, interpolation='nearest', vmin=-1, vmax=lmax)
        mp.title('group-level position 95% \n confidence regions')
        mp.colorbar()

    mp.subplot(1, 3, 3)
    likelihood.shape = ref_dim
    mp.imshow(likelihood, interpolation='nearest')
    mp.title('Spatial density under h1')
    mp.colorbar()

    # per-subject blob label maps (2x5 layout assumes 10 subjects)
    mp.figure()
    if nsubj == 10:
        for s in range(nsubj):
            mp.subplot(2, 5, s + 1)
            lw = -np.ones(ref_dim)
            if BF[s] is not None:
                nls = BF[s].get_roi_feature('label')
                nls[nls == -1] = np.size(AF) + 2
                for k in range(BF[s].k):
                    xyzk = BF[s].xyz[k].T
                    lw[xyzk[1], xyzk[2]] = nls[k]
            mp.imshow(lw, interpolation='nearest', vmin=-1, vmax=lmax)
            mp.axis('off')

    # per-subject raw data
    mp.figure()
    if nsubj == 10:
        for s in range(nsubj):
            mp.subplot(2, 5, s + 1)
            mp.imshow(betas[s], interpolation='nearest', vmin=betas.min(),
                      vmax=betas.max())
            mp.axis('off')
    return AF, BF
################################################################################
# Main script
################################################################################

# --- generate the synthetic dataset ---
nsubj = 10
dimx = 60
dimy = 60
# activation blob positions (scaled by 2) and amplitudes
pos = 2 * np.array([[6, 7],
                    [10, 10],
                    [15, 10]])
ampli = np.array([5, 7, 6])
sjitter = 1.0
dataset = simul.make_surrogate_array(nbsubj=nsubj, dimx=dimx, dimy=dimy,
                                     pos=pos, ampli=ampli, width=5.0)
betas = np.reshape(dataset, (nsubj, dimx, dimy))

# --- analysis parameters ---
theta = float(st.t.isf(0.01, 100))  # first-level threshold
dmax = 5./1.5                       # expected between-subject variability
ths = 1  # nsubj/2
thq = 0.9
verbose = 1
smin = 5
method = 'simple'  # alternatives: 'dev', 'ipmi', 'sbf'

# --- run the algorithm and display ---
AF, BF = make_bsa_2d(betas, theta, dmax, ths, thq, smin, method,
                     verbose=verbose)
mp.show()
|
yarikoptic/NiPy-OLD
|
examples/neurospin/bayesian_structural_analaysis.py
|
Python
|
bsd-3-clause
| 6,285
|
#!/usr/bin/env python
import string
from AOR.Statements import Call
from AOR.BasicStatement import BasicStatement
from AOR.Do import Do,EndDo
from AOR.If import If,ElseIf,Else,EndIf,IfOnly
def GenerateFileCallTree(objFile, sFilename):
    """Return a printable call tree for every subroutine in objFile."""
    bDoLoops = 1
    bIf = 1
    nLevel = 3
    lIgnore = ['']
    # Collect the pieces and join once instead of repeated concatenation.
    lParts = ["File:" + sFilename + "\n"]
    for sub in objFile.GetAllSubroutines():
        lParts.append("Call tree for subroutine:" + sub.GetName() + "\n")
        oTree = CallTree(sub, nLevel, lIgnore, bDoLoops, bIf)
        lParts.append(CallTree.__repr__(oTree) + "\n")
    return "".join(lParts)
class CallTree:
    """Collects the Call (and optionally Do/If) statements of a subroutine
    and groups them into top-level blocks for call-tree printing."""

    def __init__(self, oSubroutine, nLevel=11, lIgnore=None, bDo=None, bIf=None):
        self.oSubroutine = oSubroutine
        self.nLevel = nLevel
        # Fixed: mutable default argument ([]) would be shared between
        # instances; default to None and create a fresh list instead.
        self.lIgnore = [] if lIgnore is None else lIgnore
        self.bDo = bDo
        self.bIf = bIf
        self.lstatements = []
        self.lAllBlocks = []
        self.lfind = [Call]
        self.nCallnest = 0
        # store routines to ignore in a dict (fast membership tests)
        self.dIgnore = {}
        for i in self.lIgnore:
            self.dIgnore[i] = 1
        # Are we interested in Do Loops surrounding calls
        if self.bDo:
            self.lfind.append(Do)
            self.lfind.append(EndDo)
        # Are we interested in If/Else/Endif surrounding calls
        if self.bIf:
            self.lfind.append(If)
            self.lfind.append(Else)
            self.lfind.append(ElseIf)
            self.lfind.append(EndIf)
            self.lfind.append(IfOnly)
        lAll = self.oSubroutine.lGetStatements(self.lfind)
        # Check for Routines to Ignore ('in' replaces the Python-2-only
        # dict.has_key(); behavior is identical)
        for statement in lAll:
            if isinstance(statement, Call):
                if statement.GetName() not in self.dIgnore:
                    self.lstatements.append(statement)
            # IfOnly statements may have calls, check those too; an IfOnly
            # whose body is not a Call is intentionally dropped
            elif isinstance(statement, IfOnly):
                checkIf = statement.GetStatement()
                if isinstance(checkIf, Call):
                    if checkIf.GetName() not in self.dIgnore:
                        self.lstatements.append(statement)
            else:
                # Do/EndDo/If/Else/EndIf statements are always kept
                self.lstatements.append(statement)
        # Get All Blocks at the outer most level for a subroutine
        self.lAllBlocks = self.lGetAllBlocks()

    def __repr__(self):
        """Render all blocks that actually contain a call."""
        mystring = ""
        for l in self.lAllBlocks:
            if self.bHasCall(l):
                for i in l:
                    mystring = mystring + BasicStatement.__repr__(i) + "\n"
        return mystring

    def bHasCall(self, lBlock):
        """Return 1 if lBlock contains at least one Call statement, else 0."""
        for i in lBlock:
            if isinstance(i, Call):
                # early return instead of scanning the rest of the block
                return 1
        return 0

    def lGetAllBlocks(self):
        """Partition self.lstatements into outermost-level blocks:
        a block is either a lone call or a Do/If construct (with its
        nested contents) that surrounds at least one statement."""
        lBlocks = []
        lnextBlock = []
        nest = 0
        for i in self.lstatements:
            if isinstance(i, IfOnly):
                # single-statement if: only kept when its body is a call
                if isinstance(i.GetStatement(), Call):
                    lnextBlock.append(i)
            elif isinstance(i, If):
                nest = nest + 1
                lnextBlock.append(i)
            elif isinstance(i, EndIf):
                lnextBlock.append(i)
                if nest == 1:
                    # closing the outermost construct ends the block
                    lBlocks.append(lnextBlock)
                    lnextBlock = []
                    nest = 0
                else:
                    nest = nest - 1
            elif isinstance(i, Call):
                lnextBlock.append(i)
                if nest == 0:
                    # a call outside any construct is a block by itself
                    lBlocks.append(lnextBlock)
                    lnextBlock = []
            elif isinstance(i, Do):
                lnextBlock.append(i)
                nest = nest + 1
            elif isinstance(i, EndDo):
                lnextBlock.append(i)
                if nest == 1:
                    lBlocks.append(lnextBlock)
                    lnextBlock = []
                    nest = 0
                else:
                    nest = nest - 1
        return lBlocks
|
hiker/stan
|
Analyser/CallTree.py
|
Python
|
gpl-3.0
| 3,312
|
"""
Copyright (c) 2008-2015, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""
TestCases for testing the locking sub-system.
"""
import time
import unittest
from .test_all import db, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
    """Tests for the Berkeley DB locking subsystem: simple lock
    acquire/release, threaded contention, and lock/txn timeouts."""

    def setUp(self):
        # Each test gets a fresh environment directory.
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
                                    db.DB_INIT_LOCK | db.DB_CREATE)

    def tearDown(self):
        self.env.close()
        test_support.rmtree(self.homeDir)

    def test01_simple(self):
        """Acquire and release a single write lock."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_simple..." % self.__class__.__name__)

        anID = self.env.lock_id()
        if verbose:
            print("locker ID: %s" % anID)
        lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
        if verbose:
            print("Aquired lock: %s" % lock)
        self.env.lock_put(lock)
        if verbose:
            print("Released lock: %s" % lock)
        self.env.lock_id_free(anID)

    def test02_threaded(self):
        """Several threads repeatedly taking read/write locks."""
        if verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_threaded..." % self.__class__.__name__)

        # Same mixture and order of lock requests as the original nine
        # copy-pasted Thread(...) constructions, built in a loop.
        lock_types = (db.DB_LOCK_WRITE, db.DB_LOCK_READ, db.DB_LOCK_READ,
                      db.DB_LOCK_WRITE, db.DB_LOCK_READ, db.DB_LOCK_READ,
                      db.DB_LOCK_WRITE, db.DB_LOCK_WRITE, db.DB_LOCK_WRITE)
        threads = [Thread(target=self.theThread, args=(lt,))
                   for lt in lock_types]

        import sys  # hoisted: was re-imported on every loop iteration
        for t in threads:
            if sys.version_info[0] < 3:
                t.setDaemon(True)
            else:
                t.daemon = True
            t.start()
        for t in threads:
            t.join()

    def test03_lock_timeout(self):
        """set_timeout/get_timeout round-trips for lock and txn timeouts."""
        self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
        self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0)
        self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
        self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 0)
        self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
        self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 123456)
        self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
        self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 7890123)

    def test04_lock_timeout2(self):
        """A conflicting lock request must time out, and the expired-lock
        detector thread must observe at least one expiration."""
        self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
        self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
        self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
        self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)

        def deadlock_detection():
            # Poll for expired locks until the main thread signals the end;
            # state is carried on function attributes (end/count).
            while not deadlock_detection.end:
                deadlock_detection.count = \
                    self.env.lock_detect(db.DB_LOCK_EXPIRE)
                if deadlock_detection.count:
                    while not deadlock_detection.end:
                        pass
                    break
                time.sleep(0.01)

        deadlock_detection.end = False
        deadlock_detection.count = 0
        t = Thread(target=deadlock_detection)
        import sys
        if sys.version_info[0] < 3:
            t.setDaemon(True)
        else:
            t.daemon = True
        t.start()
        self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
        anID = self.env.lock_id()
        anID2 = self.env.lock_id()
        self.assertNotEqual(anID, anID2)
        lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
        start_time = time.time()
        self.assertRaises(db.DBLockNotGrantedError,
                          self.env.lock_get, anID2, "shared lock", db.DB_LOCK_READ)
        end_time = time.time()
        deadlock_detection.end = True
        # Floating point rounding
        self.assertTrue((end_time - start_time) >= 0.0999)
        self.env.lock_put(lock)
        t.join()
        self.env.lock_id_free(anID)
        self.env.lock_id_free(anID2)
        self.assertTrue(deadlock_detection.count > 0)

    def theThread(self, lockType):
        """Worker: acquire and release `lockType` locks 1000 times."""
        import sys
        if sys.version_info[0] < 3:
            name = currentThread().getName()
        else:
            name = currentThread().name

        if lockType == db.DB_LOCK_WRITE:
            lt = "write"
        else:
            lt = "read"

        anID = self.env.lock_id()
        if verbose:
            print("%s: locker ID: %s" % (name, anID))

        for i in range(1000):
            lock = self.env.lock_get(anID, "some locked thing", lockType)
            if verbose:
                print("%s: Aquired %s lock: %s" % (name, lt, lock))
            self.env.lock_put(lock)
            if verbose:
                print("%s: Released %s lock: %s" % (name, lt, lock))
        self.env.lock_id_free(anID)
#----------------------------------------------------------------------
def test_suite():
    """Return the locking tests appropriate for this interpreter:
    all of them with threading, only test01 without."""
    pattern = '' if have_threads else 'test01'
    suite = unittest.TestSuite()
    if pattern:
        suite.addTest(unittest.makeSuite(LockingTestCase, pattern))
    else:
        suite.addTest(unittest.makeSuite(LockingTestCase))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
quentinlautischer/291MiniProject2
|
lib/python3.5/site-packages/bsddb3/tests/test_lock.py
|
Python
|
apache-2.0
| 7,824
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from pycsw import server
from geonode.catalogue.backends.pycsw_local import CONFIGURATION
@csrf_exempt
def csw_global_dispatch(request):
    """pycsw wrapper"""
    # This view should only operate if pycsw_local is the backend;
    # otherwise redirect to the URL of the configured external backend.
    engine = settings.CATALOGUE['default']['ENGINE']
    if engine != 'geonode.catalogue.backends.pycsw_local':
        return HttpResponseRedirect(settings.CATALOGUE['default']['URL'])

    # Merge the site-wide pycsw settings with the local overrides.
    mdict = dict(settings.PYCSW['CONFIGURATION'], **CONFIGURATION)

    env = request.META.copy()
    env['local.app_root'] = os.path.dirname(__file__)
    env['REQUEST_URI'] = request.build_absolute_uri()

    csw = server.Csw(mdict, env)
    content = csw.dispatch_wsgi()
    return HttpResponse(content, content_type=csw.contenttype)
|
GISPPU/GrenadaLandInformation
|
geonode/catalogue/views.py
|
Python
|
gpl-3.0
| 1,844
|
from channels.routing import route, include
# Top-level channel routing table: delegates all websocket traffic to the
# voxel_globe.websockets app.
channel_routing = [
    #UNLIKE urls.py, path starts with /
    include('voxel_globe.websockets.routing.channel_routing', path=r'^/ws')
]
|
ngageoint/voxel-globe
|
voxel_globe/vip/routing.py
|
Python
|
mit
| 178
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals)
"""
===========================
Regulatory Control Measures
===========================
:Authors:
Moritz Emanuel Beber
:Date:
2013-05-10
:Copyright:
Copyright(c) 2013 Jacobs University of Bremen. All rights reserved.
:File:
control.py
"""
__all__ = ["digital_control", "digital_ctc", "digital_ctc_fixed_regulators",
# "continuous_digital_control", "continuous_digital_ctc",
# "continuous_digital_ctc_fixed_regulators",
# "delayed_continuous_digital_control",
# "delayed_continuous_digital_ctc",
"analog_control", "analog_ctc",
# "continuous_analog_control", "continuous_analog_ctc",
"metabolic_coherence_ratio", "metabolic_coherence"]
import logging
import re
import numpy as np
from . import measures as ms
from . import shuffling as shuff
from . import networks as nets
from .. import miscellaneous as misc
from ..statistics import compute_zscore
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(misc.NullHandler())
OPTIONS = misc.OptionsManager.get_instance()
def digital_control(effective, measure=ms.discrete_total_ratio):
    """
    Evaluate the magnitude of digital control in an effective
    transcriptional regulatory network (TRN).

    Parameters
    ----------
    effective: TRN or nx.(Multi)DiGraph
        Effective TRN.
    measure: callable (optional)
        Called with the effective network only; returns the magnitude
        of control within it.

    References
    ----------
    [1] Marr, C., Geertz, M., Hütt, M.-T., Muskhelishvili, G., 2008.
    Dissecting the logical types of network control in gene expression profiles.
    BMC Systems Biology 2, 18.
    """
    # A missing or edgeless network carries no measurable control.
    undefined = effective is None or effective.size() == 0
    return np.nan if undefined else measure(effective)
def digital_ctc(effective, reference, measure=ms.discrete_total_ratio,
        random_num=1E04, return_sample=False):
    """
    Compute the digital control type confidence (CTC) of an effective
    transcriptional regulatory network (TRN).

    This function computes a Z-score of digital control using a reference
    (complete) TRN as a simple null model.

    Parameters
    ----------
    effective: TRN or nx.(Multi)DiGraph
        Effective TRN.
    reference: TRN or nx.(Multi)DiGraph
        Complete TRN.
    measure: callable (optional)
        Takes the effective network as its only argument and returns the
        magnitude of control within it.
    random_num: int (optional)
        Size of the sample distribution of digital control in the null model.
    return_sample: bool (optional)
        Whether or not to return the sample distribution.

    References
    ----------
    [1] Marr, C., Geertz, M., Hütt, M.-T., Muskhelishvili, G., 2008.
    Dissecting the logical types of network control in gene expression profiles.
    BMC Systems Biology 2, 18.
    """
    random_num = int(random_num)
    if effective is None or effective.size() == 0:
        return np.nan
    size = len(effective)
    # range instead of the Python-2-only xrange, consistent with
    # digital_ctc_fixed_regulators in this module and Python-3 compatible.
    sample = [shuff.active_sample(reference, size, evaluate=measure)
              for i in range(random_num)]
    z_score = compute_zscore(measure(effective), sample)
    if return_sample:
        return (z_score, sample)
    else:
        return z_score
def digital_ctc_fixed_regulators(effective, reference, measure=ms.discrete_total_ratio,
        random_num=1E04, return_sample=False):
    """
    Compute the digital control type confidence (CTC) of an effective
    transcriptional regulatory network (TRN).

    The Z-score of digital control is taken against an improved null model
    in which the number of regulating nodes (out-degree > 0) is held fixed.

    Parameters
    ----------
    effective: TRN or nx.(Multi)DiGraph
        Effective TRN.
    reference: TRN or nx.(Multi)DiGraph
        Complete TRN.
    measure: callable (optional)
        Takes the effective network as its only argument and returns the
        magnitude of control within it.
    random_num: int (optional)
        Size of the sample distribution of digital control in the null model.
    return_sample: bool (optional)
        Whether or not to return the sample distribution.

    References
    ----------
    [1]
    """
    num_draws = int(random_num)
    if effective is None or effective.size() == 0:
        return np.nan
    # Split both networks into regulators (out-degree > 0) and targets.
    (eff_regs, eff_slaves) = nets.split_regulators(effective)
    (regulators, slaves) = nets.split_regulators(reference)
    LOGGER.info("picked %d regulators", len(eff_regs))
    null_dist = [shuff.fixed_regulator_sample(reference, regulators, len(eff_regs),
            slaves, len(eff_slaves), evaluate=measure) for i in range(num_draws)]
    z_score = compute_zscore(measure(effective), null_dist)
    return (z_score, null_dist) if return_sample else z_score
#def continuous_digital_control(trn, active, levels,
# measure=ms.continuous_functional_coherence):
# """
# Compute the continuous digital control of a transcriptional regulatory
# network (TRN).
#
# Uses expression levels of nodes in a TRN in order to evaluate the magnitude of
# control.
#
# Parameters
# ----------
# trn: TRN or nx.(Multi)DiGraph
# Effective TRN.
# active: list
# Ordered collection of active nodes.
# levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# expected).
# measure: callable (optional)
# Takes the effective network and expression level map and returns the
# magnitude of control.
#
# References
# ----------
# [1]
# """
# if trn is None or trn.size() == 0:
# return np.nan
# node2level = {node: lvl for (node, lvl) in izip(active, levels)}
# return measure(trn, node2level)
#
#def continuous_digital_ctc(trn, active, levels,
# measure=ms.continuous_functional_coherence, random_num=1E04,
# return_sample=False):
# """
# Compute the continuous digital control type confidence (CTC) in a
# transcriptional regulatory network (TRN).
#
# This function computes a Z-score of continuous digital control using
# expression levels of nodes in a TRN as a simple null model.
#
# Parameters
# ----------
# trn: TRN or nx.(Multi)DiGraph
# Effective TRN.
# active: list
# Ordered collection of active nodes.
# levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# expected).
# measure: callable (optional)
# Takes the effective network and expression level map and returns the
# magnitude of control.
# random_num: int (optional)
# Size of the sample distribution of continuous digital control in the
# null model.
# return_sample: bool (optional)
# Whether or not to return the sample distribution.
#
# References
# ----------
# [1]
# """
# random_num = int(random_num)
# if trn is None or trn.size() == 0:
# return np.nan
# node2level = {node: lvl for (node, lvl) in izip(active, levels)}
# sample = [shuff.continuous_sample(trn, active, levels, evaluate=measure)\
# for i in xrange(random_num)]
# z_score = compute_zscore(measure(trn, node2level), sample)
# if return_sample:
# return (z_score, sample)
# else:
# return z_score
#
#def continuous_digital_ctc_fixed_regulators(trn, active, levels, random_num=1E04,
# return_sample=False, measure=ms.continuous_functional_coherence):
# """
# Compute the continuous digital control type confidence (CTC) in a
# transcriptional regulatory network (TRN).
#
# This function computes a Z-score of continuous digital control using
# expression levels of nodes in a TRN and considering expression levels of
# regulating nodes (out-degree > 0) and regulated nodes (out-degree = 0)
# separately.
#
# Parameters
# ----------
# trn: TRN or nx.(Multi)DiGraph
# Effective TRN.
# active: list
# Ordered collection of active nodes.
# levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# expected).
# measure: callable (optional)
# Takes the effective network and expression level map and returns the
# magnitude of control.
# random_num: int (optional)
# Size of the sample distribution of continuous digital control in the
# null model.
# return_sample: bool (optional)
# Whether or not to return the sample distribution.
#
# References
# ----------
# [1] Marr, C., Geertz, M., Hütt, M.-T., Muskhelishvili, G., 2008.
# Dissecting the logical types of network control in gene expression profiles.
# BMC Systems Biology 2, 18.
# """
# random_num = int(random_num)
# if trn is None or trn.size() == 0:
# return np.nan
# node2level = {node: lvl for (node, lvl) in izip(active, levels)}
# (regulators, slaves) = nets.split_regulators(trn)
# # in TRN structure the out-hubs and spokes differentiation matters
# reg_levels = [node2level[node] for node in regulators]
# slave_levels = [node2level[node] for node in slaves]
# sample = [shuff.continuous_fixed_regulator_sample(trn, regulators, reg_levels,
# slaves, slave_levels, evaluate=measure) for i in xrange(random_num)]
# z_score = compute_zscore(measure(trn, node2level), sample)
# if return_sample:
# return (z_score, sample)
# else:
# return z_score
#
#def delayed_continuous_digital_control(trn, active, levels,
# delayed_levels, measure=ms.delayed_continuous_functional_coherence):
# """
# Compute the continuous digital control in a transcriptional regulatory
# network (TRN).
#
# This function computes the continuous digital control using
# expression levels of nodes in a TRN as a simple null model. Expression
# levels are considered at two different time points: if there is a link from
# u to v then the expression level for u at time t is compared with the
# expression level of v at time point t + 1.
#
# Parameters
# ----------
# trn: TRN or nx.(Multi)DiGraph
# Effective TRN.
# active: list
# Ordered collection of active nodes.
# levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# expected) at time point t.
# delayed_levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# expected) at time point t + 1.
# measure: callable (optional)
# Takes the effective network and expression level map and returns the
# magnitude of control.
#
# References
# ----------
# [1]
# """
# #TODO: complete and test
# if trn is None or trn.size() == 0:
# return np.nan
# node2level = {node: lvl for (node, lvl) in izip(active, levels)}
# node2delayed = {node: lvl for (node, lvl) in izip(active, delayed_levels)}
# return measure(trn, node2level, node2delayed)
#
#def delayed_continuous_digital_ctc(trn, active, levels,
# delayed_levels, random_num=1E04, return_sample=False,
# measure=ms.delayed_continuous_functional_coherence):
# """
# Compute the continuous digital control type confidence (CTC) in a
# transcriptional regulatory network (TRN).
#
# This function computes a Z-score of continuous digital control using
# expression levels of nodes in a TRN as a simple null model. Expression
# levels are considered at two different time points: if there is a link from
# u to v then the expression level for u at time t is compared with the
# expression level of v at time point t + 1.
#
# Parameters
# ----------
# trn: TRN or nx.(Multi)DiGraph
# Effective TRN.
# active: list
# Ordered collection of active nodes.
# levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# expected) at time point t.
# delayed_levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# expected) at time point t + 1.
# measure: callable (optional)
# Takes the effective network and expression level map and returns the
# magnitude of control.
# random_num: int (optional)
# Size of the sample distribution of continuous digital control in the
# null model.
# return_sample: bool (optional)
# Whether or not to return the sample distribution.
#
# References
# ----------
# [1]
# """
# #TODO: complete and test
# random_num = int(random_num)
# if trn is None or trn.size() == 0:
# return np.nan
# node2level = {node: lvl for (node, lvl) in izip(active, levels)}
# node2delayed = {node: lvl for (node, lvl) in izip(active, delayed_levels)}
# sample = [shuff.delayed_continuous_sample(trn, active, levels, delayed_levels,
# evaluate=measure) for i in xrange(random_num)]
# z_score = compute_zscore(measure(trn, node2level, node2delayed), sample)
# if return_sample:
# return (z_score, sample)
# else:
# return z_score
def analog_control(effective, measure=ms.discrete_total_ratio):
    """
    Evaluate the magnitude of analog control in an effective gene
    proximity network (GPN).

    Parameters
    ----------
    effective: GPN or nx.(Multi)Graph
        Effective GPN.
    measure: callable (optional)
        Called with the effective network only; returns the magnitude
        of control within it.

    References
    ----------
    [1] Marr, C., Geertz, M., Hütt, M.-T., Muskhelishvili, G., 2008.
    Dissecting the logical types of network control in gene expression profiles.
    BMC Systems Biology 2, 18.
    """
    # A missing or edgeless network carries no measurable control.
    undefined = effective is None or effective.size() == 0
    return np.nan if undefined else measure(effective)
def analog_ctc(effective, reference, measure=ms.discrete_total_ratio,
        random_num=1E04, return_sample=False):
    """
    Compute the analog control type confidence (CTC) of an effective gene
    proximity network (GPN).

    A Z-score of analog control is computed against a simple null model:
    repeatedly drawing equally sized active subnetworks from the complete
    reference GPN.

    Parameters
    ----------
    effective: GPN or nx.(Multi)Graph
        Effective GPN.
    reference: GPN or nx.(Multi)Graph
        Complete GPN.
    measure: callable (optional)
        Takes the effective network as its only argument and returns the
        magnitude of control within it.
    random_num: int (optional)
        Size of the sample distribution of analog control in the null model.
    return_sample: bool (optional)
        Whether or not to return the sample distribution.

    References
    ----------
    [1] Marr, C., Geertz, M., Hütt, M.-T., Muskhelishvili, G., 2008.
    Dissecting the logical types of network control in gene expression profiles.
    BMC Systems Biology 2, 18.
    """
    num_draws = int(random_num)
    if effective is None or effective.size() == 0:
        return np.nan
    active_size = len(effective)
    # Null-model distribution: score random active subsets of the reference.
    sample = [
        shuff.active_sample(reference, active_size, evaluate=measure)
        for _ in xrange(num_draws)
    ]
    z_score = compute_zscore(measure(effective), sample)
    return (z_score, sample) if return_sample else z_score
#def continuous_analog_control(gpn, active, levels,
# measure=ms.continuous_abs_coherence):
# """
# Compute the continuous analog control of a gene proximity network (GPN).
#
# Uses expression levels of nodes in a GPN in order to evaluate the magnitude of
# control.
#
# Parameters
# ----------
# gpn: GPN or nx.(Multi)Graph
# Effective GPN.
# active: list
# Ordered collection of active nodes.
# levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# as active expected).
# measure: callable (optional)
# Takes the effective network and expression level map and returns the
# magnitude of control.
#
# References
# ----------
# [1]
# """
# if gpn is None or gpn.size() == 0:
# return np.nan
# node2level = {node: lvl for (node, lvl) in izip(active, levels)}
# return measure(gpn, node2level)
#
#def continuous_analog_ctc(gpn, active, levels, measure=ms.continuous_abs_coherence,
# random_num=1E04, return_sample=False):
# """
# Compute the continuous analog control type confidence (CTC) in a gene
# proximity network (GPN).
#
# This function computes a Z-score of continuous analog control using
# expression levels of nodes in a GPN as a simple null model.
#
# Parameters
# ----------
# gpn: GPN or nx.(Multi)Graph
# Effective GPN.
# active: list
# Ordered collection of active nodes.
# levels: list
# Corresponding expression levels of active nodes (same shape and ordering
# as active expected).
# measure: callable (optional)
# Takes the effective network and expression level map and returns the
# magnitude of control.
# random_num: int (optional)
# Size of the sample distribution of continuous analog control in the
# null model.
# return_sample: bool (optional)
# Whether or not to return the sample distribution.
#
# References
# ----------
# [1]
# """
# random_num = int(random_num)
# if gpn is None or gpn.size() == 0:
# return np.nan
# node2level = {node: lvl for (node, lvl) in izip(active, levels)}
# sample = [shuff.continuous_sample(gpn, active, levels, evaluate=measure)\
# for i in xrange(random_num)]
# z_score = compute_zscore(measure(gpn, node2level), sample)
# if return_sample:
# return (z_score, sample)
# else:
# return z_score
def metabolic_coherence_ratio(metabolic_network, active, bnumber2gene,
        rxn_centric=None, measure=ms.discrete_total_ratio):
    """
    Compute the metabolic coherence ratio (MCR) from an effective metabolic
    network.

    Parameters
    ----------
    metabolic_network:
        Metabolic network; passed to nets.setup_metabolic together with
        rxn_centric to obtain a reaction-centric view.
    active: iterable
        An iterable with actively expressed genes.
    bnumber2gene: dict
        Maps b-number identifiers (e.g. "b0123") to gene names, which are
        tested for membership in `active`.
    rxn_centric: optional
        Pre-computed reaction-centric network -- presumably derived from
        metabolic_network when not given; TODO confirm against
        nets.setup_metabolic.
    measure: callable (optional)
        Takes the effective network as its only argument and returns the
        magnitude of control within it.

    Warning
    -------
    Unknown gene names are silently ignored.

    References
    ----------
    [1] Sonnenschein, N., Geertz, M., Muskhelishvili, G., Hütt, M.-T., 2011.
    Analog regulation of metabolic demand.
    BMC Syst Biol 5, 40.
    """
    rxn_centric = nets.setup_metabolic(metabolic_network, rxn_centric)
    if rxn_centric is np.nan:
        return rxn_centric
    # b-numbers look like "b" followed by exactly four digits, e.g. "b0123".
    bpattern = re.compile(r"b\d{4}")
    active_reactions = list()
    # evaluate whether a reaction can be active due to gene expression
    for rxn in rxn_centric:
        info = rxn.notes["gene_association"]
        if info:
            matches = bpattern.findall(info)
            if not matches:
                continue
            # Rewrite the boolean gene-association expression by replacing
            # each b-number with "True"/"False" depending on expression,
            # then evaluate the remaining and/or expression.
            # NOTE(review): eval() on annotation strings is only safe for
            # trusted model files -- never feed untrusted input here.
            for mobj in matches:
                info = info.replace(mobj, str(bnumber2gene[mobj] in active))
            activity = eval(info)
            if activity:
                active_reactions.append(rxn)
    original = ms.effective_network(rxn_centric, active_reactions)
    if len(original) == 0:
        LOGGER.warn("empty effective network")
        return np.nan
    return measure(original)
def metabolic_coherence(metabolic_network, active, bnumber2gene, rxn_centric=None,
        random_num=1E04, return_sample=False, measure=ms.discrete_total_ratio):
    """
    Compute the metabolic coherence (MC) from an effective metabolic
    network.

    Z-score of the metabolic coherence ratio against a null model of random
    active reaction sets of the same size. The effective-network construction
    duplicates metabolic_coherence_ratio above; keep the two in sync.

    Parameters
    ----------
    active: iterable
        An iterable with actively expressed genes.
    random_num: int (optional)
        Size of the null-model sample distribution.
    return_sample: bool (optional)
        Whether or not to return the sample distribution.
    measure: callable (optional)
        Takes the effective network as its only argument and returns the
        magnitude of control within it.

    Warning
    -------
    Unknown gene names are silently ignored.

    References
    ----------
    [1] Sonnenschein, N., Geertz, M., Muskhelishvili, G., Hütt, M.-T., 2011.
    Analog regulation of metabolic demand.
    BMC Syst Biol 5, 40.
    """
    random_num = int(random_num)
    rxn_centric = nets.setup_metabolic(metabolic_network, rxn_centric)
    if rxn_centric is np.nan:
        return rxn_centric
    bpattern = re.compile(r"b\d{4}")
    active_reactions = list()
    # evaluate whether a reaction can be active due to gene expression
    for rxn in rxn_centric:
        info = rxn.notes["gene_association"]
        if info:
            matches = bpattern.findall(info)
            if not matches:
                continue
            # NOTE(review): eval() on annotation strings is only safe for
            # trusted model files -- never feed untrusted input here.
            for mobj in matches:
                info = info.replace(mobj, str(bnumber2gene[mobj] in active))
            activity = eval(info)
            if activity:
                active_reactions.append(rxn)
    original = ms.effective_network(rxn_centric, active_reactions)
    size = len(original)
    if size == 0:
        LOGGER.warn("empty effective network")
        return np.nan
    # NOTE(review): measure is passed positionally here but as evaluate=
    # in analog_ctc; also int(random_num) below is redundant (already int).
    sample = [shuff.active_sample(rxn_centric, size, measure) for i in xrange(int(random_num))]
    z_score = compute_zscore(measure(original), sample)
    if return_sample:
        return (z_score, sample)
    else:
        return z_score
|
Midnighter/pyorganism
|
pyorganism/regulation/control.py
|
Python
|
bsd-3-clause
| 21,060
|
from __future__ import absolute_import
import logging
from django.conf import settings
from sentry.cache.redis import RedisClusterCache, RbCache
from .base import BaseAttachmentCache
logger = logging.getLogger(__name__)
class RedisClusterAttachmentCache(BaseAttachmentCache):
    """Attachment cache backed by a redis-cluster client.

    The cluster is selected by the ``cluster_id`` option, falling back to
    the ``SENTRY_ATTACHMENTS_REDIS_CLUSTER`` setting (default ``rc-short``).
    """

    def __init__(self, **options):
        appendix = options.pop("appendix", None)
        cluster = options.pop("cluster_id", None)
        if cluster is None:
            cluster = getattr(settings, "SENTRY_ATTACHMENTS_REDIS_CLUSTER", "rc-short")
        inner = RedisClusterCache(cluster, **options)
        BaseAttachmentCache.__init__(self, inner=inner, appendix=appendix)
class RbAttachmentCache(BaseAttachmentCache):
    """Attachment cache backed by an rb (redis blaster) client."""

    def __init__(self, **options):
        appendix = options.pop("appendix", None)
        inner = RbCache(**options)
        BaseAttachmentCache.__init__(self, inner=inner, appendix=appendix)
# Confusing legacy alias for RedisClusterAttachmentCache, kept for
# backwards compatibility with existing configuration.
RedisAttachmentCache = RedisClusterAttachmentCache
|
mvaled/sentry
|
src/sentry/attachments/redis.py
|
Python
|
bsd-3-clause
| 993
|
import os.path as op
import warnings
from nose.tools import assert_true, assert_equal
from numpy.testing import assert_array_almost_equal
import numpy as np
from ...io import Raw
from ...io.proj import make_projector, activate_proj
from ..ssp import compute_proj_ecg, compute_proj_eog
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_path, 'test_raw.fif')
dur_use = 5.0
eog_times = np.array([0.5, 2.3, 3.6, 14.5])
def test_compute_proj_ecg():
    """Test computation of ECG SSP projectors"""
    # Short crop of the sample recording, preloaded so projectors can be
    # computed in memory.
    raw = Raw(raw_fname).crop(0, 10, False)
    raw.preload_data()
    for average in [False, True]:
        # For speed, let's not filter here (must also not reject then)
        projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
                                         ch_name='MEG 1531', bads=['MEG 2443'],
                                         average=average, avg_ref=True,
                                         no_proj=True, l_freq=None,
                                         h_freq=None, reject=None,
                                         tmax=dur_use, qrs_threshold=0.5)
        # 2 mag + 2 grad + 2 eeg plus one more -- presumably the average
        # reference projector from avg_ref=True; TODO confirm.
        assert_true(len(projs) == 7)
        # heart rate at least 0.5 Hz, but less than 3 Hz
        assert_true(events.shape[0] > 0.5 * dur_use and
                    events.shape[0] < 3 * dur_use)
        # XXX: better tests
        # without setting a bad channel, this should throw a warning
        # (exactly one), and the call returns no projectors.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            projs, events = compute_proj_ecg(raw, n_mag=2, n_grad=2, n_eeg=2,
                                             ch_name='MEG 1531', bads=[],
                                             average=average, avg_ref=True,
                                             no_proj=True, l_freq=None,
                                             h_freq=None, tmax=dur_use)
            assert_equal(len(w), 1)
            assert_equal(projs, None)
def test_compute_proj_eog():
    """Test computation of EOG SSP projectors"""
    raw = Raw(raw_fname).crop(0, 10, False)
    raw.preload_data()
    for average in [False, True]:
        n_projs_init = len(raw.info['projs'])
        projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
                                         bads=['MEG 2443'], average=average,
                                         avg_ref=True, no_proj=False,
                                         l_freq=None, h_freq=None,
                                         reject=None, tmax=dur_use)
        # no_proj=False keeps the existing projectors, so 7 new ones are
        # appended to whatever the raw file already had.
        assert_true(len(projs) == (7 + n_projs_init))
        # Detected EOG events should match the known blink times that fall
        # within the cropped window (eog_times is module-level ground truth
        # for the test file -- presumably; off-by-one tolerated).
        assert_true(np.abs(events.shape[0] -
                    np.sum(np.less(eog_times, dur_use))) <= 1)
        # XXX: better tests
        # This will throw a warning b/c simplefilter('always')
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
                                             average=average, bads=[],
                                             avg_ref=True, no_proj=False,
                                             l_freq=None, h_freq=None,
                                             tmax=dur_use)
            assert_equal(len(w), 1)
            assert_equal(projs, None)
def test_compute_proj_parallel():
    """Test computation of ExG projectors using parallelization"""
    base_raw = Raw(raw_fname).crop(0, 10, False)
    base_raw.preload_data()
    # Identical settings for both runs; only n_jobs differs.
    kwargs = dict(n_mag=2, n_grad=2, n_eeg=2, bads=['MEG 2443'],
                  average=False, avg_ref=True, no_proj=False,
                  l_freq=None, h_freq=None, reject=None, tmax=dur_use)
    raw_serial = base_raw.copy()
    projs_serial, _ = compute_proj_eog(raw_serial, n_jobs=1, **kwargs)
    raw_parallel = base_raw.copy()
    projs_parallel, _ = compute_proj_eog(raw_parallel, n_jobs=2, **kwargs)
    # The projection matrices built from either run must agree closely.
    ch_names = raw_parallel.info['ch_names']
    mat_serial, _, _ = make_projector(activate_proj(projs_serial),
                                      ch_names, bads=['MEG 2443'])
    mat_parallel, _, _ = make_projector(activate_proj(projs_parallel),
                                        ch_names, bads=['MEG 2443'])
    assert_array_almost_equal(mat_serial, mat_parallel, 10)
|
trachelr/mne-python
|
mne/preprocessing/tests/test_ssp.py
|
Python
|
bsd-3-clause
| 4,662
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
import time
logging.basicConfig(level=logging.ERROR)
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
import taskflow.engines
from taskflow import exceptions
from taskflow.patterns import unordered_flow as uf
from taskflow import task
from taskflow.tests import utils
from taskflow.types import failure
import example_utils as eu # noqa
# INTRO: In this example we create two tasks which can trigger exceptions
# based on various inputs to show how to analyze the thrown exceptions for
# which types were thrown and handle the different types in different ways.
#
# This is especially important if a set of tasks run in parallel and each of
# those tasks may fail while running. This creates a scenario where multiple
# exceptions have been thrown and those exceptions need to be handled in a
# unified manner. Since an engine does not currently know how to resolve
# those exceptions (someday it could) the code using that engine and activating
# the flows and tasks using that engine will currently have to deal with
# catching those exceptions (and silencing them if this is desired).
#
# NOTE(harlowja): The engine *will* trigger rollback even under multiple
# exceptions being thrown, but at the end of that rollback the engine will
# rethrow these exceptions to the code that called the run() method; allowing
# that code to do further cleanups (if desired).
class FirstException(Exception):
    """Raised by FirstTask.execute when its ``raise1`` input is True."""
class SecondException(Exception):
    """Raised by SecondTask.execute when its ``raise2`` input is True."""
class FirstTask(task.Task):
    """Sleeps for ``sleep1`` seconds, then optionally fails.

    A non-boolean ``raise1`` triggers TypeError; a True value triggers
    FirstException.
    """

    def execute(self, sleep1, raise1):
        time.sleep(sleep1)
        if isinstance(raise1, bool):
            if raise1:
                raise FirstException('First task failed')
        else:
            raise TypeError('Bad raise1 value: %r' % raise1)
class SecondTask(task.Task):
    """Sleeps for ``sleep2`` seconds, then optionally fails.

    A non-boolean ``raise2`` triggers TypeError; a True value triggers
    SecondException.
    """

    def execute(self, sleep2, raise2):
        time.sleep(sleep2)
        if isinstance(raise2, bool):
            if raise2:
                raise SecondException('Second task failed')
        else:
            raise TypeError('Bad raise2 value: %r' % raise2)
def run(**store):
    """Run FirstTask and SecondTask in parallel with *store* as the engine
    storage, then sort the failures collected from the run.

    Known exception types (FirstException, SecondException) are reported
    and swallowed; any unrecognized failure is re-raised.
    """
    demo_flow = uf.Flow('flow').add(FirstTask(), SecondTask())
    try:
        with utils.wrap_all_failures():
            taskflow.engines.run(demo_flow, store=store, engine='parallel')
    except exceptions.WrappedFailure as wrapped:
        unhandled = []
        for fail in wrapped:
            if fail.check(FirstException):
                print("Got FirstException: %s" % fail.exception_str)
            elif fail.check(SecondException):
                print("Got SecondException: %s" % fail.exception_str)
            else:
                print("Unknown failure: %s" % fail)
                unhandled.append(fail)
        failure.Failure.reraise_if_any(unhandled)
# Demo 1: only FirstTask fails; its exception is caught and reported.
eu.print_wrapped("Raise and catch first exception only")
run(sleep1=0.0, raise1=True,
    sleep2=0.0, raise2=False)
# NOTE(imelnikov): in general, sleeping does not guarantee that we'll have both
# task running before one of them fails, but with current implementation this
# works most of times, which is enough for our purposes here (as an example).
# Demo 2: both tasks fail; both exceptions are caught and reported.
eu.print_wrapped("Raise and catch both exceptions")
run(sleep1=1.0, raise1=True,
    sleep2=1.0, raise2=True)
# Demo 3: the TypeError from the bad 'boom' input is not a known type, so
# run() re-raises it and we catch it here.
eu.print_wrapped("Handle one exception, and re-raise another")
try:
    run(sleep1=1.0, raise1=True,
        sleep2=1.0, raise2='boom')
except TypeError as ex:
    print("As expected, TypeError is here: %s" % ex)
else:
    assert False, "TypeError expected"
|
junneyang/taskflow
|
taskflow/examples/wrapped_exception.py
|
Python
|
apache-2.0
| 4,732
|
"""empty message
Revision ID: 4986e64643f4
Revises: 175003d01257
Create Date: 2015-04-15 12:16:41.965765
"""
# revision identifiers, used by Alembic.
revision = '4986e64643f4'
down_revision = '175003d01257'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Add a mandatory, unique ``feature_id`` column to ``tile`` and make
    ``date_acquired`` non-nullable."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): adding a non-nullable column without a server default
    # fails if 'tile' already contains rows -- confirm the table is empty
    # at this revision.
    op.add_column('tile', sa.Column('feature_id', sa.String(), nullable=False))
    op.alter_column('tile', 'date_acquired',
               existing_type=postgresql.TIMESTAMP(),
               nullable=False)
    op.create_unique_constraint(None, 'tile', ['feature_id'])
    ### end Alembic commands ###
def downgrade():
    """Revert the upgrade: drop the ``feature_id`` unique constraint and
    column, and allow NULL ``date_acquired`` again."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping a constraint by name=None relies on the
    # metadata naming convention being configured -- verify it resolves.
    op.drop_constraint(None, 'tile', type_='unique')
    op.alter_column('tile', 'date_acquired',
               existing_type=postgresql.TIMESTAMP(),
               nullable=True)
    op.drop_column('tile', 'feature_id')
    ### end Alembic commands ###
|
justinwp/croplands
|
migrations/versions/4986e64643f4_.py
|
Python
|
mit
| 1,023
|
"""Implementation of magic functions for matplotlib/pylab support.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from traitlets.config.application import Application
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from warnings import warn
from IPython.core.pylabtools import backends
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
magic_gui_arg = magic_arguments.argument(
'gui', nargs='?',
help="""Name of the matplotlib backend to use %s.
If given, the corresponding matplotlib backend is used,
otherwise it will be matplotlib's default
(which you can set in your matplotlib config file).
""" % str(tuple(sorted(backends.keys())))
)
@magics_class
class PylabMagics(Magics):
    """Magics related to matplotlib's pylab support"""
    @skip_doctest
    @line_magic
    @magic_arguments.magic_arguments()
    @magic_arguments.argument('-l', '--list', action='store_true',
                              help='Show available matplotlib backends')
    @magic_gui_arg
    def matplotlib(self, line=''):
        """Set up matplotlib to work interactively.
        This function lets you activate matplotlib interactive support
        at any point during an IPython session. It does not import anything
        into the interactive namespace.
        If you are using the inline matplotlib backend in the IPython Notebook
        you can set which figure formats are enabled using the following::
            In [1]: from IPython.display import set_matplotlib_formats
            In [2]: set_matplotlib_formats('pdf', 'svg')
        The default for inline figures sets `bbox_inches` to 'tight'. This can
        cause discrepancies between the displayed image and the identical
        image created using `savefig`. This behavior can be disabled using the
        `%config` magic::
            In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
        In addition, see the docstring of
        `IPython.display.set_matplotlib_formats` and
        `IPython.display.set_matplotlib_close` for more information on
        changing additional behaviors of the inline backend.
        Examples
        --------
        To enable the inline backend for usage with the IPython Notebook::
            In [1]: %matplotlib inline
        In this case, where the matplotlib default is TkAgg::
            In [2]: %matplotlib
            Using matplotlib backend: TkAgg
        But you can explicitly request a different GUI backend::
            In [3]: %matplotlib qt
        You can list the available backends using the -l/--list option::
            In [4]: %matplotlib --list
            Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
            'gtk', 'tk', 'inline']
        """
        args = magic_arguments.parse_argstring(self.matplotlib, line)
        if args.list:
            backends_list = list(backends.keys())
            print("Available matplotlib backends: %s" % backends_list)
        else:
            # Backend names are case-insensitive on the command line; the
            # isinstance guard is needed because args.gui is None when the
            # positional argument is omitted.
            gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)
            self._show_matplotlib_backend(args.gui, backend)
    @skip_doctest
    @line_magic
    @magic_arguments.magic_arguments()
    @magic_arguments.argument(
        '--no-import-all', action='store_true', default=None,
        help="""Prevent IPython from performing ``import *`` into the interactive namespace.
        You can govern the default behavior of this flag with the
        InteractiveShellApp.pylab_import_all configurable.
        """
    )
    @magic_gui_arg
    def pylab(self, line=''):
        """Load numpy and matplotlib to work interactively.
        This function lets you activate pylab (matplotlib, numpy and
        interactive support) at any point during an IPython session.
        %pylab makes the following imports::
            import numpy
            import matplotlib
            from matplotlib import pylab, mlab, pyplot
            np = numpy
            plt = pyplot
            from IPython.display import display
            from IPython.core.pylabtools import figsize, getfigs
            from pylab import *
            from numpy import *
        If you pass `--no-import-all`, the last two `*` imports will be excluded.
        See the %matplotlib magic for more details about activating matplotlib
        without affecting the interactive namespace.
        """
        args = magic_arguments.parse_argstring(self.pylab, line)
        # Resolve the effective import_all flag: an explicit --no-import-all
        # wins; otherwise fall back to the running Application's
        # pylab_import_all configurable, defaulting to True.
        if args.no_import_all is None:
            # get default from Application
            if Application.initialized():
                app = Application.instance()
                try:
                    import_all = app.pylab_import_all
                except AttributeError:
                    import_all = True
            else:
                # nothing specified, no app - default True
                import_all = True
        else:
            # invert no-import flag
            import_all = not args.no_import_all
        gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
        self._show_matplotlib_backend(args.gui, backend)
        print ("Populating the interactive namespace from numpy and matplotlib")
        # Warn about names in the user namespace that the star imports
        # overwrote, as reported by enable_pylab.
        if clobbered:
            warn("pylab import has clobbered these variables: %s" % clobbered +
                 "\n`%matplotlib` prevents importing * from pylab and numpy"
                 )
    def _show_matplotlib_backend(self, gui, backend):
        """show matplotlib message backend message"""
        # Only announce the backend when the user did not pick one explicitly.
        if not gui or gui == 'auto':
            print("Using matplotlib backend: %s" % backend)
|
sserrot/champion_relationships
|
venv/Lib/site-packages/IPython/core/magics/pylab.py
|
Python
|
mit
| 6,448
|
#!/usr/bin/env python
#
# GrovePi Example for using the Grove - I2C ADC(http://www.seeedstudio.com/depot/Grove-I2C-ADC-p-1580.html)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import grove_i2c_adc
import time
# You can initialize with a different address too: grove_i2c_adc.ADC(address=0x56)
adc= grove_i2c_adc.ADC()
# Poll the ADC forever, printing one reading every half second.
while True:
	#Print the 12 bit value from the I2C ADC
	print(adc.adc_read())
	time.sleep(.5)
|
penoud/GrovePi
|
Software/Python/grove_i2c_adc/i2c_adc_example.py
|
Python
|
mit
| 1,840
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest, time, re
class EtlExport(unittest.TestCase):
    """Functional Selenium test of the datawarehouse ETL upload/export flow.

    Drives a local dev server at http://localhost:8000 with a real Firefox
    browser, uploading a CSV file and adding the resulting table to the map.
    """

    test_type = 'funct'
    browser = 'firefox'
    localhost = 'http://localhost:8000'

    def setUp(self):
        """Start a Firefox driver pointed at the local development server."""
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://localhost:8000/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_etl_export(self):
        """Upload a CSV, select the target table, and add it to the map."""
        driver = self.driver
        driver.get(self.base_url + "/datawarehouse/etl/upload")
        driver.find_element_by_id("id_upload-inputFile").clear()
        driver.find_element_by_id("id_upload-inputFile").send_keys("/opt/VECNet_env/vnetsource/datawarehouse/Selenium/Selenium Tests/household_data2.csv")
        driver.find_element_by_css_selector("button.right.btn").click()
        # find the select box and its options
        select = Select(driver.find_element_by_id("id_selectTable-tables"))
        options = select.options
        # parenthesized form prints identically on Python 2 and parses on 3
        print(options)
        driver.find_element_by_css_selector("button.right.btn").click()
        driver.find_element_by_id("addToMap").click()
        driver.find_element_by_css_selector("button.right.btn").click()

    def is_element_present(self, how, what):
        """Return True if an element can be located by (how, what)."""
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException:
            return False
        return True

    def is_alert_present(self):
        """Return True if a JavaScript alert is currently displayed."""
        # Imported locally: NoAlertPresentException was never imported at
        # module scope, so the original except clause raised NameError
        # instead of reporting the missing alert.
        from selenium.common.exceptions import NoAlertPresentException
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException:
            return False
        return True

    def close_alert_and_get_its_text(self):
        """Accept (or dismiss) the current alert and return its text.

        Whether the alert is accepted is controlled by accept_next_alert,
        which is reset to True afterwards.
        """
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True
# Allow running this test module directly: python etlExport.py
if __name__ == "__main__":
    unittest.main()
|
vecnet/vnetsource
|
datawarehouse/tests/Selenium_Tests/etlExport.py
|
Python
|
mpl-2.0
| 2,274
|
#!/usr/bin/env python
from optparse import OptionParser
import glob
from multiprocessing import Array, Pool
import os
import pdb
import h5py
import numpy as np
from scipy.stats import cauchy
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from basenji.plots import jointplot
from basenji.sad5 import SAD5
'''
basenji_sad_norm.py
Compute normalization parameters across a split chromosome dataset.
'''
################################################################################
# main
################################################################################
def main():
  """Fit per-target Cauchy parameters over a chromosome-split SAD HDF5
  dataset, write them into each per-chromosome file, trigger normalization,
  and emit QC tables and plots under the -o output directory."""
  usage = 'usage: %prog [options] arg'
  parser = OptionParser(usage)
  parser.add_option('-o', dest='out_dir',
      default='sad_norm')
  parser.add_option('-s', dest='sample',
      default=100000, type='int',
      help='Number of SNPs to sample for fit [Default: %default]')
  (options,args) = parser.parse_args()
  if len(args) != 1:
    parser.error('Must provide SAD HDF5 path')
  else:
    sad_h5_path = args[0]
  # retrieve chromosome SAD HDF5 files
  chr_sad_h5_files = sorted(glob.glob('%s/*/sad.h5' % sad_h5_path))
  assert(len(chr_sad_h5_files) > 0)
  # clean out any existing fits
  # count SNPs across chromosomes
  num_snps = 0
  for chr_sad_h5_file in chr_sad_h5_files:
    chr_sad_h5 = h5py.File(chr_sad_h5_file, 'r+')
    # delete fit params
    if 'target_cauchy_fit_loc' in chr_sad_h5.keys():
      del chr_sad_h5['target_cauchy_fit_loc']
      del chr_sad_h5['target_cauchy_fit_scale']
    # delete norm params
    if 'target_cauchy_norm_loc' in chr_sad_h5.keys():
      del chr_sad_h5['target_cauchy_norm_loc']
      del chr_sad_h5['target_cauchy_norm_scale']
    # count SNPs
    num_snps += chr_sad_h5['SAD'].shape[0]
    num_targets = chr_sad_h5['SAD'].shape[-1]
    chr_sad_h5.close()
  # sample SNPs across chromosomes
  sad = sample_sad(chr_sad_h5_files, options.sample, num_snps, num_targets)
  # initialize fit parameters
  target_cauchy_fit_loc = np.zeros(num_targets)
  target_cauchy_fit_scale = np.zeros(num_targets)
  # fit parameters
  # NOTE(review): fits each target serially; the fit_cauchy helper below
  # exists for a parallel map but is unused here.
  for ti in range(num_targets):
    print('Fitting t%d' % ti, flush=True)
    cp = cauchy.fit(sad[:,ti])
    target_cauchy_fit_loc[ti] = cp[0]
    target_cauchy_fit_scale[ti] = cp[1]
  # free the sampled matrix before re-opening files for writing
  del sad
  # write across chromosomes
  for chr_sad_h5_file in chr_sad_h5_files:
    chr_sad_h5 = h5py.File(chr_sad_h5_file, 'r+')
    chr_sad_h5.create_dataset('target_cauchy_fit_loc',
                              data=target_cauchy_fit_loc)
    chr_sad_h5.create_dataset('target_cauchy_fit_scale',
                              data=target_cauchy_fit_scale)
    chr_sad_h5.close()
  # compute normalization parameters
  # NOTE(review): constructing SAD5 appears to compute and store the norm
  # parameters as a side effect -- confirm against basenji.sad5.SAD5.
  for chr_sad_h5_file in chr_sad_h5_files:
    chr_sad5 = SAD5(chr_sad_h5_file)
  # QC fit table
  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)
  fit_out = open('%s/fits.txt' % options.out_dir, 'w')
  for ti in range(num_targets):
    print('%-4d %7.1e %7.1e' % (ti, target_cauchy_fit_loc[ti], target_cauchy_fit_scale[ti]), file=fit_out)
  fit_out.close()
  # QC quantiles
  quantile_dir = '%s/quantiles' % options.out_dir
  if not os.path.isdir(quantile_dir):
    os.mkdir(quantile_dir)
  sad_qc = sample_sad(chr_sad_h5_files, 2048, num_snps, num_targets)
  # compare empirical quantiles against the fitted Cauchy CDF for a subset
  # of 64 targets
  for ti in np.linspace(0, num_targets-1, 64, dtype='int'):
    # compute cauchy and argsort quantiles
    cauchy_q = cauchy.cdf(sad_qc[:,ti], loc=target_cauchy_fit_loc[ti], scale=target_cauchy_fit_scale[ti])
    sort_i = np.argsort(sad_qc[:,ti])
    quantile_pdf = '%s/t%d.pdf' % (quantile_dir, ti)
    jointplot(np.linspace(0,1,len(sort_i)), cauchy_q[sort_i], quantile_pdf,
              square=True, cor=None, x_label='Empirical', y_label='Cauchy')
  # QC plots
  norm_dir = '%s/norm' % options.out_dir
  if not os.path.isdir(norm_dir):
    os.mkdir(norm_dir)
  chr_sad5 = SAD5(chr_sad_h5_files[0])
  qc_sample = 2048
  if qc_sample < chr_sad5.num_snps:
    ri = sorted(np.random.choice(np.arange(chr_sad5.num_snps), size=qc_sample, replace=False))
  else:
    ri = np.arange(chr_sad5.num_snps)
  # raw vs normalized SAD values for a subset of 32 targets
  qc_sad_raw = chr_sad5.sad_matrix[ri]
  qc_sad_norm = chr_sad5[ri]
  for ti in np.linspace(0, num_targets-1, 32, dtype='int'):
    plt.figure()
    sns.jointplot(qc_sad_raw[:,ti], qc_sad_norm[:,ti], joint_kws={'alpha':0.5, 's':10})
    plt.savefig('%s/t%d.pdf' % (norm_dir, ti))
    plt.close()
def sample_sad(chr_sad_h5_files, sample, num_snps, num_targets):
  """Sample SAD rows uniformly at random across chromosome HDF5 files.

  Args:
    chr_sad_h5_files: ordered list of per-chromosome 'sad.h5' paths.
    sample: number of SNPs to draw; if >= num_snps, all SNPs are used.
    num_snps: total SNP count across all files.
    num_targets: number of target columns in each 'SAD' dataset.

  Returns:
    float32 array of shape (min(sample, num_snps), num_targets).
  """
  # draw sorted global row indexes
  if sample < num_snps:
    global_ri = np.random.choice(np.arange(num_snps), size=sample, replace=False)
    global_ri.sort()
  else:
    global_ri = np.arange(num_snps)
  sad = np.zeros((len(global_ri), num_targets), dtype='float32')
  offset = 0  # first global row index of the current chromosome
  out_i = 0   # next output row to fill
  for h5_file in chr_sad_h5_files:
    chr_h5 = h5py.File(h5_file, 'r')
    chr_end = offset + chr_h5['SAD'].shape[0]
    # translate global indexes falling in this chromosome to local ones
    in_chr = (offset <= global_ri) & (global_ri < chr_end)
    local_ri = global_ri[in_chr] - offset
    count = len(local_ri)
    sad[out_i:out_i+count, :] = chr_h5['SAD'][local_ri, :]
    chr_h5.close()
    out_i += count
    offset = chr_end
  return sad
def fit_cauchy(sad, ti):
  """Fit a Cauchy distribution to target column *ti* of the SAD matrix,
  returning the (loc, scale) parameter tuple from scipy's fit."""
  print('Fitting t%d' % ti)
  column = sad[:, ti]
  return cauchy.fit(column)
################################################################################
# __main__
################################################################################
# Script entry point.
if __name__ == '__main__':
  main()
|
calico/basenji
|
bin/archive/basenji_sad_norm.py
|
Python
|
apache-2.0
| 5,985
|
#!/usr/bin/env python
from __future__ import absolute_import
import sys
import os
import unittest
import numpy
# Need to temporarily append to the PYTHONPATH in order to import the
# newly built bytscl function
sys.path.append(os.getcwd())
from idl_functions import bytscl
class IDL_bytscl_Tester(unittest.TestCase):
    """
    A unit testing procedure for the IDL BYTSCL function.
    """
    def setUp(self):
        # Random floats (roughly N(0,1)) and random ints in [0, 256).
        self.array1 = numpy.random.randn(100,100)
        self.array2 = numpy.random.randint(0,256,(100,100))
    def test_output_range(self):
        """
        Test that the output array is [0,255].
        """
        byt = bytscl(self.array1)
        outside = (byt < 0) | (byt > 255)
        total = numpy.sum(outside)
        self.assertEqual(total, 0)
    def test_out_dtype(self):
        """
        Test that the output array is of type uint8.
        """
        byt = bytscl(self.array1)
        dtype = byt.dtype
        self.assertEqual(dtype, 'uint8')
    def test_top_keyword(self):
        """
        Test that the top keyword works as expected.
        """
        # Set top to 200
        byt = bytscl(self.array1, top=200)
        mx = numpy.max(byt)
        self.assertEqual(mx, 200)
    def test_maxv_keyword(self):
        """
        Test that the maxv keyword works as expected.
        """
        # Set maxv to 200: every input >= 200 should clip to 255.
        byt = bytscl(self.array2, maxv=200)
        control = numpy.sum(self.array2 >= 200)
        total = numpy.sum(byt == 255)
        self.assertEqual(total, control)
    def test_minv_keyword(self):
        """
        Test that the minv keyword works as expected.
        """
        # Set minv to 200: every input <= 200 should clip to 0.
        byt = bytscl(self.array2, minv=200)
        control = numpy.sum(self.array2 <= 200)
        total = numpy.sum(byt == 0)
        self.assertEqual(total, control)
    def test_nan_keyword(self):
        """
        Test that the nan keyword works as expected.
        """
        # With nan=True, a single NaN must not poison the scaling: the
        # output should still contain non-zero values. (Presumably without
        # NaN handling the NaN would propagate and zero the result, which
        # is what the assertion below guards against.)
        array = self.array1.copy()
        array[0,0] = numpy.nan
        byt = bytscl(array, nan=True)
        total = numpy.sum(byt)
        self.assertTrue(total != 0)
    def test_datatype_error(self):
        """
        Test that an array of an unsupported datatype raises an error.
        """
        arr = numpy.zeros((10,10), dtype='complex')
        self.assertRaises(ValueError, bytscl, arr)
if __name__ == '__main__':
unittest.main()
|
sixy6e/idl-functions
|
tests/unit_test_idl_bytscl.py
|
Python
|
bsd-2-clause
| 2,479
|
import os
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
import driving_data
import model
LOGDIR = './save'

sess = tf.InteractiveSession()

# L2 weight-decay coefficient applied to every trainable variable.
L2NormConst = 0.001

train_vars = tf.trainable_variables()

# Loss: MSE between target and predicted steering plus L2 regularization.
# NOTE(review): tf.sub / initialize_all_variables / scalar_summary etc. are
# pre-1.0 TensorFlow APIs; this script requires an old TF release.
loss = tf.reduce_mean(tf.square(tf.sub(model.y_, model.y))) + tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
sess.run(tf.initialize_all_variables())

# create a summary to monitor cost tensor
tf.scalar_summary("loss", loss)
# merge all summaries into a single op
merged_summary_op = tf.merge_all_summaries()
saver = tf.train.Saver(write_version = saver_pb2.SaverDef.V1)

# op to write logs to Tensorboard
logs_path = './logs'
summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())

epochs = 50
batch_size = 128

# train over the dataset `epochs` times
for epoch in range(epochs):
    for i in range(int(driving_data.num_images/batch_size)):
        xs, ys = driving_data.LoadTrainBatch(batch_size)
        train_step.run(feed_dict={model.x: xs, model.y_: ys, model.keep_prob: 0.8})
        if i % 10 == 0:
            # Every 10 steps, report the loss on a validation batch.
            xs, ys = driving_data.LoadValBatch(batch_size)
            loss_value = loss.eval(feed_dict={model.x:xs, model.y_: ys, model.keep_prob: 1.0})
            print("Epoch: %d, Step: %d, Loss: %g" % (epoch, epoch * batch_size + i, loss_value))

        # write logs at every iteration
        # (uses whichever xs/ys batch was loaded last above)
        summary = merged_summary_op.eval(feed_dict={model.x:xs, model.y_: ys, model.keep_prob: 1.0})
        summary_writer.add_summary(summary, epoch * driving_data.num_images/batch_size + i)

        if i % batch_size == 0:
            # Periodically checkpoint the model weights.
            if not os.path.exists(LOGDIR):
                os.makedirs(LOGDIR)
            checkpoint_path = os.path.join(LOGDIR, "model.ckpt")
            filename = saver.save(sess, checkpoint_path)
    # NOTE(review): indentation reconstructed from the upstream Autopilot
    # script -- this print runs once per epoch, after the inner loop.
    print("Model saved in file: %s" % filename)

print("Run the command line:\n" \
      "--> tensorboard --logdir=./logs " \
      "\nThen open http://0.0.0.0:6006/ into your web browser")
|
UvinduW/RCAutopilot
|
PC/Training Scripts/train.py
|
Python
|
mit
| 1,973
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import basename
class Uvision4(Exporter):
    """Exporter that renders Keil uVision4 project files (.uvproj/.uvopt)
    from per-target templates for the supported mbed targets."""
    NAME = 'uVision4'

    TARGETS = [
        'LPC1768',
        'LPC11U24',
        'KL05Z',
        'KL25Z',
        'KL43Z',
        'KL46Z',
        'K64F',
        'K22F',
        'K20D50M',
        'TEENSY3_1',
        'LPC1347',
        'LPC1114',
        'LPC11C24',
        'LPC4088',
        'LPC4330_M4',
        'LPC4337',
        'LPC812',
        'NUCLEO_F030R8',
        'NUCLEO_F070RB',
        'NUCLEO_F072RB',
        'NUCLEO_F091RC',
        'NUCLEO_F103RB',
        'NUCLEO_F302R8',
        'NUCLEO_F303RE',
        'NUCLEO_F334R8',
        'NUCLEO_F401RE',
        'NUCLEO_F411RE',
        'NUCLEO_L053R8',
        'NUCLEO_L152RE',
        'UBLOX_C027',
        'LPC1549',
        # Removed as uvision4_lpc11u35_501.uvproj.tmpl is missing.
        #'LPC11U35_501',
        'NRF51822',
        'HRM1017',
        'ARCH_PRO',
        'ARCH_BLE',
        'DISCO_F407VG',
        'DISCO_L053C8',
        'MTS_GAMBIT',
        'ARCH_MAX',
        'MTS_MDOT_F405RG',
        'NRF51_DK',
        'NRF51_DONGLE',
        'BLE_SMURFS',
        'LPC11U37H_401',
    ]

    # Targets built against MicroLIB (selects the uARM toolchain).
    USING_MICROLIB = [
        'LPC11U24',
        'LPC1114',
        'LPC11C24',
        'LPC812',
        'NUCLEO_F030R8',
        'NUCLEO_F070RB',
        'NUCLEO_F072RB',
        'NUCLEO_F091RC',
        'NUCLEO_F103RB',
        'NUCLEO_F302R8',
        'NUCLEO_F303RE',
        'NUCLEO_F334R8',
        'NUCLEO_F401RE',
        'NUCLEO_F411RE',
        'NUCLEO_L053R8',
        'NUCLEO_L152RE',
        'LPC1549',
        'LPC11U35_501',
        'KL05Z',
        'LPC11U37H_401',
    ]

    # Maps resource categories to uVision numeric file-type codes.
    FILE_TYPES = {
        'c_sources':'1',
        'cpp_sources':'8',
        's_sources':'2'
    }

    FLAGS = [
        "--gnu", "--no_rtti",
    ]

    # By convention uVision projects do not show header files in the editor:
    # 'headers':'5',

    def get_toolchain(self):
        """Return 'uARM' for MicroLIB targets, otherwise 'ARM'."""
        return 'uARM' if (self.target in self.USING_MICROLIB) else 'ARM'

    def get_flags(self):
        """Return the extra toolchain flags passed to the templates."""
        return self.FLAGS

    def generate(self):
        """Render the .uvproj and .uvopt files for the current target."""
        # Bucket source files by origin: mbed library, HAL, or user code.
        source_files = {
            'mbed': [],
            'hal': [],
            'src': []
        }
        # items() instead of iteritems(): identical iteration on Python 2,
        # and keeps the exporter working under Python 3.
        for r_type, n in Uvision4.FILE_TYPES.items():
            # 'source' rather than 'file' to avoid shadowing the builtin.
            for source in getattr(self.resources, r_type):
                f = {'name': basename(source), 'type': n, 'path': source}
                if source.startswith("mbed\\common"):
                    source_files['mbed'].append(f)
                elif source.startswith("mbed\\targets"):
                    source_files['hal'].append(f)
                else:
                    source_files['src'].append(f)
        # Drop empty groups so the template does not render empty sections.
        source_files = dict( [(k,v) for k,v in source_files.items() if len(v)>0])
        ctx = {
            'name': self.program_name,
            'include_paths': self.resources.inc_dirs,
            'scatter_file': self.resources.linker_script,
            'object_files': self.resources.objects + self.resources.libraries,
            'source_files': source_files.items(),
            'symbols': self.get_symbols() + ['__ASSERT_MSG'],
            'hex_files' : self.resources.hex_files,
            'flags' : self.get_flags(),
        }
        target = self.target.lower()
        # Project file
        self.gen_file('uvision4_%s.uvproj.tmpl' % target, ctx, '%s.uvproj' % self.program_name)
        self.gen_file('uvision4_%s.uvopt.tmpl' % target, ctx, '%s.uvopt' % self.program_name)
|
Willem23/mbed
|
workspace_tools/export/uvision4.py
|
Python
|
apache-2.0
| 4,031
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_linalg_ops import *
# pylint: enable=wildcard-import
# Delegate shape inference for the raw linear-algebra ops to the C++
# shape functions registered with each kernel.
ops.RegisterShape("Cholesky")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("CholeskyGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("MatrixInverse")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("MatrixDeterminant")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SelfAdjointEig")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SelfAdjointEigV2")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Svd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("MatrixSolve")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("MatrixTriangularSolve")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("MatrixSolveLs")(common_shapes.call_cpp_shape_fn)
# Names below are lower_case.
# pylint: disable=invalid-name
def cholesky_solve(chol, rhs, name=None):
  """Solves systems of linear equations `A X = RHS` given Cholesky factors.

  `chol` holds Cholesky factorizations, e.g. `chol = tf.cholesky(A)`.
  Only the lower triangular part (including the diagonal) of the last two
  dimensions of `chol` is read; the strictly upper part is assumed to be
  zero and is never accessed.

  ```python
  # Solve 10 separate 2x2 linear systems:
  A = ...  # shape 10 x 2 x 2
  RHS = ...  # shape 10 x 2 x 1
  chol = tf.cholesky(A)  # shape 10 x 2 x 2
  X = tf.cholesky_solve(chol, RHS)  # shape 10 x 2 x 1
  # tf.matmul(A, X) ~ RHS
  ```

  Args:
    chol: A `Tensor`. Must be `float32` or `float64`, shape `[..., M, M]`.
    rhs: A `Tensor`, same type as `chol`, shape `[..., M, K]`.
    name: A name to give this `Op`. Defaults to `cholesky_solve`.

  Returns:
    Solution to `A x = rhs`, shape `[..., M, K]`.
  """
  # With A = C C^*, solve in two triangular steps:
  #   1. C y = rhs        (forward substitution; y = C^* x)
  #   2. C^* x = y        (back substitution)
  with ops.name_scope(name, "cholesky_solve", [chol, rhs]):
    forward = gen_linalg_ops.matrix_triangular_solve(
        chol, rhs, adjoint=False, lower=True)
    solution = gen_linalg_ops.matrix_triangular_solve(
        chol, forward, adjoint=True, lower=True)
    return solution
def eye(
    num_rows,
    num_columns=None,
    batch_shape=None,
    dtype=dtypes.float32,
    name=None):
  """Constructs an identity matrix, or a batch of identity matrices.

  ```python
  # Construct one identity matrix.
  tf.eye(2)
  ==> [[1., 0.],
       [0., 1.]]

  # Construct a batch of 3 identity matricies, each 2 x 2.
  batch_identity = tf.eye(2, batch_shape=[3])

  # Construct one 2 x 3 "identity" matrix
  tf.eye(2, num_columns=3)
  ==> [[ 1.,  0.,  0.],
       [ 0.,  1.,  0.]]
  ```

  Args:
    num_rows: Non-negative `int32` scalar `Tensor`: rows in each batch matrix.
    num_columns: Optional non-negative `int32` scalar `Tensor`: columns in
      each batch matrix. Defaults to `num_rows`.
    batch_shape: `int32` `Tensor`. If provided, the returned `Tensor` has
      leading batch dimensions of this shape.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for this `Op`. Defaults to "eye".

  Returns:
    A `Tensor` of shape `batch_shape + [num_rows, num_columns]`.
  """
  with ops.name_scope(
      name, default_name="eye", values=[num_rows, num_columns, batch_shape]):
    if batch_shape is None:
      batch_shape = []
    batch_shape = ops.convert_to_tensor(
        batch_shape, name="shape", dtype=dtypes.int32)
    # The diagonal has min(num_rows, num_columns) entries.
    if num_columns is None:
      diag_size = num_rows
    else:
      diag_size = math_ops.minimum(num_rows, num_columns)
    diag_shape = array_ops.concat(0, (batch_shape, [diag_size]))
    ones_diag = array_ops.ones(diag_shape, dtype=dtype)
    if num_columns is None:
      # Square case: expand the ones vector directly into a diagonal matrix.
      return array_ops.matrix_diag(ones_diag)
    # Rectangular case: write the diagonal into a zero matrix.
    full_shape = array_ops.concat(0, (batch_shape, [num_rows, num_columns]))
    zero_matrix = array_ops.zeros(full_shape, dtype=dtype)
    return array_ops.matrix_set_diag(zero_matrix, ones_diag)
def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
  r"""Solves one or more linear least-squares problems.

  `matrix` has shape `[..., M, N]` and `rhs` has shape `[..., M, K]`; the
  result is a `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions
  solve `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the
  least squares sense.

  With the notation `matrix`=\\(A \in \Re^{m \times n}\\),
  `rhs`=\\(B \in \Re^{m \times k}\\), `output`=\\(X \in \Re^{n \times k}\\)
  and `l2_regularizer`=\\(\lambda\\):

  If `fast` is `True`, the normal equations are solved with Cholesky
  decomposition. For \\(m \ge n\\),
  \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the regularized
  least-squares problem
  \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
  \lambda ||Z||_F^2\\). For \\(m \lt n\\),
  \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which for \\(\lambda = 0\\) is
  the minimum-norm solution of the under-determined system. The fast path
  is numerically stable only when \\(A\\) is numerically full rank with
  \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or
  \\(\lambda\\) is sufficiently large.

  If `fast` is `False`, a numerically robust complete orthogonal
  decomposition is used, yielding the minimum-norm least-squares solution
  even for rank-deficient \\(A\\). This path is typically 6-7 times slower
  and ignores `l2_regularizer`.

  Args:
    matrix: `Tensor` of shape `[..., M, N]`.
    rhs: `Tensor` of shape `[..., M, K]`.
    l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
    fast: bool. Defaults to `True`.
    name: string, optional name of the operation.

  Returns:
    output: `Tensor` of shape `[..., N, K]` solving the equations in the
      least squares sense.
  """
  # pylint: disable=protected-access
  output = gen_linalg_ops._matrix_solve_ls(
      matrix, rhs, l2_regularizer, fast=fast, name=name)
  return output
def self_adjoint_eig(tensor, name=None):
  """Computes the eigen decomposition of a batch of self-adjoint matrices.

  For each innermost N-by-N matrix, returns eigenvalues and eigenvectors
  satisfying `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`,
  for i=0...N-1.

  Args:
    tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part
      of each inner inner matrix is referenced.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`.
    v: Eigenvectors. Shape is `[..., N, N]`; the columns of the inner most
      matrices contain eigenvectors of the corresponding matrices in `tensor`.
  """
  # pylint: disable=protected-access
  eigenvalues, eigenvectors = gen_linalg_ops._self_adjoint_eig_v2(
      tensor, compute_v=True, name=name)
  return eigenvalues, eigenvectors
def self_adjoint_eigvals(tensor, name=None):
  """Computes the eigenvalues of one or more self-adjoint matrices.

  Args:
    tensor: `Tensor` of shape `[..., N, N]`.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`; the vector `e[..., :]` contains the
      `N` eigenvalues of `tensor[..., :, :]`.
  """
  # pylint: disable=protected-access
  eigenvalues, _ = gen_linalg_ops._self_adjoint_eig_v2(
      tensor, compute_v=False, name=name)
  return eigenvalues
def svd(tensor, compute_uv=True, full_matrices=False, name=None):
  """Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :,
  :])`

  ```prettyprint
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.
  """
  # pylint: disable=protected-access
  # Forward `name` to the underlying op; it was previously accepted but
  # silently ignored.
  s, u, v = gen_linalg_ops._svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices, name=name)
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)
# pylint: enable=invalid-name
|
cg31/tensorflow
|
tensorflow/python/ops/linalg_ops.py
|
Python
|
apache-2.0
| 11,252
|
"""A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <david_ascher@brown.edu>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import errno
import re
import socket
try:
import ssl
HAVE_SSL = True
except ImportError:
HAVE_SSL = False
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 1939 limits POP3 line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
class POP3:

    """This class supports both the minimal and optional command sets.
    Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well.

    Minimal Command Set:
            USER name               user(name)
            PASS string             pass_(string)
            STAT                    stat()
            LIST [msg]              list(msg = None)
            RETR msg                retr(msg)
            DELE msg                dele(msg)
            NOOP                    noop()
            RSET                    rset()
            QUIT                    quit()

    Optional Commands (some servers support these):
            RPOP name               rpop(name)
            APOP name digest        apop(name, digest)
            TOP msg n               top(msg, n)
            UIDL [msg]              uidl(msg = None)
            CAPA                    capa()
            STLS                    stls()
            UTF8                    utf8()

    Raises one exception: 'error_proto'.

    Instantiate with:
            POP3(hostname, port=110)

    NB:     the POP protocol locks the mailbox from user
            authorization until QUIT, so be sure to get in, suck
            the messages, and quit, each time you access the
            mailbox.

            POP is a line-based protocol, which means large mail
            messages consume lots of python cycles reading them
            line-by-line.

            If it's available on your mail server, use IMAP4
            instead, it doesn't suffer from the two problems
            above.
    """

    # Encoding used to turn command strings into bytes on the wire.
    encoding = 'UTF-8'

    def __init__(self, host, port=POP3_PORT,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.host = host
        self.port = port
        self._tls_established = False
        self.sock = self._create_socket(timeout)
        self.file = self.sock.makefile('rb')
        self._debugging = 0
        # The server greeting is read immediately on connect.
        self.welcome = self._getresp()

    def _create_socket(self, timeout):
        # Overridden by POP3_SSL to return a TLS-wrapped socket.
        return socket.create_connection((self.host, self.port), timeout)

    def _putline(self, line):
        # Internal: send one raw bytes line, terminated with CRLF.
        if self._debugging > 1: print('*put*', repr(line))
        self.sock.sendall(line + CRLF)

    # Internal: send one command to the server (through _putline())

    def _putcmd(self, line):
        if self._debugging: print('*cmd*', repr(line))
        line = bytes(line, self.encoding)
        self._putline(line)

    # Internal: return one line from the server, stripping CRLF.
    # This is where all the CPU time of this module is consumed.
    # Raise error_proto('-ERR EOF') if the connection is closed.

    def _getline(self):
        # _MAXLINE + 1 so an over-long line is detectable.
        line = self.file.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise error_proto('line too long')
        if self._debugging > 1: print('*get*', repr(line))
        if not line: raise error_proto('-ERR EOF')
        octets = len(line)
        # server can send any combination of CR & LF
        # however, 'readline()' returns lines ending in LF
        # so only possibilities are ...LF, ...CRLF, CR...LF
        if line[-2:] == CRLF:
            return line[:-2], octets
        if line[:1] == CR:
            return line[1:-1], octets
        return line[:-1], octets

    # Internal: get a response from the server.
    # Raise 'error_proto' if the response doesn't start with '+'.

    def _getresp(self):
        resp, o = self._getline()
        if self._debugging > 1: print('*resp*', repr(resp))
        if not resp.startswith(b'+'):
            raise error_proto(resp)
        return resp

    # Internal: get a response plus following text from the server.

    def _getlongresp(self):
        resp = self._getresp()
        # NOTE: local name 'list' shadows the builtin; kept for
        # byte-compatibility with the original code.
        list = []; octets = 0
        line, o = self._getline()
        while line != b'.':
            # Byte-stuffing: a leading '..' encodes a line starting with '.'.
            if line.startswith(b'..'):
                o = o-1
                line = line[1:]
            octets = octets + o
            list.append(line)
            line, o = self._getline()
        return resp, list, octets

    # Internal: send a command and get the response

    def _shortcmd(self, line):
        self._putcmd(line)
        return self._getresp()

    # Internal: send a command and get the response plus following text

    def _longcmd(self, line):
        self._putcmd(line)
        return self._getlongresp()

    # These can be useful:

    def getwelcome(self):
        # Return the server greeting captured at connect time.
        return self.welcome

    def set_debuglevel(self, level):
        self._debugging = level

    # Here are all the POP commands:

    def user(self, user):
        """Send user name, return response

        (should indicate password required).
        """
        return self._shortcmd('USER %s' % user)

    def pass_(self, pswd):
        """Send password, return response

        (response includes message count, mailbox size).

        NB: mailbox is locked by server from here to 'quit()'
        """
        return self._shortcmd('PASS %s' % pswd)

    def stat(self):
        """Get mailbox status.

        Result is tuple of 2 ints (message count, mailbox size)
        """
        retval = self._shortcmd('STAT')
        rets = retval.split()
        if self._debugging: print('*stat*', repr(rets))
        numMessages = int(rets[1])
        sizeMessages = int(rets[2])
        return (numMessages, sizeMessages)

    def list(self, which=None):
        """Request listing, return result.

        Result without a message number argument is in form
        ['response', ['mesg_num octets', ...], octets].

        Result when a message number argument is given is a
        single response: the "scan listing" for that message.
        """
        if which is not None:
            return self._shortcmd('LIST %s' % which)
        return self._longcmd('LIST')

    def retr(self, which):
        """Retrieve whole message number 'which'.

        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('RETR %s' % which)

    def dele(self, which):
        """Delete message number 'which'.

        Result is 'response'.
        """
        return self._shortcmd('DELE %s' % which)

    def noop(self):
        """Does nothing.

        One supposes the response indicates the server is alive.
        """
        return self._shortcmd('NOOP')

    def rset(self):
        """Unmark all messages marked for deletion."""
        return self._shortcmd('RSET')

    def quit(self):
        """Signoff: commit changes on server, unlock mailbox, close connection."""
        resp = self._shortcmd('QUIT')
        self.close()
        return resp

    def close(self):
        """Close the connection without assuming anything about it."""
        try:
            file = self.file
            self.file = None
            if file is not None:
                file.close()
        finally:
            sock = self.sock
            self.sock = None
            if sock is not None:
                try:
                    # Best-effort shutdown before close.
                    sock.shutdown(socket.SHUT_RDWR)
                except OSError as exc:
                    # The server might already have closed the connection.
                    # On Windows, this may result in WSAEINVAL (error 10022):
                    # An invalid operation was attempted.
                    if (exc.errno != errno.ENOTCONN
                       and getattr(exc, 'winerror', 0) != 10022):
                        raise
                finally:
                    sock.close()

    #__del__ = quit

    # optional commands:

    def rpop(self, user):
        """Not sure what this does."""
        return self._shortcmd('RPOP %s' % user)

    # Matches the msg-id timestamp in the server greeting, e.g.
    # b'+OK ... <1896.697170952@dbc.mtview.ca.us>'.
    timestamp = re.compile(br'\+OK.[^<]*(<.*>)')

    def apop(self, user, password):
        """Authorisation

        - only possible if server has supplied a timestamp in initial greeting.

        Args:
                user     - mailbox user;
                password - mailbox password.

        NB: mailbox is locked by server from here to 'quit()'
        """
        secret = bytes(password, self.encoding)
        m = self.timestamp.match(self.welcome)
        if not m:
            raise error_proto('-ERR APOP not supported by server')
        import hashlib
        digest = m.group(1)+secret
        digest = hashlib.md5(digest).hexdigest()
        return self._shortcmd('APOP %s %s' % (user, digest))

    def top(self, which, howmuch):
        """Retrieve message header of message number 'which'
        and first 'howmuch' lines of message body.

        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('TOP %s %s' % (which, howmuch))

    def uidl(self, which=None):
        """Return message digest (unique id) list.

        If 'which', result contains unique id for that message
        in the form 'response mesgnum uid', otherwise result is
        the list ['response', ['mesgnum uid', ...], octets]
        """
        if which is not None:
            return self._shortcmd('UIDL %s' % which)
        return self._longcmd('UIDL')

    def utf8(self):
        """Try to enter UTF-8 mode (see RFC 6856). Returns server response.
        """
        return self._shortcmd('UTF8')

    def capa(self):
        """Return server capabilities (RFC 2449) as a dictionary
        >>> c=poplib.POP3('localhost')
        >>> c.capa()
        {'IMPLEMENTATION': ['Cyrus', 'POP3', 'server', 'v2.2.12'],
         'TOP': [], 'LOGIN-DELAY': ['0'], 'AUTH-RESP-CODE': [],
         'EXPIRE': ['NEVER'], 'USER': [], 'STLS': [], 'PIPELINING': [],
         'UIDL': [], 'RESP-CODES': []}
        >>>

        Really, according to RFC 2449, the cyrus folks should avoid
        having the implementation split into multiple arguments...
        """
        def _parsecap(line):
            lst = line.decode('ascii').split()
            return lst[0], lst[1:]

        caps = {}
        try:
            resp = self._longcmd('CAPA')
            rawcaps = resp[1]
            for capline in rawcaps:
                capnm, capargs = _parsecap(capline)
                caps[capnm] = capargs
        except error_proto as _err:
            raise error_proto('-ERR CAPA not supported by server')
        return caps

    def stls(self, context=None):
        """Start a TLS session on the active connection as specified in RFC 2595.

                context - a ssl.SSLContext
        """
        if not HAVE_SSL:
            raise error_proto('-ERR TLS support missing')
        if self._tls_established:
            raise error_proto('-ERR TLS session already established')
        # STLS is only attempted if the server advertises it.
        caps = self.capa()
        if not 'STLS' in caps:
            raise error_proto('-ERR STLS not supported by server')
        if context is None:
            context = ssl._create_stdlib_context()
        resp = self._shortcmd('STLS')
        self.sock = context.wrap_socket(self.sock,
                                        server_hostname=self.host)
        # Rebuild the buffered reader on top of the TLS socket.
        self.file = self.sock.makefile('rb')
        self._tls_established = True
        return resp
if HAVE_SSL:

    class POP3_SSL(POP3):
        """POP3 client class over SSL connection

        Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None,
                                   context=None)

               hostname - the hostname of the pop3 over ssl server
               port - port number
               keyfile - PEM formatted file that contains your private key
               certfile - PEM formatted certificate chain file
               context - a ssl.SSLContext

        See the methods of the parent class POP3 for more documentation.
        """

        def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,
                     timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None):
            # keyfile/certfile are the legacy configuration path and are
            # mutually exclusive with a ready-made SSLContext.
            if context is not None and keyfile is not None:
                raise ValueError("context and keyfile arguments are mutually "
                                 "exclusive")
            if context is not None and certfile is not None:
                raise ValueError("context and certfile arguments are mutually "
                                 "exclusive")
            self.keyfile = keyfile
            self.certfile = certfile
            if context is None:
                context = ssl._create_stdlib_context(certfile=certfile,
                                                     keyfile=keyfile)
            self.context = context
            POP3.__init__(self, host, port, timeout)

        def _create_socket(self, timeout):
            # Wrap the plain TCP socket in TLS immediately after connecting.
            sock = POP3._create_socket(self, timeout)
            sock = self.context.wrap_socket(sock,
                                            server_hostname=self.host)
            return sock

        def stls(self, keyfile=None, certfile=None, context=None):
            """The method unconditionally raises an exception since the
            STLS command doesn't make any sense on an already established
            SSL/TLS session.
            """
            raise error_proto('-ERR TLS session already established')

    __all__.append("POP3_SSL")
if __name__ == "__main__":
    # Minimal interactive smoke test:
    #   python poplib.py <host> <user> <password>
    import sys
    a = POP3(sys.argv[1])
    print(a.getwelcome())
    a.user(sys.argv[2])
    a.pass_(sys.argv[3])
    a.list()
    (numMsgs, totalSize) = a.stat()
    for i in range(1, numMsgs + 1):
        (header, msg, octets) = a.retr(i)
        print("Message %d:" % i)
        for line in msg:
            # retr() returns raw bytes lines; decode before concatenating
            # with str (bytes + str raises TypeError on Python 3).
            print(' ' + line.decode(POP3.encoding, 'replace'))
        print('-----------------------')
    a.quit()
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/poplib.py
|
Python
|
apache-2.0
| 14,717
|
"""
Provide the function doxygen comment in impl file.
It checks whether there is a doxygen-style comment in front of each function definition.
It only checks non-static and non-private function definitions.
Unfortunately, this rule cannot determine whether a method is private or not
if the function definition is located in a cpp file.
Please put '// NS' at the right side of the private function signature to suppress the false alarms.
Example)
= a.cpp =
void KK::C() // NS
{
}
== Violation ==
= a.cpp =
void FunctionA() { <== Violation. No doxygen comment.
}
/* <== Violation. It's not the doxygen comment
*
*/
void FunctionB()
{
}
== Good ==
= a.cpp =
/** <== OK
* blar blar
*/
void FunctionA()
{
}
/**
* blar blar
*/
void FunctionB(); <== OK.
class A {
private :
void FunctionC() { <== Don't care. it's the private function.
}
}
static void FunctionD() <== Don't care. it's the c style private function.
{
}
= a.h =
void FunctionB(); <== Don't care. It's the declared in the header.
"""
import nsiqcppstyle_reporter
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, fullName, decl, contextStack, context) :
    """Report a violation when a function definition in an impl file is not
    preceded by a doxygen-style comment.

    Skips declarations, header files, private class methods, and static
    (file-local) functions.
    """
    ext = lexer.filename[lexer.filename.rfind("."):]
    if not decl and ext != ".h" and context != None:
        # Private methods of a class are exempt.
        upperBlock = contextStack.SigPeek()
        if upperBlock != None and upperBlock.type == "CLASS_BLOCK" and upperBlock.additional == "PRIVATE":
            return
        # Static functions are exempt: a STATIC token occurring after the
        # last statement/block terminator belongs to this definition.
        t1 = lexer.GetPrevTokenInType("STATIC", True)
        t2 = lexer.GetPrevTokenInTypeList(["SEMI", "RBRACE"], True)
        if t1 != None and (t2 == None or t1.lexpos > t2.lexpos) :
            return
        t = lexer.GetCurToken()
        # Look backwards for the nearest comment and the nearest
        # statement/preprocessor boundary, restoring the token index
        # after each search.
        lexer.PushTokenIndex()
        t2 = lexer.GetPrevTokenInType("COMMENT")
        lexer.PopTokenIndex()
        lexer.PushTokenIndex()
        t3 = lexer.GetPrevTokenInTypeList(["SEMI", "PREPROCESSOR"], False, True)
        lexer.PopTokenIndex()
        # The comment satisfies the rule only if it is doxygen-style and no
        # statement boundary separates it from this definition.
        # NOTE(review): the t.lexpos > t3.lexpos comparison presumably checks
        # that the boundary precedes the function token -- verify against the
        # lexer's position semantics.
        if t2 != None and t2.additional == "DOXYGEN" :
            if t3 == None or t.lexpos > t3.lexpos :
                return
        nsiqcppstyle_reporter.Error(t, __name__, "Doxygen Comment should be provided in front of function (%s) in impl file." % fullName)

ruleManager.AddFunctionNameRule(RunRule)
def RunTypeScopeRule(lexer, contextStack):
    """Record the current access specifier (public/private/protected)
    on the enclosing class or struct context block."""
    token = lexer.GetCurToken()
    if token.type not in ["PUBLIC", "PRIVATE", "PROTECTED"]:
        return
    enclosing = contextStack.SigPeek()
    if enclosing.type in ["CLASS_BLOCK", "STRUCT_BLOCK"]:
        enclosing.additional = token.type

ruleManager.AddTypeScopeRule(RunTypeScopeRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
    def setUpRule(self):
        # Register both rules under test.
        ruleManager.AddFunctionNameRule(RunRule)
        ruleManager.AddTypeScopeRule(RunTypeScopeRule)

    def test1(self):
        # Function definition with no comment at all -> violation.
        self.Analyze("thisfile.c",
"""
void FunctionA() {
}
""")
        assert CheckErrorContent(__name__)

    def test2(self):
        # Plain C comment (/* */), not doxygen (/** */) -> violation.
        self.Analyze("thisfile.c",
"""
/*
*
*/
extern void FunctionB() {
}
""")
        assert CheckErrorContent(__name__)

    def test3(self):
        # Public destructor without a doxygen comment -> violation.
        self.Analyze("thisfile.c",
"""
class A {
public:
void ~A() {
}
}
""")
        assert CheckErrorContent(__name__)

    def test4(self):
        # Doxygen-commented methods (and a private method) -> no violation.
        self.Analyze("thisfile.c",
"""
class J {
/** HELLO */
C() {
}
public :
/** HELLO */
A();
private :
B() {}
}
""")
        assert not CheckErrorContent(__name__)

    def test5(self):
        # Static (file-local) function is exempt -> no violation.
        self.Analyze("thisfile.c",
"""
/*
*
*/
static void FunctionB() {
}
""")
        assert not CheckErrorContent(__name__)

    def test6(self):
        # Definitions in a header file are exempt -> no violation.
        self.Analyze("thisfile.h",
"""
int a;
void FunctionB(){
}
""")
        assert not CheckErrorContent(__name__)

    def test7(self):
        # Same content in an impl file -> violation.
        self.Analyze("thisfile.c",
"""
int a;
void FunctionB(){
}
""")
        assert CheckErrorContent(__name__)

    def test8(self):
        # Uncommented method with default (private-less) class scope ->
        # violation.
        self.Analyze("thisfile.c",
"""
class J {
C() {
}
}
""")
        assert CheckErrorContent(__name__)
|
codingpoets/tigl
|
thirdparty/nsiqcppstyle/rules/RULE_5_3_A_provide_doxygen_function_comment_on_function_in_impl.py
|
Python
|
apache-2.0
| 4,364
|
from django.db import models
from django.conf import settings
from sita.core.db.models import CatalogueMixin
# Create your models here.
class PatientManager(models.Manager):
    """Manager with helpers to register patients and check existence."""

    def register(self, data, fields, user, **extra_fields):
        """Create and save a Patient owned by `user`.

        Copies into the new instance every key of `data` whose name matches
        one of `fields`, without overriding values already present in
        `extra_fields`.

        NOTE(review): `any(key in s for s in fields)` does substring matching
        against the field names, not exact membership; presumably
        `key in fields` was intended -- kept as-is to preserve behavior.
        """
        # Debug `print` statements removed from the original implementation.
        for key in data:
            if any(key in s for s in fields):
                extra_fields.setdefault(key, data.get(key))
        patient = self.model(
            user_id=user.id,
            **extra_fields
        )
        patient.save()
        return patient

    def exists(self, pk=None):
        """Return True if a patient with primary key `pk` exists."""
        try:
            # Query through the manager itself instead of the module-level
            # Patient name; avoids binding an unused local.
            self.get(id=pk)
            return True
        except self.model.DoesNotExist:
            return False
class Patient(CatalogueMixin):
    """Create model Patient."""
    last_name = models.CharField(
        max_length=100,
        null=True,
        blank=True
    )
    mothers_name = models.CharField(
        max_length=100,
        null=True,
        blank=True
    )
    email = models.EmailField(
        max_length=254,
    )
    age = models.IntegerField(
        null=True,
        blank=True
    )
    mobile_phone = models.CharField(
        max_length=10,
    )
    house_phone = models.CharField(
        max_length=10,
        null=True,
        blank=True
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.PROTECT
    )
    objects = PatientManager()

    def get_fields(self):
        """Return the names of all fields of this model, in declaration order."""
        # Comprehension instead of the original append loop, which also
        # shadowed the builtin ``list``.
        return [field.name for field in Patient._meta.fields]
|
Fabfm4/Sita-BackEnd
|
src/sita/patients/models.py
|
Python
|
apache-2.0
| 1,589
|
#!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import MaxToTotal as sMaxToTotal
class TestMaxToTotal_Streaming(TestCase):
    """Tests for the streaming MaxToTotal algorithm."""

    def testRegression(self):
        # triangle input: ascending ramp followed by a descending one.
        # Wrap range() in list(): in Python 3, range objects have no
        # .reverse() and cannot be concatenated (the original was py2-only).
        descending = list(range(22050))
        descending.reverse()
        envelope = list(range(22050)) + descending
        gen = VectorInput(envelope)
        maxToTotal = sMaxToTotal()
        p = Pool()
        gen.data >> maxToTotal.envelope
        maxToTotal.maxToTotal >> (p, 'lowlevel.maxToTotal')
        run(gen)
        result = p['lowlevel.maxToTotal']
        # peak is in the middle of the envelope, so the ratio should be ~0.5
        self.assertAlmostEqual(result, .5, 5e-5)  # this seems like a large error -rtoscano
        # the streaming result must agree with the standard-mode algorithm
        self.assertAlmostEqual(result, MaxToTotal()(envelope), 5e-7)

    def testEmpty(self):
        gen = VectorInput([])
        alg = sMaxToTotal()
        p = Pool()
        gen.data >> alg.envelope
        alg.maxToTotal >> (p, 'lowlevel.maxToTotal')
        run(gen)
        # Make sure nothing was emitted to the pool
        self.assertRaises(KeyError, lambda: p['lowlevel.maxToTotal'])
# Collect all tests of the streaming MaxToTotal case into one suite so the
# file can be run standalone or picked up by the test driver.
suite = allTests(TestMaxToTotal_Streaming)
if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
|
NadineKroher/essentia
|
test/src/unittest/sfx/test_maxtototal_streaming.py
|
Python
|
agpl-3.0
| 1,940
|
"""
Customizations of sphinx for gnuradio use.
"""
from sphinx.ext.autodoc import py_ext_sig_re
from sphinx.ext.autodoc import ClassDocumenter, FunctionDocumenter, members_option
from sphinx.ext.autodoc import bool_option, members_set_option, identity
from sphinx.ext.autodoc import ALL
# A dictionary of the number of lines to delete from the beginning of docstrings
lines_to_delete = {}
def setup(sp):
    """Register the gnuradio autodoc hooks and documenters with Sphinx.

    Parameters
    ----------
    sp : the Sphinx application object passed to extension ``setup``.
    """
    # Fix line-breaks in signature.
    sp.connect('autodoc-process-signature', fix_signature)
    sp.connect('autodoc-process-docstring', remove_lines)
    # Add node to autodocument signal-processing blocks.
    sp.add_autodocumenter(OldBlockDocumenter)
    sp.add_autodocumenter(BlockDocumenter)
    sp.add_autodocumenter(PyBlockDocumenter)
def remove_lines(app, what, name, obj, options, lines):
    """Drop the docstring lines that fix_signature queued for deletion.

    ``lines`` is modified in place, as required by the
    autodoc-process-docstring event.
    """
    count = lines_to_delete.get(name, 0)
    # Reset the counter so a second pass over the same name deletes nothing.
    lines_to_delete[name] = 0
    lines[:] = lines[count:]
def fix_signature(app, what, name, obj, options, signature, return_annotation):
    """
    SWIG produces signature at the top of docstrings of the form
    'blah(int arg1, float arg2) -> return_type'
    and if the string is long it breaks it over multiple lines.
    Sphinx gets confused if it is broken over multiple lines.
    fix_signature and remove_lines get around this problem.
    """
    if return_annotation is not None:
        # Sphinx already extracted a return annotation; nothing to repair.
        return
    if hasattr(obj, '__doc__'):
        docs = obj.__doc__
    else:
        docs = None
    if not docs:
        return None
    doclines = docs.split('\n')
    # Collapse a possibly line-broken SWIG signature onto doclines[0].
    del_lines = remove_linebreaks_in_signature(doclines)
    # match first line of docstring against signature RE
    match = py_ext_sig_re.match(doclines[0])
    if not match:
        return None
    exmod, path, base, args, retann = match.groups()
    # ok, now jump over remaining empty lines and set the remaining
    # lines as the new doclines
    i = 1
    while i < len(doclines) and not doclines[i].strip():
        i += 1
    # Record how many leading lines remove_lines() must strip later.
    lines_to_delete[name] = i - 1 + del_lines
    # format args
    signature = "({0})".format(args)
    return signature, retann
def remove_linebreaks_in_signature(lines):
    """Collapse a line-broken SWIG signature in *lines* onto one line.

    If the first lines look like a signature split across several lines
    ('name(args) ->' then the return type on a later line), rewrite *lines*
    in place so the whole signature sits on line one, and return how many
    original lines it spanned. Return 0 when no broken signature is found.
    """
    alllines = '\n'.join(lines)
    alllines = alllines.lstrip()
    bits = alllines.split('->')
    if len(bits) == 1:
        # no '->' at all, so nothing resembling a SWIG signature
        return 0
    after = '->'.join(bits[1:])
    after_lines = after.split('\n')
    ending = None
    remainder = []
    for line in after_lines:
        if line and ending is None:
            ending = line
        elif ending is not None:
            remainder.append(line)
    if ending is None:
        # BUG FIX: '->' was followed only by empty lines; the original code
        # went on to call ending.strip() and raised AttributeError here.
        return 0
    first_line = ' '.join([a.strip() for a in bits[0].split('\n') if a.strip()]) + ' -> ' + ending.strip()
    match = py_ext_sig_re.match(first_line)
    # If it is a signature, make the change to lines.
    if match:
        new_lines = [first_line] + remainder
        lines[:] = new_lines
        return len(bits[0].split('\n'))
    else:
        return 0
# These methods are not displayed in the documentation of blocks to
# avoid redundancy: they come from the common gr block / SWIG machinery
# and are identical for every block.
common_block_members = [
    'check_topology',
    'detail',
    'history',
    'input_signature',
    'name',
    'nitems_read',
    'nitems_written',
    'nthreads',
    'output_multiple',
    'output_signature',
    'relative_rate',
    'set_detail',
    'set_nthreads',
    'start',
    'stop',
    'thisown',
    'to_basic_block',
    'unique_id',
    'make',
    'alias',
    'is_set_max_noutput_items',
    'max_noutput_items',
    'max_output_buffer',
    'message_ports_in',
    'message_ports_out',
    'min_output_buffer',
    'pc_input_buffers_full',
    'pc_input_buffers_full_var',
    'pc_noutput_items',
    'pc_noutput_items_var',
    'pc_nproduced',
    'pc_nproduced_var',
    'pc_output_buffers_full',
    'pc_output_buffers_full_var',
    'pc_work_time',
    'pc_work_time_var',
    'processor_affinity',
    'set_block_alias',
    'set_max_noutput_items',
    'unset_max_noutput_items',
    'set_max_output_buffer',
    'set_min_output_buffer',
    'set_processor_affinity',
    'symbol_name',
    'unset_processor_affinity',
    'disconnect_all',
    'index',
    'length',
    'lock',
    'primitive_connect',
    'primitive_disconnect',
    'primitive_message_port_register_hier_in',
    'primitive_message_port_register_hier_out',
    'primitive_msg_connect',
    'primitive_msg_disconnect',
    'to_hier_block2',
    'type',
    'unlock',
]
class OldBlockDocumenter(FunctionDocumenter):
    """
    Specialized Documenter subclass for gnuradio blocks.

    It merges together the documentation for the generator function (e.g. blocks.head)
    with the wrapped sptr (e.g. gr::blocks::head::sptr) to keep the documentation
    tidier.
    """
    objtype = 'oldblock'
    directivetype = 'function'
    # Don't want to use this for generic functions for give low priority.
    priority = -10

    def __init__(self, *args, **kwargs):
        super(OldBlockDocumenter, self).__init__(*args, **kwargs)
        # Get class name
        bits = self.name.split('.')
        if len(bits) != 3 or bits[0] != 'gnuradio':
            raise ValueError("expected name to be of form gnuradio.x.y but it is {0}".format(self.name))
        # e.g. gnuradio.blocks.head -> gnuradio.blocks.blocks_head_sptr
        sptr_name = 'gnuradio.{0}.{0}_{1}_sptr'.format(bits[1], bits[2])
        # Create a Class Documenter to create documentation for the classes members.
        self.classdoccer = ClassDocumenter(self.directive, sptr_name, indent=self.content_indent)
        self.classdoccer.doc_as_attr = False
        self.classdoccer.real_modname = self.classdoccer.get_real_modname()
        # Document every sptr member except the block boilerplate.
        self.classdoccer.options.members = ALL
        self.classdoccer.options.exclude_members = common_block_members
        self.classdoccer.parse_name()
        self.classdoccer.import_object()

    def document_members(self, *args, **kwargs):
        # Delegate member documentation to the sptr class documenter.
        return self.classdoccer.document_members(*args, **kwargs)
class BlockDocumenter(FunctionDocumenter):
    """
    Specialized Documenter subclass for new style gnuradio blocks.

    It merges together the documentation for the generator function (e.g. wavelet.squash_ff)
    with the wrapped sptr (e.g. wavelet.squash_ff_sptr) to keep the documentation
    tidier.
    """
    objtype = 'block'
    directivetype = 'function'
    # Don't want to use this for generic functions for give low priority.
    priority = -10

    def __init__(self, *args, **kwargs):
        super(BlockDocumenter, self).__init__(*args, **kwargs)
        # Get class name
        sptr_name = self.name + '_sptr'
        # Create a Class Documenter to create documentation for the classes members.
        self.classdoccer = ClassDocumenter(self.directive, sptr_name, indent=self.content_indent)
        self.classdoccer.doc_as_attr = False
        self.classdoccer.real_modname = self.classdoccer.get_real_modname()
        # Document every sptr member except the block boilerplate.
        self.classdoccer.options.members = ALL
        self.classdoccer.options.exclude_members = common_block_members
        self.classdoccer.parse_name()
        self.classdoccer.import_object()

    def document_members(self, *args, **kwargs):
        # Delegate member documentation to the sptr class documenter.
        return self.classdoccer.document_members(*args, **kwargs)
class PyBlockDocumenter(ClassDocumenter):
    """
    Specialized Documenter subclass for hierarchical python gnuradio blocks.
    """
    objtype = 'pyblock'
    directivetype = 'class'

    def __init__(self, *args, **kwargs):
        super(PyBlockDocumenter, self).__init__(*args, **kwargs)
        # Document all members except the boilerplate common to every block.
        self.options.members = ALL
        self.options.exclude_members = common_block_members
|
chiotlune/ext
|
gnuradio-3.7.0.1/docs/sphinx/gnuradio_sphinx.py
|
Python
|
gpl-2.0
| 7,553
|
from os.path import basename
import logging
from inspect import stack
import numpy as np
from larray_editor.utils import (get_font, to_text_string,
is_float, is_number, LinearGradient, SUPPORTED_FORMATS, scale_to_01range,
Product, is_number_value, get_sample, get_sample_indices, logger)
from qtpy.QtCore import Qt, QModelIndex, QAbstractTableModel, Signal
from qtpy.QtGui import QColor
from qtpy.QtWidgets import QMessageBox
LARGE_SIZE = 5e5
LARGE_NROWS = 1e5
LARGE_COLS = 60
class AbstractArrayModel(QAbstractTableModel):
    """Labels Table Model.

    Base class for the axes/labels/data table models. Implements lazy
    loading: only ``rows_loaded`` x ``cols_loaded`` cells are exposed to the
    view, and more are fetched on demand.

    Parameters
    ----------
    parent : QWidget, optional
        Parent Widget.
    readonly : bool, optional
        If True, data cannot be changed. False by default.
    font : QFont, optional
        Font. Default is `Calibri` with size 11.
    """
    # Number of rows/columns fetched per lazy-loading step.
    ROWS_TO_LOAD = 500
    COLS_TO_LOAD = 40

    def __init__(self, parent=None, readonly=False, font=None):
        QAbstractTableModel.__init__(self)
        self.dialog = parent
        self.readonly = readonly
        if font is None:
            font = get_font("arreditor")
        self.font = font
        self._data = None
        # lazy-loading state
        self.rows_loaded = 0
        self.cols_loaded = 0
        self.total_rows = 0
        self.total_cols = 0

    def _set_data(self, data):
        # Subclasses must store `data` and set total_rows/total_cols.
        raise NotImplementedError()

    def set_data(self, data, reset=True):
        self._set_data(data)
        if reset:
            self.reset()

    def rowCount(self, parent=QModelIndex()):
        # Only the loaded portion is reported to the view.
        return self.rows_loaded

    def columnCount(self, parent=QModelIndex()):
        return self.cols_loaded

    def fetch_more_rows(self):
        if self.total_rows > self.rows_loaded:
            remainder = self.total_rows - self.rows_loaded
            items_to_fetch = min(remainder, self.ROWS_TO_LOAD)
            self.beginInsertRows(QModelIndex(), self.rows_loaded,
                                 self.rows_loaded + items_to_fetch - 1)
            self.rows_loaded += items_to_fetch
            self.endInsertRows()

    def fetch_more_columns(self):
        if self.total_cols > self.cols_loaded:
            remainder = self.total_cols - self.cols_loaded
            items_to_fetch = min(remainder, self.COLS_TO_LOAD)
            self.beginInsertColumns(QModelIndex(), self.cols_loaded,
                                    self.cols_loaded + items_to_fetch - 1)
            self.cols_loaded += items_to_fetch
            self.endInsertColumns()

    def get_value(self, index):
        raise NotImplementedError()

    def _compute_rows_cols_loaded(self):
        # Use paging when the total size, number of rows or number of
        # columns is too large
        size = self.total_rows * self.total_cols
        if size > LARGE_SIZE:
            self.rows_loaded = min(self.ROWS_TO_LOAD, self.total_rows)
            self.cols_loaded = min(self.COLS_TO_LOAD, self.total_cols)
        else:
            if self.total_rows > LARGE_NROWS:
                self.rows_loaded = self.ROWS_TO_LOAD
            else:
                self.rows_loaded = self.total_rows
            if self.total_cols > LARGE_COLS:
                self.cols_loaded = self.COLS_TO_LOAD
            else:
                self.cols_loaded = self.total_cols

    def flags(self, index):
        raise NotImplementedError()

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        # Headers are rendered by the separate label models, not here.
        return None

    def data(self, index, role=Qt.DisplayRole):
        raise NotImplementedError()

    def reset(self):
        self.beginResetModel()
        self.endResetModel()
        if logger.isEnabledFor(logging.DEBUG):
            # log who triggered the reset (one stack frame up)
            caller = stack()[1]
            logger.debug("model {} has been reset after call of {} from module {} at line {}".format(
                self.__class__, caller.function, basename(caller.filename), caller.lineno))
class AxesArrayModel(AbstractArrayModel):
    """Axes Table Model.

    Holds the axis names, displayed as a single bold row.

    Parameters
    ----------
    parent : QWidget, optional
        Parent Widget.
    readonly : bool, optional
        If True, data cannot be changed. False by default.
    font : QFont, optional
        Font. Default is `Calibri` with size 11.
    """
    def __init__(self, parent=None, readonly=False, font=None):
        AbstractArrayModel.__init__(self, parent, readonly, font)
        # axis names are rendered in bold
        self.font.setBold(True)

    def _set_data(self, data):
        # TODO: use sequence instead
        if not isinstance(data, (list, tuple)):
            QMessageBox.critical(self.dialog, "Error", "Expected list or tuple")
            data = []
        self._data = data
        # one row containing one cell per axis
        self.total_rows = 1
        self.total_cols = len(data)
        self._compute_rows_cols_loaded()

    def flags(self, index):
        """Set editable flag"""
        return Qt.ItemIsEnabled

    def get_value(self, index):
        i = index.column()
        return str(self._data[i])

    def get_values(self, left=0, right=None):
        if right is None:
            right = self.total_cols
        values = self._data[left:right]
        return values

    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid():
            return None
        if role == Qt.TextAlignmentRole:
            return int(Qt.AlignCenter | Qt.AlignVCenter)
        elif role == Qt.FontRole:
            return self.font
        elif role == Qt.BackgroundColorRole:
            # semi-transparent light gray for header-like cells
            color = QColor(Qt.lightGray)
            color.setAlphaF(.4)
            return color
        elif role == Qt.DisplayRole:
            return self.get_value(index)
        # elif role == Qt.ToolTipRole:
        #     return None
        else:
            return None
class LabelsArrayModel(AbstractArrayModel):
    """Labels Table Model.

    Holds the tick labels of one or several axes, displayed in bold.

    Parameters
    ----------
    parent : QWidget, optional
        Parent Widget.
    readonly : bool, optional
        If True, data cannot be changed. False by default.
    font : QFont, optional
        Font. Default is `Calibri` with size 11.
    """
    def __init__(self, parent=None, readonly=False, font=None):
        AbstractArrayModel.__init__(self, parent, readonly, font)
        self.font.setBold(True)

    def _set_data(self, data):
        # TODO: use sequence instead
        if not isinstance(data, (list, tuple, Product)):
            QMessageBox.critical(self.dialog, "Error", "Expected list, tuple or Product")
            data = [[]]
        self._data = data
        # data is stored per-column (one inner sequence per column)
        self.total_rows = len(data[0])
        self.total_cols = len(data) if self.total_rows > 0 else 0
        self._compute_rows_cols_loaded()

    def flags(self, index):
        """Set editable flag"""
        return Qt.ItemIsEnabled

    def get_value(self, index):
        i = index.row()
        j = index.column()
        # we need to inverse column and row because of the way vlabels are generated
        return str(self._data[j][i])

    # XXX: I wonder if we shouldn't return a 2D Numpy array of strings?
    def get_values(self, left=0, top=0, right=None, bottom=None):
        # NOTE(review): `right` defaults to total_rows and `bottom` to
        # total_cols because the storage is transposed (see get_value).
        if right is None:
            right = self.total_rows
        if bottom is None:
            bottom = self.total_cols
        values = [list(line[left:right]) for line in self._data[top:bottom]]
        return values

    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid():
            return None
        if role == Qt.TextAlignmentRole:
            return int(Qt.AlignCenter | Qt.AlignVCenter)
        elif role == Qt.FontRole:
            return self.font
        elif role == Qt.BackgroundColorRole:
            # semi-transparent light gray for header-like cells
            color = QColor(Qt.lightGray)
            color.setAlphaF(.4)
            return color
        elif role == Qt.DisplayRole:
            return self.get_value(index)
        # elif role == Qt.ToolTipRole:
        #     return None
        else:
            return None
class DataArrayModel(AbstractArrayModel):
    """Data Table Model.

    Parameters
    ----------
    parent : QWidget, optional
        Parent Widget.
    readonly : bool, optional
        If True, data cannot be changed. False by default.
    format : str, optional
        Indicates how data is represented in cells.
        By default, they are represented as floats with 3 decimal points.
    font : QFont, optional
        Font. Default is `Calibri` with size 11.
    bg_gradient : LinearGradient, optional
        Background color gradient
    bg_value : Numpy ndarray, optional
        Background color value. Must have the shape as data
    minvalue : scalar, optional
        Minimum value allowed.
    maxvalue : scalar, optional
        Maximum value allowed.
    """
    ROWS_TO_LOAD = 500
    COLS_TO_LOAD = 40
    # emitted with {(row, col): (old_value, new_value)} when cells are edited
    newChanges = Signal(dict)

    def __init__(self, parent=None, readonly=False, format="%.3f", font=None, minvalue=None, maxvalue=None):
        AbstractArrayModel.__init__(self, parent, readonly, font)
        self._format = format
        self.minvalue = minvalue
        self.maxvalue = maxvalue
        # color_func maps a value to the scalar used for background shading
        self.color_func = None
        self.vmin = None
        self.vmax = None
        self.bgcolor_possible = False
        self.bg_value = None
        self.bg_gradient = None

    def get_format(self):
        """Return current format"""
        # Avoid accessing the private attribute _format from outside
        return self._format

    def get_data(self):
        """Return data"""
        return self._data

    def _set_data(self, data):
        # TODO: check that data respects minvalue/maxvalue
        assert isinstance(data, np.ndarray) and data.ndim == 2
        self._data = data
        dtype = data.dtype
        if dtype.names is None:
            dtn = dtype.name
            if dtn not in SUPPORTED_FORMATS and not dtn.startswith('str') \
                    and not dtn.startswith('unicode'):
                QMessageBox.critical(self.dialog, "Error", "{} arrays are currently not supported".format(dtn))
                return
        # for complex numbers, shading will be based on absolute value
        # but for all other types it will be the real part
        # TODO: there are a lot more complex dtypes than this. Is there a way to get them all in one shot?
        if dtype in (np.complex64, np.complex128):
            self.color_func = np.abs
        else:
            self.color_func = None
        # --------------------------------------
        self.total_rows, self.total_cols = self._data.shape
        self._compute_rows_cols_loaded()

    def reset_minmax(self):
        # Recompute vmin/vmax (used for background shading) from a sample of
        # the data; disable shading if the data is not numeric or empty.
        try:
            data = self.get_values(sample=True)
            color_value = self.color_func(data) if self.color_func is not None else data
            if color_value.dtype.type == np.object_:
                color_value = color_value[is_number_value(color_value)]
                # this is probably broken if we have complex numbers stored as objects but I don't foresee
                # this case happening anytime soon.
                color_value = color_value.astype(float)
            # ignore nan, -inf, inf (setting them to 0 or to very large numbers is not an option)
            color_value = color_value[np.isfinite(color_value)]
            self.vmin = float(np.min(color_value))
            self.vmax = float(np.max(color_value))
            self.bgcolor_possible = True
        # ValueError for empty arrays, TypeError for object/string arrays
        except (TypeError, ValueError):
            self.vmin = None
            self.vmax = None
            self.bgcolor_possible = False

    def set_format(self, format, reset=True):
        """Change display format"""
        self._format = format
        if reset:
            self.reset()

    def set_bg_gradient(self, bg_gradient, reset=True):
        if bg_gradient is not None and not isinstance(bg_gradient, LinearGradient):
            raise ValueError("Expected None or LinearGradient instance for `bg_gradient` argument")
        self.bg_gradient = bg_gradient
        if reset:
            self.reset()

    def set_bg_value(self, bg_value, reset=True):
        if bg_value is not None and not (isinstance(bg_value, np.ndarray) and bg_value.shape == self._data.shape):
            raise ValueError("Expected None or 2D Numpy ndarray with shape {} for `bg_value` argument"
                             .format(self._data.shape))
        self.bg_value = bg_value
        if reset:
            self.reset()

    def get_value(self, index):
        i, j = index.row(), index.column()
        return self._data[i, j]

    def flags(self, index):
        """Set editable flag"""
        if not index.isValid():
            return Qt.ItemIsEnabled
        flags = QAbstractTableModel.flags(self, index)
        if not self.readonly:
            flags |= Qt.ItemIsEditable
        return Qt.ItemFlags(flags)

    def data(self, index, role=Qt.DisplayRole):
        """Cell content"""
        if not index.isValid():
            return None
        # if role == Qt.DecorationRole:
        #     return ima.icon('editcopy')
        # if role == Qt.DisplayRole:
        #     return ""
        if role == Qt.TextAlignmentRole:
            return int(Qt.AlignRight | Qt.AlignVCenter)
        elif role == Qt.FontRole:
            return self.font
        value = self.get_value(index)
        if role == Qt.DisplayRole:
            if value is np.ma.masked:
                return ''
            # for headers
            elif isinstance(value, str) and not isinstance(value, np.str_):
                return value
            else:
                return self._format % value
        elif role == Qt.BackgroundColorRole:
            if self.bgcolor_possible and self.bg_gradient is not None and value is not np.ma.masked:
                if self.bg_value is None:
                    try:
                        v = self.color_func(value) if self.color_func is not None else value
                        if -np.inf < v < self.vmin:
                            # TODO: this is suboptimal, as it can reset many times (though in practice, it is usually
                            #       ok). When we get buffering, we will need to compute vmin/vmax on the whole buffer
                            #       at once, eliminating this problem (and we could even compute final colors directly
                            #       all at once)
                            self.vmin = v
                            self.reset()
                        elif self.vmax < v < np.inf:
                            self.vmax = v
                            self.reset()
                        v = scale_to_01range(v, self.vmin, self.vmax)
                    except TypeError:
                        v = np.nan
                else:
                    i, j = index.row(), index.column()
                    v = self.bg_value[i, j]
                return self.bg_gradient[v]
        # elif role == Qt.ToolTipRole:
        #     return "{}\n{}".format(repr(value),self.get_labels(index))
        return None

    def get_values(self, left=0, top=0, right=None, bottom=None, sample=False):
        width, height = self.total_rows, self.total_cols
        if right is None:
            right = width
        if bottom is None:
            bottom = height
        values = self._data[left:right, top:bottom]
        if sample:
            sample_indices = get_sample_indices(values, 500)
            # we need to keep the dtype, otherwise numpy might convert mixed object arrays to strings
            return np.array([values[i, j] for i, j in zip(*sample_indices)], dtype=values.dtype)
        else:
            return values

    def convert_value(self, value):
        """Convert a single (user-entered) value to the dtype of the data.

        Parameters
        ----------
        value : str
        """
        dtype = self._data.dtype
        if dtype.name == "bool":
            try:
                return bool(float(value))
            except ValueError:
                return value.lower() == "true"
        elif dtype.name.startswith("string"):
            return str(value)
        elif dtype.name.startswith("unicode"):
            return to_text_string(value)
        elif is_float(dtype):
            return float(value)
        elif is_number(dtype):
            return int(value)
        else:
            return complex(value)

    def convert_values(self, values):
        # Convert an array of user-entered values to the data's dtype,
        # reporting conversion errors via a message box and returning None.
        values = np.asarray(values)
        res = np.empty_like(values, dtype=self._data.dtype)
        try:
            # TODO: use array/vectorized conversion functions (but watch out
            # for bool)
            # new_data = str_array.astype(data.dtype)
            for i, v in enumerate(values.flat):
                res.flat[i] = self.convert_value(v)
        except ValueError as e:
            QMessageBox.critical(self.dialog, "Error", "Value error: %s" % str(e))
            return None
        except OverflowError as e:
            QMessageBox.critical(self.dialog, "Error", "Overflow error: %s" % e.message)
            return None
        return res

    # TODO: I wonder if set_values should not actually change the data. In that case, ArrayEditorWidget.paste
    # and DataArrayModel.setData should call another method "queueValueChange" or something like that. In any case
    # it must be absolutely clear from either the method name, an argument (eg. update_data=False) or from the
    # class name that the data is not changed directly.
    # I am also unsure how this all thing will interact with the big adapter/model refactor in the buffer branch.
    def set_values(self, left, top, right, bottom, values):
        """
        This does NOT actually change any data directly. It will emit a signal that the data was changed,
        which is intercepted by the undo-redo system which creates a command to change the values, execute it and
        call .reset() on this model, which fetches and displays the new data. It is apparently NOT possible to add a
        QUndoCommand onto the QUndoStack without executing it.

        To add to the strangeness, this method updates self.vmin and self.vmax immediately, which leads to very odd
        results (the color is updated but not the value) if one forgets to connect the newChanges signal to the
        undo-redo system.

        Parameters
        ----------
        left : int
        top : int
        right : int
            exclusive
        bottom : int
            exclusive
        values : ndarray
            does not need to be of the correct dtype (it is converted)

        Returns
        -------
        tuple of QModelIndex or None
            actual bounds (end bound is inclusive) if update was successful,
            None otherwise
        """
        values = self.convert_values(values)
        if values is None:
            return
        values = np.atleast_2d(values)
        vshape = values.shape
        vwidth, vheight = vshape
        width, height = right - left, bottom - top
        assert vwidth == 1 or vwidth == width
        assert vheight == 1 or vheight == height
        # Add change to self.changes
        # requires numpy 1.10
        changes = {}
        newvalues = np.broadcast_to(values, (width, height))
        oldvalues = np.empty_like(newvalues)
        for i in range(width):
            for j in range(height):
                pos = left + i, top + j
                old_value = self._data[pos]
                oldvalues[i, j] = old_value
                new_value = newvalues[i, j]
                if new_value != old_value:
                    changes[pos] = (old_value, new_value)
        # Update vmin/vmax if necessary
        if self.vmin is not None and self.vmax is not None:
            # FIXME: -inf/+inf and non-number values should be ignored here too
            colorval = self.color_func(values) if self.color_func is not None else values
            old_colorval = self.color_func(oldvalues) if self.color_func is not None else oldvalues
            # we need to lower vmax or increase vmin
            if np.any(((old_colorval == self.vmax) & (colorval < self.vmax)) |
                      ((old_colorval == self.vmin) & (colorval > self.vmin))):
                self.reset_minmax()
                self.reset()
            # this is faster, when the condition is False (which should be most of the cases) than computing
            # subset_max and checking if subset_max > self.vmax
            if np.any(colorval > self.vmax):
                self.vmax = float(np.nanmax(colorval))
                self.reset()
            if np.any(colorval < self.vmin):
                self.vmin = float(np.nanmin(colorval))
                self.reset()
        # DataArrayModel should have a reference to an adapter?
        if len(changes) > 0:
            self.newChanges.emit(changes)
        # XXX: I wonder if emitting dataChanged makes any sense since data has not actually changed!
        top_left = self.index(left, top)
        # -1 because Qt index end bounds are inclusive
        bottom_right = self.index(right - 1, bottom - 1)
        self.dataChanged.emit(top_left, bottom_right)
        return top_left, bottom_right

    def setData(self, index, value, role=Qt.EditRole):
        """Cell content change"""
        if not index.isValid() or self.readonly:
            return False
        i, j = index.row(), index.column()
        result = self.set_values(i, j, i + 1, j + 1, value)
        return result is not None
|
larray-project/larray-editor
|
larray_editor/arraymodel.py
|
Python
|
gpl-3.0
| 21,336
|
#!/usr/bin/env python
#
import pytest
# pytestmark = pytest.mark.skipif(False, reason="only run mannually")
pytestmark = pytest.mark.needs_mantid
interactive = False
import os
here = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(here, '..', "data")
import imp
dataurls = imp.load_source('dataurls', os.path.join(datadir, 'dataurls.py'))
import numpy as np, histogram.hdf as hh
from multiphonon.ui import batch
import unittest
class TestCase(unittest.TestCase):
    # Integration test for multiphonon.ui.batch; downloads a reference nexus
    # file and fakes a "series" of datasets with symlinks.

    def setUp(self):
        # Download the reference data file once into the data directory.
        dest = os.path.join(datadir, 'ARCS_V_annulus.nxs')
        if not os.path.exists(dest):
            url = dataurls.ARCS_V_annulus
            cmd = 'wget --quiet %r -O %r' % (url, dest)
            exec_cmd(cmd)
        # create temp dir
        self.tmpdir = tmpdir = os.path.abspath('tmp.batch')
        if os.path.exists(tmpdir):
            import shutil
            shutil.rmtree(tmpdir)
        os.makedirs(tmpdir)
        # make symlinks to create "a series of" data files
        exec_cmd('ln -s %s %s/1.nxs' % (dest, tmpdir))
        exec_cmd('ln -s %s %s/2.nxs' % (dest, tmpdir))
        return

    def test1(self):
        "multiphonon.ui.batch"
        _p = lambda f: os.path.join(self.tmpdir, f)
        sample_nxs_list = [_p('1.nxs'), _p('2.nxs')]
        mt_nxs_list = [None, None]
        batch.process(sample_nxs_list, mt_nxs_list, os.path.join(here, 'V-params.yaml'))
        # compare the DOS computed for the first dataset with the stored
        # expected result
        self.assertTrue(np.allclose(
            hh.load('work-1.nxs,None/final-dos.h5').I,
            hh.load(os.path.join(here, 'expected_results', 'batch-1-final-dos.h5')).I
        ))
        return

    pass  # end of TestCase
def exec_cmd(cmd):
    """Run *cmd* through the shell and raise RuntimeError on non-zero exit."""
    status = os.system(cmd)
    if status:
        raise RuntimeError("%s failed" % cmd)
if __name__ == "__main__":
    # Running this file directly switches the module to interactive mode.
    interactive = True
    unittest.main()

# End of file
|
sns-chops/multiphonon
|
tests/ui/batch_TestCase.py
|
Python
|
mit
| 1,833
|
__author__ = 'VinceVi83'
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from Gestion import Ctes
from Gestion.Enum import *
class AlarmMusicClock:
    """Manage the user's crontab to schedule music-alarm entries.

    Every method shells out with ``sudo`` (piping ``Ctes.pwd_linux`` into
    ``sudo -S``) to copy files in and out of ``/var/spool/cron/crontabs``.

    TODO: set proper permissions on the cron file instead of chmod 777.
    NOTE(review): run ``crontab -e`` once before using this code so the
    user's crontab file exists.
    """

    @staticmethod
    def backupOriginal():
        """
        Save the original file
        :return:
        """
        os.system('echo ' + Ctes.pwd_linux + ' | sudo -S cp /var/spool/cron/crontabs/' + Ctes.user_linux + ' /var/spool/cron/crontabs/' + Ctes.user_linux + '.BAK')

    @staticmethod
    def memoriseAlarmMusicClock():
        """
        Replace the current file in crontab with the original
        :return:
        """
        os.system('echo ' + Ctes.pwd_linux + ' | sudo -S cp /var/spool/cron/crontabs/' + Ctes.user_linux + '.BAK /var/spool/cron/crontabs/' + Ctes.user_linux)

    @staticmethod
    def setAlarmCalendar(self, horaires):
        """
        Append schedule entries to a working copy of the crontab and install it.

        Model Day ['min','heure','*','*','day','cmd']
        It's possible to do a more specific schedule, but it is kept simple here.

        NOTE(review): declared @staticmethod but still takes ``self``; callers
        must pass a placeholder first argument. Signature kept for backward
        compatibility.
        """
        # BUG FIX: the working-copy path was built as '/home' + user (missing
        # the separator), so the copy went to e.g. /homeuser/... Fixed to
        # '/home/' + user, matching the chmod/open calls below.
        os.system('echo ' + Ctes.pwd_linux + ' | sudo -S cp /var/spool/cron/crontabs/' + Ctes.user_linux + '.BAK /home/' + Ctes.user_linux + '/' + Ctes.user_linux)
        os.system('echo ' + Ctes.pwd_linux + ' | sudo -S chmod 777 /home/' + Ctes.user_linux)
        # close the file before reinstalling it (the original leaked the handle)
        with open('/home/' + Ctes.user_linux + '/' + Ctes.user_linux, 'a') as fic:
            jours = horaires.split("\n")
            for jour in jours:
                fic.write('\n')
                # NOTE(review): `jour` is a string here, so this writes it
                # character-by-character separated by spaces -- confirm the
                # expected format of `horaires`.
                for param in jour:
                    fic.write(str(param) + ' ')
        # BUG FIX: the original called self.remove_file() here, a method that
        # does not exist anywhere; since `self` is just the first positional
        # argument of a @staticmethod, the call always raised AttributeError.
        # Dropped (the intent -- "memoriser ??? a ajouter" -- is still TODO).
        os.system('echo ' + Ctes.pwd_linux + ' | sudo -S cp /home/' + Ctes.user_linux + '/' + Ctes.user_linux + ' /var/spool/cron/crontabs/' + Ctes.user_linux)

    @staticmethod
    def removeUserAlarmMusicClock(self):
        """
        Remove the current file in crontab
        :return:
        """
        os.system('echo ' + Ctes.pwd_linux + ' | sudo -S rm /var/spool/cron/crontabs/' + Ctes.user_linux)
|
VinceVi83/MultiRoom
|
Command/AlarmMusicClock.py
|
Python
|
gpl-3.0
| 2,106
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import argparse
import logging
import os
import platform
import shutil
import stat
import subprocess
import sys
import time
import threading
import re
import json
import tempfile
import six
from six.moves import range
_V8_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'third_party',
'v8'))
_JS_PARSER_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'third_party',
'parse5', 'parse5.js'))
_BOOTSTRAP_JS_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'd8_bootstrap.js'))
_BASE64_COMPAT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'base64_compat.js'))
_PATH_UTILS_JS_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'path_utils.js'))
_HTML_IMPORTS_LOADER_JS_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'html_imports_loader.js'))
_HTML_TO_JS_GENERATOR_JS_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'html_to_js_generator.js'))
_BOOTSTRAP_JS_CONTENT = None
_NUM_TRIALS = 3
def _ValidateSourcePaths(source_paths):
    """Assert that every entry is an existing, absolute directory path.

    A value of None is accepted and means "no source paths".
    """
    if source_paths is None:
        return
    for path in source_paths:
        assert os.path.exists(path)
        assert os.path.isdir(path)
        assert os.path.isabs(path)
def _EscapeJsString(s):
    """Return *s* as a double-quoted, escaped JavaScript string literal."""
    assert isinstance(s, str)
    # JSON string escaping is valid JavaScript string escaping.
    escaped = json.dumps(s)
    return escaped
def _RenderTemplateStringForJsSource(source, template, replacement_string):
    """Replace *template* in *source* with *replacement_string* rendered as a JS string literal."""
    replacement_js = _EscapeJsString(replacement_string)
    return source.replace(template, replacement_js)
def _GetBootStrapJsContent(source_paths):
    """Return the bootstrap JS source with all template placeholders filled in.

    The bootstrap file is read from disk once and cached in the module-level
    _BOOTSTRAP_JS_CONTENT; substitutions are redone on every call.
    """
    assert isinstance(source_paths, list)
    global _BOOTSTRAP_JS_CONTENT
    if not _BOOTSTRAP_JS_CONTENT:
        with open(_BOOTSTRAP_JS_DIR, 'r') as f:
            _BOOTSTRAP_JS_CONTENT = f.read()
    bsc = _BOOTSTRAP_JS_CONTENT
    # Ensure that source paths are unique.
    source_paths = list(set(source_paths))
    source_paths_string = '[%s]' % (
        ','.join(_EscapeJsString(s) for s in source_paths))
    bsc = bsc.replace('<%source_paths%>', source_paths_string)
    bsc = _RenderTemplateStringForJsSource(
        bsc, '<%current_working_directory%>', os.getcwd())
    bsc = _RenderTemplateStringForJsSource(
        bsc, '<%path_utils_js_path%>', _PATH_UTILS_JS_DIR)
    bsc = _RenderTemplateStringForJsSource(
        bsc, '<%html_imports_loader_js_path%>', _HTML_IMPORTS_LOADER_JS_DIR)
    bsc = _RenderTemplateStringForJsSource(
        bsc, '<%html_to_js_generator_js_path%>', _HTML_TO_JS_GENERATOR_JS_DIR)
    bsc = _RenderTemplateStringForJsSource(
        bsc, '<%js_parser_path%>', _JS_PARSER_DIR)
    bsc = _RenderTemplateStringForJsSource(
        bsc, '<%base64_compat_path%>', _BASE64_COMPAT_DIR)
    # Tag the evaluated source so d8 stack traces point at the bootstrap file.
    bsc += '\n//@ sourceURL=%s\n' % _BOOTSTRAP_JS_DIR
    return bsc
def _IsValidJsOrHTMLFile(parser, js_file_arg):
if not os.path.exists(js_file_arg):
parser.error('The file %s does not exist' % js_file_arg)
_, extension = os.path.splitext(js_file_arg)
if extension not in ('.js', '.html'):
parser.error('Input must be a JavaScript or HTML file')
return js_file_arg
def _GetD8BinaryPathForPlatform():
  """Return the path to the bundled d8 binary for this OS/architecture.

  Raises:
    NotImplementedError: no prebuilt d8 binary is bundled for the current
      platform/machine combination.
  """
  def _D8Path(*paths):
    """Join paths and make it executable."""
    assert isinstance(paths, tuple)
    exe = os.path.join(_V8_DIR, *paths)
    st = os.stat(exe)
    # The checkout can lose the executable bit; restore it before running.
    if not st.st_mode & stat.S_IEXEC:
      os.chmod(exe, st.st_mode | stat.S_IEXEC)
    return exe
  if platform.system() == 'Linux' and platform.machine() == 'x86_64':
    return _D8Path('linux', 'x86_64', 'd8')
  elif platform.system() == 'Linux' and platform.machine() == 'aarch64':
    # aarch64 and armv7l both use the bundled 32-bit ARM binary.
    return _D8Path('linux', 'arm', 'd8')
  elif platform.system() == 'Linux' and platform.machine() == 'armv7l':
    return _D8Path('linux', 'arm', 'd8')
  elif platform.system() == 'Linux' and platform.machine() == 'mips':
    return _D8Path('linux', 'mips', 'd8')
  elif platform.system() == 'Linux' and platform.machine() == 'mips64':
    return _D8Path('linux', 'mips64', 'd8')
  elif platform.system() == 'Darwin' and platform.machine() == 'x86_64':
    return _D8Path('mac', 'x86_64', 'd8')
  elif platform.system() == 'Darwin' and platform.machine() == 'arm64':
    return _D8Path('mac', 'arm', 'd8')
  elif platform.system() == 'Windows' and platform.machine() == 'AMD64':
    return _D8Path('win', 'AMD64', 'd8.exe')
  else:
    raise NotImplementedError(
        'd8 binary for this platform (%s) and architecture (%s) is not yet'
        ' supported' % (platform.system(), platform.machine()))
# Speculative change to workaround a failure on Windows: speculation is that the
# script attempts to remove a file before the process using the file has
# completely terminated. So the function here attempts to retry a few times with
# a second timeout between retries. More details at https://crbug.com/946012
# TODO(sadrul): delete this speculative change since it didn't work.
def _RemoveTreeWithRetry(tree, retry=3):
for count in range(retry):
try:
shutil.rmtree(tree)
return
except:
if count == retry - 1:
raise
logging.warning('Removing %s failed. Retrying in 1 second ...' % tree)
time.sleep(1)
class RunResult(object):
  """Outcome of running a JS program under d8."""
  def __init__(self, returncode, stdout):
    # returncode: the program's exit status (after the caller has undone the
    # bootstrap's quit() adjustment).
    # stdout: captured standard output, or None when stdout was not piped.
    self.returncode = returncode
    self.stdout = stdout
def ExecuteFile(file_path, source_paths=None, js_args=None, v8_args=None,
                stdout=subprocess.PIPE, stdin=subprocess.PIPE):
  """Execute the JavaScript program in |file_path| and return its stdout.

  Thin wrapper over RunFile with no timeout.

  Args:
    file_path: path of the .js or .html file to execute.
    source_paths: list of absolute directories searched for imports.
    js_args: list of string arguments passed to the JS program.
    v8_args, stdout, stdin: forwarded to _RunFileWithD8.
  Returns:
    The string output from running the JS program.
  """
  run_result = RunFile(
      file_path, source_paths, js_args, v8_args, None, stdout, stdin)
  return run_result.stdout
def RunFile(file_path, source_paths=None, js_args=None, v8_args=None,
            timeout=None, stdout=subprocess.PIPE, stdin=subprocess.PIPE):
  """Runs JavaScript program in |file_path|.
  Args are same as ExecuteFile.
  Returns:
    A RunResult containing the program's output.
  Raises:
    ValueError: |file_path| is not a .js or .html file.
  """
  assert os.path.isfile(file_path)
  _ValidateSourcePaths(source_paths)
  _, extension = os.path.splitext(file_path)
  if not extension in ('.html', '.js'):
    raise ValueError('Can only execute .js or .html file. File %s has '
                     'unsupported file type: %s' % (file_path, extension))
  if source_paths is None:
    source_paths = [os.path.dirname(file_path)]
  abs_file_path_str = _EscapeJsString(os.path.abspath(file_path))
  # Transient d8 'Error reading' failures (crbug.com/953365) are retried up
  # to _NUM_TRIALS times; each trial writes a fresh bootstrap script into a
  # new temp dir, runs it, then removes the dir.
  for trial in range(_NUM_TRIALS):
    try:
      temp_dir = tempfile.mkdtemp()
      temp_bootstrap_file = os.path.join(temp_dir, '_tmp_bootstrap.js')
      with open(temp_bootstrap_file, 'w') as f:
        f.write(_GetBootStrapJsContent(source_paths))
        # The bootstrap ends with a loader call for the user's file: .html
        # goes through the HTML-imports loader, plain .js is loaded directly.
        if extension == '.html':
          f.write('\nHTMLImportsLoader.loadHTMLFile(%s, %s);' %
                  (abs_file_path_str, abs_file_path_str))
        else:
          f.write('\nHTMLImportsLoader.loadFile(%s);' % abs_file_path_str)
      result = _RunFileWithD8(temp_bootstrap_file, js_args, v8_args, timeout,
                              stdout, stdin)
    except:
      # Save the exception.
      t, v, tb = sys.exc_info()
      try:
        _RemoveTreeWithRetry(temp_dir)
      except:
        logging.error('Failed to remove temp dir %s.', temp_dir)
      if 'Error reading' in str(v):  # Handle crbug.com/953365
        if trial == _NUM_TRIALS - 1:
          logging.error(
              'Failed to run file with D8 after %s tries.', _NUM_TRIALS)
          six.reraise(t, v, tb)
        logging.warn('Hit error %s. Retrying after sleeping.', v)
        time.sleep(10)
        continue
      # Re-raise original exception.
      six.reraise(t, v, tb)
    # Success: clean up this trial's temp dir and stop retrying.
    _RemoveTreeWithRetry(temp_dir)
    break
  return result
def ExecuteJsString(js_string, source_paths=None, js_args=None, v8_args=None,
                    original_file_name=None, stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE):
  """Execute the JS program given as a string and return its stdout.

  Thin wrapper over RunJsString; see that function for argument semantics.
  """
  run_result = RunJsString(js_string, source_paths, js_args, v8_args,
                           original_file_name, stdout, stdin)
  return run_result.stdout
def RunJsString(js_string, source_paths=None, js_args=None, v8_args=None,
                original_file_name=None, stdout=subprocess.PIPE,
                stdin=subprocess.PIPE):
  """Run the JS program given as a string and return a RunResult.

  The string is written to a temp .js file (named after
  |original_file_name|, if given, so error messages are readable) and
  delegated to RunFile. The temp dir is removed afterwards in all cases.
  """
  _ValidateSourcePaths(source_paths)
  try:
    temp_dir = tempfile.mkdtemp()
    if original_file_name:
      name = os.path.basename(original_file_name)
      name, _ = os.path.splitext(name)
      temp_file = os.path.join(temp_dir, '%s.js' % name)
    else:
      temp_file = os.path.join(temp_dir, 'temp_program.js')
    with open(temp_file, 'w') as f:
      f.write(js_string)
    result = RunFile(temp_file, source_paths, js_args, v8_args, None, stdout,
                     stdin)
  except:
    # Save the exception.
    t, v, tb = sys.exc_info()
    try:
      _RemoveTreeWithRetry(temp_dir)
    except:
      logging.error('Failed to remove temp dir %s.', temp_dir)
    # Re-raise original exception.
    six.reraise(t, v, tb)
  _RemoveTreeWithRetry(temp_dir)
  return result
def _KillProcess(process, name, reason):
# kill() does not close the handle to the process. On Windows, a process
# will live until you delete all handles to that subprocess, so
# ps_util.ListAllSubprocesses will find this subprocess if
# we haven't garbage-collected the handle yet. poll() should close the
# handle once the process dies.
logging.warn('Killing process %s because %s.', name, reason)
process.kill()
time.sleep(.01)
for _ in range(100):
if process.poll() is None:
time.sleep(.1)
continue
break
else:
logging.warn('process %s is still running after we '
'attempted to kill it.', name)
def _RunFileWithD8(js_file_path, js_args, v8_args, timeout, stdout, stdin):
  """ Execute the js_files with v8 engine and return the output of the program.
  Args:
    js_file_path: the string path of the js file to be run.
    js_args: a list of arguments to passed to the |js_file_path| program.
    v8_args: extra arguments to pass into d8. (for the full list of these
      options, run d8 --help)
    timeout: how many seconds to wait for d8 to finish. If None or 0 then
      this will wait indefinitely.
    stdout: where to pipe the stdout of the executed program to. If
      subprocess.PIPE is used, stdout will be returned in RunResult.out.
      Otherwise RunResult.out is None
    stdin: specify the executed program's input.
  Returns:
    A RunResult with the (adjusted) returncode and captured output.
  Raises:
    RuntimeError: the JS program raised an uncaught exception (d8 exit 1).
  """
  if v8_args is None:
    v8_args = []
  assert isinstance(v8_args, list)
  # Command line shape: d8 [v8_args] bootstrap.js -- argv0 [js_args]
  args = [_GetD8BinaryPathForPlatform()] + v8_args
  args.append(os.path.abspath(js_file_path))
  full_js_args = [args[0]]
  if js_args:
    full_js_args += js_args
  args += ['--'] + full_js_args
  # Set stderr=None since d8 doesn't write into stderr anyway.
  sp = subprocess.Popen(args, stdout=stdout, stderr=None, stdin=stdin)
  if timeout:
    deadline = time.time() + timeout
    # A timer thread kills d8 if communicate() has not finished by |timeout|.
    timeout_thread = threading.Timer(timeout, _KillProcess, args=(
        sp, 'd8', 'it timed out'))
    timeout_thread.start()
  out, _ = sp.communicate()
  if timeout:
    timeout_thread.cancel()
  # On Windows, d8's print() method add the carriage return characters \r to
  # newline, which make the output different from d8 on posix. We remove the
  # extra \r's to make the output consistent with posix platforms.
  if platform.system() == 'Windows' and out:
    out = re.sub(b'\r+\n', b'\n', six.ensure_binary(out))
  # d8 uses returncode 1 to indicate an uncaught exception, but
  # _RunFileWithD8 needs to distingiush between that and quit(1).
  #
  # To fix this, d8_bootstrap.js monkeypatches D8's quit function to
  # adds 1 to an intentioned nonzero quit. So, now, we have to undo this
  # logic here in order to raise/return the right thing.
  returncode = sp.returncode
  if returncode == 0:
    return RunResult(0, out)
  elif returncode == 1:
    if out:
      raise RuntimeError(
          'Exception raised when executing %s:\n%s' % (js_file_path, out))
    else:
      raise RuntimeError(
          'Exception raised when executing %s. '
          '(Error stack is dumped into stdout)' % js_file_path)
  else:
    # Undo the bootstrap's +1 adjustment of intentional nonzero quit codes.
    return RunResult(returncode - 1, out)
def main():
  """Command-line entry point: run a JS/HTML file under d8.

  Returns the executed program's exit code.
  """
  parser = argparse.ArgumentParser(
      description='Run JavaScript file with v8 engine')
  parser.add_argument('file_name', help='input file', metavar='FILE',
                      type=lambda f: _IsValidJsOrHTMLFile(parser, f))
  parser.add_argument('--js_args', help='arguments for the js program',
                      nargs='+')
  parser.add_argument('--source_paths', help='search path for the js program',
                      nargs='+', type=str)
  options = parser.parse_args()
  if options.source_paths:
    options.source_paths = [os.path.abspath(p) for p in options.source_paths]
  else:
    # Default to the directory containing the input file.
    options.source_paths = [os.path.abspath(os.path.dirname(options.file_name))]
    logging.warning(
        '--source_paths is not specified. Use %s for search path.' %
        options.source_paths)
  run_result = RunFile(options.file_name, source_paths=options.source_paths,
                       js_args=options.js_args, timeout=None, stdout=sys.stdout,
                       stdin=sys.stdin)
  return run_result.returncode
|
catapult-project/catapult
|
third_party/vinn/vinn/_vinn.py
|
Python
|
bsd-3-clause
| 13,524
|
# -*- coding: utf-8 -*-
"""
main.py
Jan 14, 2014
Copyright (C) 2014 Baris Sencan
"""
import os
import redis
from flask import Flask, render_template, url_for
from flask.ext.compress import Compress
app = Flask(__name__)
# Enable gzip compression.
Compress(app)
# Static file loading helper.
app.jinja_env.globals['static'] = (
    lambda filename: url_for('static', filename=filename))
# Redis configuration.
# REDISTOGO_URL is the Heroku Redis To Go add-on URL; fall back to a local
# Redis instance for development.
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
# NOTE(review): this rebinding shadows the imported `redis` module with a
# client instance; later code can no longer reach the module under that name.
redis = redis.from_url(redis_url)
# DNS list (plot twist: it's actually a dictionary).
# Maps provider name -> (primary, secondary) resolver IPs shown on the page.
dns_list = {
    'Google': ('8.8.8.8', '8.8.4.4'),
    'OpenDNS': ('208.67.222.222', '208.67.220.220'),
    'TTNet': ('195.175.39.40', '195.175.39.39')
}
@app.route('/')
def home():
    """Render the home page with the stored status of each DNS provider.

    Statuses are read from Redis; if a lookup fails (e.g. Redis is down),
    that provider's status is reported as 'unknown' instead of erroring
    the whole page.
    """
    # Fetch information from database and render page.
    status_for = dict()
    for server in dns_list:
        try:
            status_for[server] = redis.get(server)
        # `except Exception` rather than a bare except: a bare except would
        # also swallow KeyboardInterrupt/SystemExit.
        except Exception:
            status_for[server] = 'unknown'
    return render_template('home.html', dns_list=dns_list, status_for=status_for)
|
isair/youtubekapatildimi
|
main.py
|
Python
|
apache-2.0
| 1,063
|
import sys
def setup(core, object):
    """Populate the static-item attributes for the spy utility-set bracelet.

    The tuple below mirrors the original setter sequence exactly; entries are
    dispatched in order, so the object receives an identical series of calls.
    """
    calls = (
        ('setStfFilename', 'static_item_n'),
        ('setStfName', 'item_bracelet_r_set_spy_utility_a_01_01'),
        ('setDetailFilename', 'static_item_d'),
        ('setDetailName', 'item_bracelet_r_set_spy_utility_a_01_01'),
        ('setIntAttribute', 'required_combat_level', 85),
        ('setStringAttribute', 'class_required', 'Spy'),
        ('setIntAttribute', 'cat_stat_mod_bonus.@stat_n:strength_modified', 25),
        ('setIntAttribute', 'cat_skill_mod_bonus.@stat_n:fast_attack_line_sp_smoke', 1),
        ('setIntAttribute', 'cat_skill_mod_bonus.@stat_n:expertise_duration_line_sp_preparation', 1),
        ('setIntAttribute', 'cat_skill_mod_bonus.@stat_n:expertise_cooldown_line_sp_preparation', 6),
        ('setStringAttribute', '@set_bonus:piece_bonus_count_3', '@set_bonus:set_bonus_spy_utility_a_1'),
        ('setStringAttribute', '@set_bonus:piece_bonus_count_4', '@set_bonus:set_bonus_spy_utility_a_2'),
        ('setStringAttribute', '@set_bonus:piece_bonus_count_5', '@set_bonus:set_bonus_spy_utility_a_3'),
        ('setAttachment', 'setBonus', 'set_bonus_spy_utility_a'),
    )
    for entry in calls:
        getattr(object, entry[0])(*entry[1:])
    return
|
ProjectSWGCore/NGECore2
|
scripts/object/tangible/wearables/bracelet/item_bracelet_r_set_spy_utility_a_01_01.py
|
Python
|
lgpl-3.0
| 1,078
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class WorkspaceShortcut(Document):
	"""Controller for the Workspace Shortcut doctype.
	No custom server-side behavior; everything is inherited from
	frappe.model.document.Document.
	"""
	pass
|
mhbu50/frappe
|
frappe/desk/doctype/workspace_shortcut/workspace_shortcut.py
|
Python
|
mit
| 235
|
"""
Svg reader.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.geometry_tools import path
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import svg_reader
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getGeometryOutput(derivation, xmlElement):
	"""Get vector3 vertexes from attribute dictionary.

	If derivation is None, a fresh SVGDerivation is built from xmlElement.
	"""
	# `is None` instead of `== None`: identity is what is meant here, and
	# equality can be overridden by operand types.
	if derivation is None:
		derivation = SVGDerivation(xmlElement)
	return getGeometryOutputBySVGReader(derivation.svgReader, xmlElement)
def getGeometryOutputByArguments(arguments, xmlElement):
	"Get vector3 vertexes from attribute dictionary by arguments."
	# NOTE(review): SVGDerivation.__init__ as declared below requires an
	# xmlElement argument, so this zero-argument construction looks like it
	# would raise TypeError - confirm the intended SVGDerivation signature.
	derivation = SVGDerivation()
	# Parse the first argument as raw SVG text (empty file name).
	derivation.svgReader.parseSVG('', arguments[0])
	return getGeometryOutput(derivation, xmlElement)
def getGeometryOutputBySVGReader(svgReader, xmlElement):
	"""Get vector3 vertexes from svgReader.

	Each loop of each rotated loop layer is lifted to a 3D path at the
	layer's z, wrapped in a SideLoop, rotated per xmlElement, then run
	through the lineation manipulations; results are concatenated.
	"""
	output = []
	for layer in svgReader.rotatedLoopLayers:
		for loop in layer.loops:
			path3D = euclidean.getVector3Path(loop, layer.z)
			side = lineation.SideLoop(path3D, None, None)
			side.rotate(xmlElement)
			output.extend(lineation.getGeometryOutputByManipulation(side, xmlElement))
	return output
def getNewDerivation(xmlElement):
	'Get new derivation.'
	derivation = SVGDerivation(xmlElement)
	return derivation
def processXMLElement(xmlElement):
	"Process the xml element."
	geometryOutput = getGeometryOutput(None, xmlElement)
	path.convertXMLElement(geometryOutput, xmlElement)
class SVGDerivation:
	"Class to hold svg variables."
	def __init__(self, xmlElement=None):
		"""Set defaults; parse xmlElement if one is given.

		xmlElement defaults to None (backward compatible) so callers such as
		getGeometryOutputByArguments can build an empty derivation and feed
		raw SVG text to svgReader themselves.
		"""
		self.svgReader = svg_reader.SVGReader()
		if xmlElement is not None:
			self.svgReader.parseSVGByXMLElement(xmlElement)
	def __repr__(self):
		"Get the string representation of this SVGDerivation."
		return str(self.__dict__)
|
natetrue/ReplicatorG
|
skein_engines/skeinforge-40/fabmetheus_utilities/geometry/creation/_svg.py
|
Python
|
gpl-2.0
| 2,230
|
# coding=utf-8
"""Provider code for Torrenting."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
# Module logger; BraceAdapter enables '{}'-style formatting in log calls.
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class TorrentingProvider(TorrentProvider):
    """Torrenting Torrent provider."""
    def __init__(self):
        """Initialize the class."""
        super(TorrentingProvider, self).__init__('Torrenting')
        # URLs
        self.url = 'https://www.torrenting.com/'
        self.urls = {
            'login': urljoin(self.url, 'login.php'),
            'search': urljoin(self.url, 'browse.php'),
        }
        # Proper Strings
        self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']
        # Miscellaneous Options
        # Auth is cookie-based: the user supplies the site's 'uid' and 'pass'
        # cookies (see login() below) rather than a username/password.
        self.enable_cookies = True
        self.cookies = ''
        self.required_cookies = ('uid', 'pass')
        # Cache
        self.cache = tv.Cache(self)
    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
        """
        Search a provider and parse the results.
        :param search_strings: A dict with mode (key) and the search value (value)
        :param age: Not used
        :param ep_obj: Not used
        :returns: A list of search results (structure)
        """
        results = []
        if not self.login():
            return results
        # Search Params: category flags enable the TV categories to query.
        search_params = {
            'c4': 1,  # TV/SD-x264
            'c5': 1,  # TV/X264 HD
            'c18': 1,  # TV/Packs
            'c49': 1,  # x265 (HEVC)
            'search': '',
        }
        for mode in search_strings:
            log.debug('Search mode: {0}', mode)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    log.debug('Search string: {search}',
                              {'search': search_string})
                search_params['search'] = search_string
                response = self.session.get(self.urls['search'], params=search_params)
                if not response or not response.text:
                    log.debug('No data returned from provider')
                    continue
                results += self.parse(response.text, mode)
        return results
    def parse(self, data, mode):
        """
        Parse search results for items.
        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS
        :return: A list of items found
        """
        items = []
        with BS4Parser(data, 'html5lib') as html:
            torrent_table = html.find('table', {'id': 'torrentsTable'})
            torrent_rows = torrent_table.find_all('tr') if torrent_table else []
            # Continue only if at least one release is found
            if len(torrent_rows) < 2:
                log.debug('Data returned from provider does not contain any torrents')
                return items
            # Skip column headers
            for row in torrent_rows[1:]:
                try:
                    torrent_items = row.find_all('td')
                    title = torrent_items[1].find('a').get_text(strip=True)
                    download_url = torrent_items[2].find('a')['href']
                    if not all([title, download_url]):
                        continue
                    download_url = urljoin(self.url, download_url)
                    seeders = try_int(torrent_items[5].get_text(strip=True))
                    leechers = try_int(torrent_items[6].get_text(strip=True))
                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue
                    torrent_size = torrent_items[4].get_text()
                    size = convert_size(torrent_size) or -1
                    pubdate_raw = torrent_items[1].find('div').get_text()
                    pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': pubdate,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)
                    items.append(item)
                # A malformed row only skips that row; the rest still parse.
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider')
        return items
    def login(self):
        """Login method used for logging in before doing search and torrent downloads."""
        return self.cookie_login('sign in')
# Singleton instance picked up by Medusa's provider registry.
provider = TorrentingProvider()
|
pymedusa/Medusa
|
medusa/providers/torrent/html/torrenting.py
|
Python
|
gpl-3.0
| 5,376
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds an optional `logo` ImageField to mps_v2.Group; uploads land under
    # MEDIA_ROOT/fraction_logos.
    dependencies = [
        ('mps_v2', '0011_group_displayed'),
    ]
    operations = [
        migrations.AddField(
            model_name='group',
            name='logo',
            # b'fraction_logos': bytes literal kept deliberately - the file
            # uses unicode_literals and targets Python 2 era Django.
            field=models.ImageField(null=True, upload_to=b'fraction_logos', blank=True),
        ),
    ]
|
ManoSeimas/manoseimas.lt
|
manoseimas/mps_v2/migrations/0012_group_logo.py
|
Python
|
agpl-3.0
| 431
|
#!/home/paulk/software/bin/python
import sys
import random
# Parse CLI args: input file, 1-based column index, number of samples.
try:
    fn = sys.argv[1]
    col = int(sys.argv[2])
    no = int(sys.argv[3])
except IndexError:
    # Python 2 print-to-stream syntax; this script targets Python 2.
    print >> sys.stderr,"Script to print a random number of items from a column."
    print >> sys.stderr,"Usage: %s <file> <column> <number>" % sys.argv[0]
    sys.exit(0)
f = open(fn)
# Extract the requested (1-based) tab-separated column from every row.
data = [row.strip().split('\t')[col-1] for row in f]
f.close()
# Sample WITH replacement: the same line may be printed more than once.
chosen = [random.choice(data) for i in xrange(no)]
for c in chosen:
    print c
|
polarise/RP-python
|
pick_random_from_col.py
|
Python
|
gpl-2.0
| 470
|
# This file was created automatically by SWIG 1.3.29.
# Don't modify this file, modify the SWIG interface instead.
"""
Classes for a simple HTML rendering window, HTML Help Window, etc.
"""
import _html
import new
# Hook used by SWIG-generated code to attach bound methods to proxy classes.
new_instancemethod = new.instancemethod
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Attribute writes on SWIG proxies: 'thisown'/'this' manage ownership of
    # the wrapped C++ pointer; other names are routed through the generated
    # __swig_setmethods__ table so the C++ side is updated. With static=1,
    # names unknown to the proxy are rejected.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant (static=0): also permits adding brand-new attributes.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Attribute reads consult the generated __swig_getmethods__ table so the
    # value comes from the wrapped C++ object.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    # Python 2 raise syntax; this file is SWIG-generated for Python 2.
    raise AttributeError,name
def _swig_repr(self):
    # Shared __repr__ for proxies; tolerates a missing/dead 'this' pointer.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old/new-style class compatibility shim: where types.ObjectType is absent,
# proxies fall back to classic classes (_newclass == 0).
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types
def _swig_setattr_nondynamic_method(set):
    # Wrap a class's __setattr__ so only existing attributes (plus
    # 'this'/'thisown') may be assigned; prevents typo-created attributes.
    def set_attr(self,name,value):
        if (name == "thisown"): return self.this.own(value)
        if hasattr(self,name) or (name == "this"):
            set(self,name,value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
import _windows
import _core
wx = _core
__docfilter__ = wx.__DocFilter(globals())
#---------------------------------------------------------------------------
# Constants re-exported from the compiled _html extension: cell alignment,
# colour targets, measurement units, indent masks, hit-test conditions,
# window scrollbar/selection styles, filter verdicts and URL kinds.
HTML_ALIGN_LEFT = _html.HTML_ALIGN_LEFT
HTML_ALIGN_CENTER = _html.HTML_ALIGN_CENTER
HTML_ALIGN_RIGHT = _html.HTML_ALIGN_RIGHT
HTML_ALIGN_BOTTOM = _html.HTML_ALIGN_BOTTOM
HTML_ALIGN_TOP = _html.HTML_ALIGN_TOP
HTML_CLR_FOREGROUND = _html.HTML_CLR_FOREGROUND
HTML_CLR_BACKGROUND = _html.HTML_CLR_BACKGROUND
HTML_UNITS_PIXELS = _html.HTML_UNITS_PIXELS
HTML_UNITS_PERCENT = _html.HTML_UNITS_PERCENT
HTML_INDENT_LEFT = _html.HTML_INDENT_LEFT
HTML_INDENT_RIGHT = _html.HTML_INDENT_RIGHT
HTML_INDENT_TOP = _html.HTML_INDENT_TOP
HTML_INDENT_BOTTOM = _html.HTML_INDENT_BOTTOM
HTML_INDENT_HORIZONTAL = _html.HTML_INDENT_HORIZONTAL
HTML_INDENT_VERTICAL = _html.HTML_INDENT_VERTICAL
HTML_INDENT_ALL = _html.HTML_INDENT_ALL
HTML_COND_ISANCHOR = _html.HTML_COND_ISANCHOR
HTML_COND_ISIMAGEMAP = _html.HTML_COND_ISIMAGEMAP
HTML_COND_USER = _html.HTML_COND_USER
HW_SCROLLBAR_NEVER = _html.HW_SCROLLBAR_NEVER
HW_SCROLLBAR_AUTO = _html.HW_SCROLLBAR_AUTO
HW_NO_SELECTION = _html.HW_NO_SELECTION
HW_DEFAULT_STYLE = _html.HW_DEFAULT_STYLE
HTML_OPEN = _html.HTML_OPEN
HTML_BLOCK = _html.HTML_BLOCK
HTML_REDIRECT = _html.HTML_REDIRECT
HTML_URL_PAGE = _html.HTML_URL_PAGE
HTML_URL_IMAGE = _html.HTML_URL_IMAGE
HTML_URL_OTHER = _html.HTML_URL_OTHER
# SWIG proxy for wxHtmlLinkInfo: describes a clicked hyperlink (href/target)
# together with the mouse event and HTML cell that triggered it.
class HtmlLinkInfo(_core.Object):
    """Proxy of C++ HtmlLinkInfo class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String href, String target=EmptyString) -> HtmlLinkInfo"""
        _html.HtmlLinkInfo_swiginit(self,_html.new_HtmlLinkInfo(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlLinkInfo
    __del__ = lambda self : None;
    def GetHref(*args, **kwargs):
        """GetHref(self) -> String"""
        return _html.HtmlLinkInfo_GetHref(*args, **kwargs)
    def GetTarget(*args, **kwargs):
        """GetTarget(self) -> String"""
        return _html.HtmlLinkInfo_GetTarget(*args, **kwargs)
    def GetEvent(*args, **kwargs):
        """GetEvent(self) -> MouseEvent"""
        return _html.HtmlLinkInfo_GetEvent(*args, **kwargs)
    def GetHtmlCell(*args, **kwargs):
        """GetHtmlCell(self) -> HtmlCell"""
        return _html.HtmlLinkInfo_GetHtmlCell(*args, **kwargs)
    def SetEvent(*args, **kwargs):
        """SetEvent(self, MouseEvent e)"""
        return _html.HtmlLinkInfo_SetEvent(*args, **kwargs)
    def SetHtmlCell(*args, **kwargs):
        """SetHtmlCell(self, HtmlCell e)"""
        return _html.HtmlLinkInfo_SetHtmlCell(*args, **kwargs)
    Event = property(GetEvent,SetEvent,doc="See `GetEvent` and `SetEvent`")
    Href = property(GetHref,doc="See `GetHref`")
    HtmlCell = property(GetHtmlCell,SetHtmlCell,doc="See `GetHtmlCell` and `SetHtmlCell`")
    Target = property(GetTarget,doc="See `GetTarget`")
_html.HtmlLinkInfo_swigregister(HtmlLinkInfo)
# Module-level string constants exported by the extension.
cvar = _html.cvar
HtmlWindowNameStr = cvar.HtmlWindowNameStr
HtmlPrintoutTitleStr = cvar.HtmlPrintoutTitleStr
HtmlPrintingTitleStr = cvar.HtmlPrintingTitleStr
# SWIG proxy for wxHtmlTag: read-only view of one parsed HTML tag (name,
# parameters, begin/end source positions). Only the parser constructs these.
class HtmlTag(_core.Object):
    """Proxy of C++ HtmlTag class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def GetName(*args, **kwargs):
        """GetName(self) -> String"""
        return _html.HtmlTag_GetName(*args, **kwargs)
    def HasParam(*args, **kwargs):
        """HasParam(self, String par) -> bool"""
        return _html.HtmlTag_HasParam(*args, **kwargs)
    def GetParam(*args, **kwargs):
        """GetParam(self, String par, int with_commas=False) -> String"""
        return _html.HtmlTag_GetParam(*args, **kwargs)
    def GetAllParams(*args, **kwargs):
        """GetAllParams(self) -> String"""
        return _html.HtmlTag_GetAllParams(*args, **kwargs)
    def HasEnding(*args, **kwargs):
        """HasEnding(self) -> bool"""
        return _html.HtmlTag_HasEnding(*args, **kwargs)
    def GetBeginPos(*args, **kwargs):
        """GetBeginPos(self) -> int"""
        return _html.HtmlTag_GetBeginPos(*args, **kwargs)
    def GetEndPos1(*args, **kwargs):
        """GetEndPos1(self) -> int"""
        return _html.HtmlTag_GetEndPos1(*args, **kwargs)
    def GetEndPos2(*args, **kwargs):
        """GetEndPos2(self) -> int"""
        return _html.HtmlTag_GetEndPos2(*args, **kwargs)
    AllParams = property(GetAllParams,doc="See `GetAllParams`")
    BeginPos = property(GetBeginPos,doc="See `GetBeginPos`")
    EndPos1 = property(GetEndPos1,doc="See `GetEndPos1`")
    EndPos2 = property(GetEndPos2,doc="See `GetEndPos2`")
    Name = property(GetName,doc="See `GetName`")
_html.HtmlTag_swigregister(HtmlTag)
# SWIG proxy for wxHtmlParser: the generic HTML parsing engine driven by a
# stack of tag handlers. Abstract on the C++ side, hence no constructor.
class HtmlParser(_core.Object):
    """Proxy of C++ HtmlParser class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def SetFS(*args, **kwargs):
        """SetFS(self, FileSystem fs)"""
        return _html.HtmlParser_SetFS(*args, **kwargs)
    def GetFS(*args, **kwargs):
        """GetFS(self) -> FileSystem"""
        return _html.HtmlParser_GetFS(*args, **kwargs)
    def Parse(*args, **kwargs):
        """Parse(self, String source) -> Object"""
        return _html.HtmlParser_Parse(*args, **kwargs)
    def InitParser(*args, **kwargs):
        """InitParser(self, String source)"""
        return _html.HtmlParser_InitParser(*args, **kwargs)
    def DoneParser(*args, **kwargs):
        """DoneParser(self)"""
        return _html.HtmlParser_DoneParser(*args, **kwargs)
    def DoParsing(*args, **kwargs):
        """DoParsing(self)"""
        return _html.HtmlParser_DoParsing(*args, **kwargs)
    def StopParsing(*args, **kwargs):
        """StopParsing(self)"""
        return _html.HtmlParser_StopParsing(*args, **kwargs)
    def AddTagHandler(*args, **kwargs):
        """AddTagHandler(self, HtmlTagHandler handler)"""
        return _html.HtmlParser_AddTagHandler(*args, **kwargs)
    def GetSource(*args, **kwargs):
        """GetSource(self) -> String"""
        return _html.HtmlParser_GetSource(*args, **kwargs)
    def PushTagHandler(*args, **kwargs):
        """PushTagHandler(self, HtmlTagHandler handler, String tags)"""
        return _html.HtmlParser_PushTagHandler(*args, **kwargs)
    def PopTagHandler(*args, **kwargs):
        """PopTagHandler(self)"""
        return _html.HtmlParser_PopTagHandler(*args, **kwargs)
    def GetInnerSource(*args, **kwargs):
        """GetInnerSource(self, HtmlTag tag) -> String"""
        return _html.HtmlParser_GetInnerSource(*args, **kwargs)
    FS = property(GetFS,SetFS,doc="See `GetFS` and `SetFS`")
    Source = property(GetSource,doc="See `GetSource`")
_html.HtmlParser_swigregister(HtmlParser)
class HtmlWinParser(HtmlParser):
"""Proxy of C++ HtmlWinParser class"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args, **kwargs):
"""__init__(self, HtmlWindow wnd=None) -> HtmlWinParser"""
_html.HtmlWinParser_swiginit(self,_html.new_HtmlWinParser(*args, **kwargs))
def SetDC(*args, **kwargs):
"""SetDC(self, DC dc)"""
return _html.HtmlWinParser_SetDC(*args, **kwargs)
def GetDC(*args, **kwargs):
"""GetDC(self) -> DC"""
return _html.HtmlWinParser_GetDC(*args, **kwargs)
def GetCharHeight(*args, **kwargs):
"""GetCharHeight(self) -> int"""
return _html.HtmlWinParser_GetCharHeight(*args, **kwargs)
def GetCharWidth(*args, **kwargs):
"""GetCharWidth(self) -> int"""
return _html.HtmlWinParser_GetCharWidth(*args, **kwargs)
def GetWindowInterface(*args, **kwargs):
"""GetWindowInterface(self) -> HtmlWindowInterface"""
return _html.HtmlWinParser_GetWindowInterface(*args, **kwargs)
def SetFonts(*args, **kwargs):
"""SetFonts(self, String normal_face, String fixed_face, PyObject sizes=None)"""
return _html.HtmlWinParser_SetFonts(*args, **kwargs)
def SetStandardFonts(*args, **kwargs):
"""SetStandardFonts(self, int size=-1, String normal_face=EmptyString, String fixed_face=EmptyString)"""
return _html.HtmlWinParser_SetStandardFonts(*args, **kwargs)
def GetContainer(*args, **kwargs):
"""GetContainer(self) -> HtmlContainerCell"""
return _html.HtmlWinParser_GetContainer(*args, **kwargs)
def OpenContainer(*args, **kwargs):
"""OpenContainer(self) -> HtmlContainerCell"""
return _html.HtmlWinParser_OpenContainer(*args, **kwargs)
def SetContainer(*args, **kwargs):
"""SetContainer(self, HtmlContainerCell c) -> HtmlContainerCell"""
return _html.HtmlWinParser_SetContainer(*args, **kwargs)
def CloseContainer(*args, **kwargs):
"""CloseContainer(self) -> HtmlContainerCell"""
return _html.HtmlWinParser_CloseContainer(*args, **kwargs)
def GetFontSize(*args, **kwargs):
"""GetFontSize(self) -> int"""
return _html.HtmlWinParser_GetFontSize(*args, **kwargs)
def SetFontSize(*args, **kwargs):
"""SetFontSize(self, int s)"""
return _html.HtmlWinParser_SetFontSize(*args, **kwargs)
def SetFontPointSize(*args, **kwargs):
"""SetFontPointSize(self, int pt)"""
return _html.HtmlWinParser_SetFontPointSize(*args, **kwargs)
def GetFontBold(*args, **kwargs):
"""GetFontBold(self) -> int"""
return _html.HtmlWinParser_GetFontBold(*args, **kwargs)
def SetFontBold(*args, **kwargs):
"""SetFontBold(self, int x)"""
return _html.HtmlWinParser_SetFontBold(*args, **kwargs)
def GetFontItalic(*args, **kwargs):
"""GetFontItalic(self) -> int"""
return _html.HtmlWinParser_GetFontItalic(*args, **kwargs)
def SetFontItalic(*args, **kwargs):
"""SetFontItalic(self, int x)"""
return _html.HtmlWinParser_SetFontItalic(*args, **kwargs)
def GetFontUnderlined(*args, **kwargs):
"""GetFontUnderlined(self) -> int"""
return _html.HtmlWinParser_GetFontUnderlined(*args, **kwargs)
def SetFontUnderlined(*args, **kwargs):
"""SetFontUnderlined(self, int x)"""
return _html.HtmlWinParser_SetFontUnderlined(*args, **kwargs)
def GetFontFixed(*args, **kwargs):
"""GetFontFixed(self) -> int"""
return _html.HtmlWinParser_GetFontFixed(*args, **kwargs)
def SetFontFixed(*args, **kwargs):
"""SetFontFixed(self, int x)"""
return _html.HtmlWinParser_SetFontFixed(*args, **kwargs)
def GetAlign(*args, **kwargs):
"""GetAlign(self) -> int"""
return _html.HtmlWinParser_GetAlign(*args, **kwargs)
def SetAlign(*args, **kwargs):
"""SetAlign(self, int a)"""
return _html.HtmlWinParser_SetAlign(*args, **kwargs)
def GetLinkColor(*args, **kwargs):
"""GetLinkColor(self) -> Colour"""
return _html.HtmlWinParser_GetLinkColor(*args, **kwargs)
def SetLinkColor(*args, **kwargs):
"""SetLinkColor(self, Colour clr)"""
return _html.HtmlWinParser_SetLinkColor(*args, **kwargs)
GetLinkColour = GetLinkColor
SetLinkColour = SetLinkColor
def GetActualColor(*args, **kwargs):
"""GetActualColor(self) -> Colour"""
return _html.HtmlWinParser_GetActualColor(*args, **kwargs)
def SetActualColor(*args, **kwargs):
"""SetActualColor(self, Colour clr)"""
return _html.HtmlWinParser_SetActualColor(*args, **kwargs)
GetActualColour = GetActualColor
SetActualColour = SetActualColor
def SetLink(*args, **kwargs):
"""SetLink(self, String link)"""
return _html.HtmlWinParser_SetLink(*args, **kwargs)
def CreateCurrentFont(*args, **kwargs):
"""CreateCurrentFont(self) -> Font"""
return _html.HtmlWinParser_CreateCurrentFont(*args, **kwargs)
    # SWIG proxy: forwards to wxHtmlWinParser::GetLink.
    def GetLink(*args, **kwargs):
        """GetLink(self) -> HtmlLinkInfo"""
        return _html.HtmlWinParser_GetLink(*args, **kwargs)
ActualColor = property(GetActualColor,SetActualColor,doc="See `GetActualColor` and `SetActualColor`")
ActualColour = property(GetActualColour,SetActualColour,doc="See `GetActualColour` and `SetActualColour`")
Align = property(GetAlign,SetAlign,doc="See `GetAlign` and `SetAlign`")
CharHeight = property(GetCharHeight,doc="See `GetCharHeight`")
CharWidth = property(GetCharWidth,doc="See `GetCharWidth`")
Container = property(GetContainer,SetContainer,doc="See `GetContainer` and `SetContainer`")
DC = property(GetDC,SetDC,doc="See `GetDC` and `SetDC`")
FontBold = property(GetFontBold,SetFontBold,doc="See `GetFontBold` and `SetFontBold`")
FontFixed = property(GetFontFixed,SetFontFixed,doc="See `GetFontFixed` and `SetFontFixed`")
FontItalic = property(GetFontItalic,SetFontItalic,doc="See `GetFontItalic` and `SetFontItalic`")
FontSize = property(GetFontSize,SetFontSize,doc="See `GetFontSize` and `SetFontSize`")
FontUnderlined = property(GetFontUnderlined,SetFontUnderlined,doc="See `GetFontUnderlined` and `SetFontUnderlined`")
Link = property(GetLink,SetLink,doc="See `GetLink` and `SetLink`")
LinkColor = property(GetLinkColor,SetLinkColor,doc="See `GetLinkColor` and `SetLinkColor`")
LinkColour = property(GetLinkColour,SetLinkColour,doc="See `GetLinkColour` and `SetLinkColour`")
WindowInterface = property(GetWindowInterface,doc="See `GetWindowInterface`")
_html.HtmlWinParser_swigregister(HtmlWinParser)
# Generated SWIG proxy: each method simply forwards to the C++ wxHtmlTagHandler.
class HtmlTagHandler(_core.Object):
    """Proxy of C++ HtmlTagHandler class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlTagHandler"""
        _html.HtmlTagHandler_swiginit(self,_html.new_HtmlTagHandler(*args, **kwargs))
        # Wire the Python instance into the C++ side so virtual methods can
        # call back into Python overrides.
        HtmlTagHandler._setCallbackInfo(self, self, HtmlTagHandler)
    def _setCallbackInfo(*args, **kwargs):
        """_setCallbackInfo(self, PyObject self, PyObject _class)"""
        return _html.HtmlTagHandler__setCallbackInfo(*args, **kwargs)
    def SetParser(*args, **kwargs):
        """SetParser(self, HtmlParser parser)"""
        return _html.HtmlTagHandler_SetParser(*args, **kwargs)
    def GetParser(*args, **kwargs):
        """GetParser(self) -> HtmlParser"""
        return _html.HtmlTagHandler_GetParser(*args, **kwargs)
    def ParseInner(*args, **kwargs):
        """ParseInner(self, HtmlTag tag)"""
        return _html.HtmlTagHandler_ParseInner(*args, **kwargs)
    Parser = property(GetParser,SetParser,doc="See `GetParser` and `SetParser`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlTagHandler_swigregister(HtmlTagHandler)
# Generated SWIG proxy for wxHtmlWinTagHandler; mirrors HtmlTagHandler but its
# GetParser returns the window-aware HtmlWinParser.
class HtmlWinTagHandler(HtmlTagHandler):
    """Proxy of C++ HtmlWinTagHandler class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlWinTagHandler"""
        _html.HtmlWinTagHandler_swiginit(self,_html.new_HtmlWinTagHandler(*args, **kwargs))
        # Wire the Python instance into the C++ side for virtual dispatch.
        HtmlWinTagHandler._setCallbackInfo(self, self, HtmlWinTagHandler)
    def _setCallbackInfo(*args, **kwargs):
        """_setCallbackInfo(self, PyObject self, PyObject _class)"""
        return _html.HtmlWinTagHandler__setCallbackInfo(*args, **kwargs)
    def SetParser(*args, **kwargs):
        """SetParser(self, HtmlParser parser)"""
        return _html.HtmlWinTagHandler_SetParser(*args, **kwargs)
    def GetParser(*args, **kwargs):
        """GetParser(self) -> HtmlWinParser"""
        return _html.HtmlWinTagHandler_GetParser(*args, **kwargs)
    def ParseInner(*args, **kwargs):
        """ParseInner(self, HtmlTag tag)"""
        return _html.HtmlWinTagHandler_ParseInner(*args, **kwargs)
    Parser = property(GetParser,SetParser,doc="See `GetParser` and `SetParser`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlWinTagHandler_swigregister(HtmlWinTagHandler)
# Module-level form of the static method: registers a Python tag-handler
# class globally with wxHtmlWinParser.
def HtmlWinParser_AddTagHandler(*args, **kwargs):
    """HtmlWinParser_AddTagHandler(PyObject tagHandlerClass)"""
    return _html.HtmlWinParser_AddTagHandler(*args, **kwargs)
#---------------------------------------------------------------------------
# Generated SWIG proxy: describes a text selection as a (position, cell) pair
# for each endpoint.
class HtmlSelection(object):
    """Proxy of C++ HtmlSelection class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlSelection"""
        _html.HtmlSelection_swiginit(self,_html.new_HtmlSelection(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlSelection
    __del__ = lambda self : None;
    def Set(*args, **kwargs):
        """Set(self, Point fromPos, HtmlCell fromCell, Point toPos, HtmlCell toCell)"""
        return _html.HtmlSelection_Set(*args, **kwargs)
    def SetCells(*args, **kwargs):
        """SetCells(self, HtmlCell fromCell, HtmlCell toCell)"""
        return _html.HtmlSelection_SetCells(*args, **kwargs)
    def GetFromCell(*args, **kwargs):
        """GetFromCell(self) -> HtmlCell"""
        return _html.HtmlSelection_GetFromCell(*args, **kwargs)
    def GetToCell(*args, **kwargs):
        """GetToCell(self) -> HtmlCell"""
        return _html.HtmlSelection_GetToCell(*args, **kwargs)
    def GetFromPos(*args, **kwargs):
        """GetFromPos(self) -> Point"""
        return _html.HtmlSelection_GetFromPos(*args, **kwargs)
    def GetToPos(*args, **kwargs):
        """GetToPos(self) -> Point"""
        return _html.HtmlSelection_GetToPos(*args, **kwargs)
    def IsEmpty(*args, **kwargs):
        """IsEmpty(self) -> bool"""
        return _html.HtmlSelection_IsEmpty(*args, **kwargs)
    FromCell = property(GetFromCell,doc="See `GetFromCell`")
    FromPos = property(GetFromPos,doc="See `GetFromPos`")
    ToCell = property(GetToCell,doc="See `GetToCell`")
    ToPos = property(GetToPos,doc="See `GetToPos`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlSelection_swigregister(HtmlSelection)
# Selection-state constants mirrored from the C++ wxHtmlSelectionState enum.
HTML_SEL_OUT = _html.HTML_SEL_OUT
HTML_SEL_IN = _html.HTML_SEL_IN
HTML_SEL_CHANGING = _html.HTML_SEL_CHANGING
# Generated SWIG proxy: mutable state (colours, selection state) threaded
# through cell rendering.
class HtmlRenderingState(object):
    """Proxy of C++ HtmlRenderingState class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlRenderingState"""
        _html.HtmlRenderingState_swiginit(self,_html.new_HtmlRenderingState(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlRenderingState
    __del__ = lambda self : None;
    def SetSelectionState(*args, **kwargs):
        """SetSelectionState(self, int s)"""
        return _html.HtmlRenderingState_SetSelectionState(*args, **kwargs)
    def GetSelectionState(*args, **kwargs):
        """GetSelectionState(self) -> int"""
        return _html.HtmlRenderingState_GetSelectionState(*args, **kwargs)
    def SetFgColour(*args, **kwargs):
        """SetFgColour(self, Colour c)"""
        return _html.HtmlRenderingState_SetFgColour(*args, **kwargs)
    def GetFgColour(*args, **kwargs):
        """GetFgColour(self) -> Colour"""
        return _html.HtmlRenderingState_GetFgColour(*args, **kwargs)
    def SetBgColour(*args, **kwargs):
        """SetBgColour(self, Colour c)"""
        return _html.HtmlRenderingState_SetBgColour(*args, **kwargs)
    def GetBgColour(*args, **kwargs):
        """GetBgColour(self) -> Colour"""
        return _html.HtmlRenderingState_GetBgColour(*args, **kwargs)
    BgColour = property(GetBgColour,SetBgColour,doc="See `GetBgColour` and `SetBgColour`")
    FgColour = property(GetFgColour,SetFgColour,doc="See `GetFgColour` and `SetFgColour`")
    SelectionState = property(GetSelectionState,SetSelectionState,doc="See `GetSelectionState` and `SetSelectionState`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlRenderingState_swigregister(HtmlRenderingState)
# Generated SWIG proxy: abstract rendering-style interface. Instances are
# only ever produced by the C++ side; Python may not construct one.
class HtmlRenderingStyle(object):
    """Proxy of C++ HtmlRenderingStyle class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self):
        # Parenthesised raise form: valid on both Python 2 and Python 3,
        # unlike the old `raise AttributeError, "..."` comma syntax.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def GetSelectedTextColour(*args, **kwargs):
        """GetSelectedTextColour(self, Colour clr) -> Colour"""
        return _html.HtmlRenderingStyle_GetSelectedTextColour(*args, **kwargs)
    def GetSelectedTextBgColour(*args, **kwargs):
        """GetSelectedTextBgColour(self, Colour clr) -> Colour"""
        return _html.HtmlRenderingStyle_GetSelectedTextBgColour(*args, **kwargs)
    SelectedTextBgColour = property(GetSelectedTextBgColour,doc="See `GetSelectedTextBgColour`")
    SelectedTextColour = property(GetSelectedTextColour,doc="See `GetSelectedTextColour`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlRenderingStyle_swigregister(HtmlRenderingStyle)
# Generated SWIG proxy: the stock rendering style; like its base it cannot be
# constructed from Python.
class DefaultHtmlRenderingStyle(HtmlRenderingStyle):
    """Proxy of C++ DefaultHtmlRenderingStyle class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self):
        # Parenthesised raise form: valid on both Python 2 and Python 3,
        # unlike the old `raise AttributeError, "..."` comma syntax.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
# Register the Python proxy class with the SWIG runtime type system.
_html.DefaultHtmlRenderingStyle_swigregister(DefaultHtmlRenderingStyle)
# Generated SWIG proxy: bundles selection, style and state passed to
# HtmlCell.Draw / DrawInvisible.
class HtmlRenderingInfo(object):
    """Proxy of C++ HtmlRenderingInfo class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlRenderingInfo"""
        _html.HtmlRenderingInfo_swiginit(self,_html.new_HtmlRenderingInfo(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlRenderingInfo
    __del__ = lambda self : None;
    def SetSelection(*args, **kwargs):
        """SetSelection(self, HtmlSelection s)"""
        return _html.HtmlRenderingInfo_SetSelection(*args, **kwargs)
    def GetSelection(*args, **kwargs):
        """GetSelection(self) -> HtmlSelection"""
        return _html.HtmlRenderingInfo_GetSelection(*args, **kwargs)
    def SetStyle(*args, **kwargs):
        """SetStyle(self, HtmlRenderingStyle style)"""
        return _html.HtmlRenderingInfo_SetStyle(*args, **kwargs)
    def GetStyle(*args, **kwargs):
        """GetStyle(self) -> HtmlRenderingStyle"""
        return _html.HtmlRenderingInfo_GetStyle(*args, **kwargs)
    def GetState(*args, **kwargs):
        """GetState(self) -> HtmlRenderingState"""
        return _html.HtmlRenderingInfo_GetState(*args, **kwargs)
    Selection = property(GetSelection,SetSelection,doc="See `GetSelection` and `SetSelection`")
    State = property(GetState,doc="See `GetState`")
    Style = property(GetStyle,SetStyle,doc="See `GetStyle` and `SetStyle`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlRenderingInfo_swigregister(HtmlRenderingInfo)
#---------------------------------------------------------------------------
# Flags for HtmlCell.FindCellByPos, mirrored from the C++ enum.
HTML_FIND_EXACT = _html.HTML_FIND_EXACT
HTML_FIND_NEAREST_BEFORE = _html.HTML_FIND_NEAREST_BEFORE
HTML_FIND_NEAREST_AFTER = _html.HTML_FIND_NEAREST_AFTER
# Generated SWIG proxy: base element of the HTML layout tree. Cells form a
# linked structure (Next / Parent / FirstChild); every method forwards to the
# corresponding wxHtmlCell entry point in the _html extension module.
class HtmlCell(_core.Object):
    """Proxy of C++ HtmlCell class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlCell"""
        _html.HtmlCell_swiginit(self,_html.new_HtmlCell(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlCell
    __del__ = lambda self : None;
    # --- geometry accessors -------------------------------------------------
    def GetPosX(*args, **kwargs):
        """GetPosX(self) -> int"""
        return _html.HtmlCell_GetPosX(*args, **kwargs)
    def GetPosY(*args, **kwargs):
        """GetPosY(self) -> int"""
        return _html.HtmlCell_GetPosY(*args, **kwargs)
    def GetWidth(*args, **kwargs):
        """GetWidth(self) -> int"""
        return _html.HtmlCell_GetWidth(*args, **kwargs)
    def GetHeight(*args, **kwargs):
        """GetHeight(self) -> int"""
        return _html.HtmlCell_GetHeight(*args, **kwargs)
    def GetDescent(*args, **kwargs):
        """GetDescent(self) -> int"""
        return _html.HtmlCell_GetDescent(*args, **kwargs)
    def GetMaxTotalWidth(*args, **kwargs):
        """GetMaxTotalWidth(self) -> int"""
        return _html.HtmlCell_GetMaxTotalWidth(*args, **kwargs)
    def GetId(*args, **kwargs):
        """GetId(self) -> String"""
        return _html.HtmlCell_GetId(*args, **kwargs)
    def SetId(*args, **kwargs):
        """SetId(self, String id)"""
        return _html.HtmlCell_SetId(*args, **kwargs)
    def GetLink(*args, **kwargs):
        """GetLink(self, int x=0, int y=0) -> HtmlLinkInfo"""
        return _html.HtmlCell_GetLink(*args, **kwargs)
    # --- tree navigation ----------------------------------------------------
    def GetNext(*args, **kwargs):
        """GetNext(self) -> HtmlCell"""
        return _html.HtmlCell_GetNext(*args, **kwargs)
    def GetParent(*args, **kwargs):
        """GetParent(self) -> HtmlContainerCell"""
        return _html.HtmlCell_GetParent(*args, **kwargs)
    def GetFirstChild(*args, **kwargs):
        """GetFirstChild(self) -> HtmlCell"""
        return _html.HtmlCell_GetFirstChild(*args, **kwargs)
    def GetMouseCursor(*args, **kwargs):
        """GetMouseCursor(self, HtmlWindowInterface window) -> Cursor"""
        return _html.HtmlCell_GetMouseCursor(*args, **kwargs)
    def IsFormattingCell(*args, **kwargs):
        """IsFormattingCell(self) -> bool"""
        return _html.HtmlCell_IsFormattingCell(*args, **kwargs)
    def SetLink(*args, **kwargs):
        """SetLink(self, HtmlLinkInfo link)"""
        return _html.HtmlCell_SetLink(*args, **kwargs)
    def SetNext(*args, **kwargs):
        """SetNext(self, HtmlCell cell)"""
        return _html.HtmlCell_SetNext(*args, **kwargs)
    def SetParent(*args, **kwargs):
        """SetParent(self, HtmlContainerCell p)"""
        return _html.HtmlCell_SetParent(*args, **kwargs)
    def SetPos(*args, **kwargs):
        """SetPos(self, int x, int y)"""
        return _html.HtmlCell_SetPos(*args, **kwargs)
    # --- layout and drawing -------------------------------------------------
    def Layout(*args, **kwargs):
        """Layout(self, int w)"""
        return _html.HtmlCell_Layout(*args, **kwargs)
    def Draw(*args, **kwargs):
        """Draw(self, DC dc, int x, int y, int view_y1, int view_y2, HtmlRenderingInfo info)"""
        return _html.HtmlCell_Draw(*args, **kwargs)
    def DrawInvisible(*args, **kwargs):
        """DrawInvisible(self, DC dc, int x, int y, HtmlRenderingInfo info)"""
        return _html.HtmlCell_DrawInvisible(*args, **kwargs)
    def Find(*args, **kwargs):
        """Find(self, int condition, void param) -> HtmlCell"""
        return _html.HtmlCell_Find(*args, **kwargs)
    def ProcessMouseClick(*args, **kwargs):
        """ProcessMouseClick(self, HtmlWindowInterface window, Point pos, MouseEvent event) -> bool"""
        return _html.HtmlCell_ProcessMouseClick(*args, **kwargs)
    def SetCanLiveOnPagebreak(*args, **kwargs):
        """SetCanLiveOnPagebreak(self, bool can)"""
        return _html.HtmlCell_SetCanLiveOnPagebreak(*args, **kwargs)
    def IsLinebreakAllowed(*args, **kwargs):
        """IsLinebreakAllowed(self) -> bool"""
        return _html.HtmlCell_IsLinebreakAllowed(*args, **kwargs)
    def IsTerminalCell(*args, **kwargs):
        """IsTerminalCell(self) -> bool"""
        return _html.HtmlCell_IsTerminalCell(*args, **kwargs)
    def FindCellByPos(*args, **kwargs):
        """FindCellByPos(self, int x, int y, unsigned int flags=HTML_FIND_EXACT) -> HtmlCell"""
        return _html.HtmlCell_FindCellByPos(*args, **kwargs)
    def GetAbsPos(*args, **kwargs):
        """GetAbsPos(self, HtmlCell rootCell=None) -> Point"""
        return _html.HtmlCell_GetAbsPos(*args, **kwargs)
    def GetRootCell(*args, **kwargs):
        """GetRootCell(self) -> HtmlCell"""
        return _html.HtmlCell_GetRootCell(*args, **kwargs)
    def GetFirstTerminal(*args, **kwargs):
        """GetFirstTerminal(self) -> HtmlCell"""
        return _html.HtmlCell_GetFirstTerminal(*args, **kwargs)
    def GetLastTerminal(*args, **kwargs):
        """GetLastTerminal(self) -> HtmlCell"""
        return _html.HtmlCell_GetLastTerminal(*args, **kwargs)
    def GetDepth(*args, **kwargs):
        """GetDepth(self) -> unsigned int"""
        return _html.HtmlCell_GetDepth(*args, **kwargs)
    def IsBefore(*args, **kwargs):
        """IsBefore(self, HtmlCell cell) -> bool"""
        return _html.HtmlCell_IsBefore(*args, **kwargs)
    def ConvertToText(*args, **kwargs):
        """ConvertToText(self, HtmlSelection sel) -> String"""
        return _html.HtmlCell_ConvertToText(*args, **kwargs)
    # Attribute-style access mapped onto the Get*/Set* method pairs above.
    Depth = property(GetDepth,doc="See `GetDepth`")
    Descent = property(GetDescent,doc="See `GetDescent`")
    FirstChild = property(GetFirstChild,doc="See `GetFirstChild`")
    FirstTerminal = property(GetFirstTerminal,doc="See `GetFirstTerminal`")
    Height = property(GetHeight,doc="See `GetHeight`")
    Id = property(GetId,SetId,doc="See `GetId` and `SetId`")
    LastTerminal = property(GetLastTerminal,doc="See `GetLastTerminal`")
    Link = property(GetLink,SetLink,doc="See `GetLink` and `SetLink`")
    MaxTotalWidth = property(GetMaxTotalWidth,doc="See `GetMaxTotalWidth`")
    MouseCursor = property(GetMouseCursor,doc="See `GetMouseCursor`")
    Next = property(GetNext,SetNext,doc="See `GetNext` and `SetNext`")
    Parent = property(GetParent,SetParent,doc="See `GetParent` and `SetParent`")
    PosX = property(GetPosX,doc="See `GetPosX`")
    PosY = property(GetPosY,doc="See `GetPosY`")
    RootCell = property(GetRootCell,doc="See `GetRootCell`")
    Width = property(GetWidth,doc="See `GetWidth`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlCell_swigregister(HtmlCell)
# Generated SWIG proxy: a terminal cell holding a single word of text.
class HtmlWordCell(HtmlCell):
    """Proxy of C++ HtmlWordCell class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String word, DC dc) -> HtmlWordCell"""
        _html.HtmlWordCell_swiginit(self,_html.new_HtmlWordCell(*args, **kwargs))
    def ConvertToText(*args, **kwargs):
        """ConvertToText(self, HtmlSelection sel) -> String"""
        return _html.HtmlWordCell_ConvertToText(*args, **kwargs)
    def IsLinebreakAllowed(*args, **kwargs):
        """IsLinebreakAllowed(self) -> bool"""
        return _html.HtmlWordCell_IsLinebreakAllowed(*args, **kwargs)
    def SetPreviousWord(*args, **kwargs):
        """SetPreviousWord(self, HtmlWordCell cell)"""
        return _html.HtmlWordCell_SetPreviousWord(*args, **kwargs)
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlWordCell_swigregister(HtmlWordCell)
# Generated SWIG proxy: word cell variant that preserves tab characters.
class HtmlWordWithTabsCell(HtmlWordCell):
    """Proxy of C++ HtmlWordWithTabsCell class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String word, String wordOrig, size_t linepos, DC dc) -> HtmlWordWithTabsCell"""
        _html.HtmlWordWithTabsCell_swiginit(self,_html.new_HtmlWordWithTabsCell(*args, **kwargs))
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlWordWithTabsCell_swigregister(HtmlWordWithTabsCell)
# Generated SWIG proxy: a cell that holds child cells and controls their
# alignment, indentation, width and background.
class HtmlContainerCell(HtmlCell):
    """Proxy of C++ HtmlContainerCell class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, HtmlContainerCell parent) -> HtmlContainerCell"""
        _html.HtmlContainerCell_swiginit(self,_html.new_HtmlContainerCell(*args, **kwargs))
    def InsertCell(*args, **kwargs):
        """InsertCell(self, HtmlCell cell)"""
        return _html.HtmlContainerCell_InsertCell(*args, **kwargs)
    def SetAlignHor(*args, **kwargs):
        """SetAlignHor(self, int al)"""
        return _html.HtmlContainerCell_SetAlignHor(*args, **kwargs)
    def GetAlignHor(*args, **kwargs):
        """GetAlignHor(self) -> int"""
        return _html.HtmlContainerCell_GetAlignHor(*args, **kwargs)
    def SetAlignVer(*args, **kwargs):
        """SetAlignVer(self, int al)"""
        return _html.HtmlContainerCell_SetAlignVer(*args, **kwargs)
    def GetAlignVer(*args, **kwargs):
        """GetAlignVer(self) -> int"""
        return _html.HtmlContainerCell_GetAlignVer(*args, **kwargs)
    def SetIndent(*args, **kwargs):
        """SetIndent(self, int i, int what, int units=HTML_UNITS_PIXELS)"""
        return _html.HtmlContainerCell_SetIndent(*args, **kwargs)
    def GetIndent(*args, **kwargs):
        """GetIndent(self, int ind) -> int"""
        return _html.HtmlContainerCell_GetIndent(*args, **kwargs)
    def GetIndentUnits(*args, **kwargs):
        """GetIndentUnits(self, int ind) -> int"""
        return _html.HtmlContainerCell_GetIndentUnits(*args, **kwargs)
    def SetAlign(*args, **kwargs):
        """SetAlign(self, HtmlTag tag)"""
        return _html.HtmlContainerCell_SetAlign(*args, **kwargs)
    def SetWidthFloat(*args, **kwargs):
        """SetWidthFloat(self, int w, int units)"""
        return _html.HtmlContainerCell_SetWidthFloat(*args, **kwargs)
    def SetWidthFloatFromTag(*args, **kwargs):
        """SetWidthFloatFromTag(self, HtmlTag tag)"""
        return _html.HtmlContainerCell_SetWidthFloatFromTag(*args, **kwargs)
    def SetMinHeight(*args, **kwargs):
        """SetMinHeight(self, int h, int align=HTML_ALIGN_TOP)"""
        return _html.HtmlContainerCell_SetMinHeight(*args, **kwargs)
    def SetBackgroundColour(*args, **kwargs):
        """SetBackgroundColour(self, Colour clr)"""
        return _html.HtmlContainerCell_SetBackgroundColour(*args, **kwargs)
    def GetBackgroundColour(*args, **kwargs):
        """GetBackgroundColour(self) -> Colour"""
        return _html.HtmlContainerCell_GetBackgroundColour(*args, **kwargs)
    def SetBorder(*args, **kwargs):
        """SetBorder(self, Colour clr1, Colour clr2, int border=1)"""
        return _html.HtmlContainerCell_SetBorder(*args, **kwargs)
    def GetFirstChild(*args, **kwargs):
        """GetFirstChild(self) -> HtmlCell"""
        return _html.HtmlContainerCell_GetFirstChild(*args, **kwargs)
    # Attribute-style access mapped onto the Get*/Set* method pairs above.
    AlignHor = property(GetAlignHor,SetAlignHor,doc="See `GetAlignHor` and `SetAlignHor`")
    AlignVer = property(GetAlignVer,SetAlignVer,doc="See `GetAlignVer` and `SetAlignVer`")
    BackgroundColour = property(GetBackgroundColour,SetBackgroundColour,doc="See `GetBackgroundColour` and `SetBackgroundColour`")
    FirstChild = property(GetFirstChild,doc="See `GetFirstChild`")
    Indent = property(GetIndent,SetIndent,doc="See `GetIndent` and `SetIndent`")
    IndentUnits = property(GetIndentUnits,doc="See `GetIndentUnits`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlContainerCell_swigregister(HtmlContainerCell)
# Generated SWIG proxy: a formatting cell that changes the current colour.
class HtmlColourCell(HtmlCell):
    """Proxy of C++ HtmlColourCell class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, Colour clr, int flags=HTML_CLR_FOREGROUND) -> HtmlColourCell"""
        _html.HtmlColourCell_swiginit(self,_html.new_HtmlColourCell(*args, **kwargs))
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlColourCell_swigregister(HtmlColourCell)
# Generated SWIG proxy: a formatting cell that changes the current font.
class HtmlFontCell(HtmlCell):
    """Proxy of C++ HtmlFontCell class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, Font font) -> HtmlFontCell"""
        _html.HtmlFontCell_swiginit(self,_html.new_HtmlFontCell(*args, **kwargs))
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlFontCell_swigregister(HtmlFontCell)
# Generated SWIG proxy: a cell that embeds an arbitrary wx.Window in the page.
class HtmlWidgetCell(HtmlCell):
    """Proxy of C++ HtmlWidgetCell class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, Window wnd, int w=0) -> HtmlWidgetCell"""
        _html.HtmlWidgetCell_swiginit(self,_html.new_HtmlWidgetCell(*args, **kwargs))
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlWidgetCell_swigregister(HtmlWidgetCell)
#---------------------------------------------------------------------------
# Generated SWIG proxy: base class for filters that translate opened files
# into HTML; subclass in Python and register with HtmlWindow.AddFilter.
class HtmlFilter(_core.Object):
    """Proxy of C++ HtmlFilter class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlFilter"""
        _html.HtmlFilter_swiginit(self,_html.new_HtmlFilter(*args, **kwargs))
        # Wire the Python instance into the C++ side for virtual dispatch.
        HtmlFilter._setCallbackInfo(self, self, HtmlFilter)
    def _setCallbackInfo(*args, **kwargs):
        """_setCallbackInfo(self, PyObject self, PyObject _class)"""
        return _html.HtmlFilter__setCallbackInfo(*args, **kwargs)
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlFilter_swigregister(HtmlFilter)
# Generated SWIG proxy: abstract interface implemented by windows that can
# display HTML. Instances are only obtained from C++; Python cannot
# construct one directly.
class HtmlWindowInterface(object):
    """Proxy of C++ HtmlWindowInterface class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self):
        # Parenthesised raise form: valid on both Python 2 and Python 3,
        # unlike the old `raise AttributeError, "..."` comma syntax.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _html.delete_HtmlWindowInterface
    __del__ = lambda self : None;
    def SetHTMLWindowTitle(*args, **kwargs):
        """SetHTMLWindowTitle(self, String title)"""
        return _html.HtmlWindowInterface_SetHTMLWindowTitle(*args, **kwargs)
    def HTMLCoordsToWindow(*args, **kwargs):
        """HTMLCoordsToWindow(self, HtmlCell cell, Point pos) -> Point"""
        return _html.HtmlWindowInterface_HTMLCoordsToWindow(*args, **kwargs)
    def GetHTMLWindow(*args, **kwargs):
        """GetHTMLWindow(self) -> Window"""
        return _html.HtmlWindowInterface_GetHTMLWindow(*args, **kwargs)
    def GetHTMLBackgroundColour(*args, **kwargs):
        """GetHTMLBackgroundColour(self) -> Colour"""
        return _html.HtmlWindowInterface_GetHTMLBackgroundColour(*args, **kwargs)
    def SetHTMLBackgroundColour(*args, **kwargs):
        """SetHTMLBackgroundColour(self, Colour clr)"""
        return _html.HtmlWindowInterface_SetHTMLBackgroundColour(*args, **kwargs)
    def SetHTMLBackgroundImage(*args, **kwargs):
        """SetHTMLBackgroundImage(self, Bitmap bmpBg)"""
        return _html.HtmlWindowInterface_SetHTMLBackgroundImage(*args, **kwargs)
    def SetHTMLStatusText(*args, **kwargs):
        """SetHTMLStatusText(self, String text)"""
        return _html.HtmlWindowInterface_SetHTMLStatusText(*args, **kwargs)
    # Cursor-kind constants mirrored from the C++ HTMLCursor enum.
    HTMLCursor_Default = _html.HtmlWindowInterface_HTMLCursor_Default
    HTMLCursor_Link = _html.HtmlWindowInterface_HTMLCursor_Link
    HTMLCursor_Text = _html.HtmlWindowInterface_HTMLCursor_Text
    HTMLBackgroundColour = property(GetHTMLBackgroundColour,SetHTMLBackgroundColour,doc="See `GetHTMLBackgroundColour` and `SetHTMLBackgroundColour`")
    HTMLWindow = property(GetHTMLWindow,doc="See `GetHTMLWindow`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlWindowInterface_swigregister(HtmlWindowInterface)
#---------------------------------------------------------------------------
# Generated SWIG proxy: the main HTML display widget. Every method forwards
# to the corresponding wxHtmlWindow entry point in the _html module.
class HtmlWindow(_windows.ScrolledWindow):
    """Proxy of C++ HtmlWindow class"""
    # 'thisown' tracks whether Python owns (and may delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        __init__(self, Window parent, int id=-1, Point pos=DefaultPosition,
            Size size=DefaultSize, int style=HW_DEFAULT_STYLE,
            String name=HtmlWindowNameStr) -> HtmlWindow
        """
        _html.HtmlWindow_swiginit(self,_html.new_HtmlWindow(*args, **kwargs))
        # OOR bookkeeping plus callback wiring so C++ virtuals dispatch to
        # Python overrides (OnLinkClicked, OnSetTitle, ...).
        self._setOORInfo(self);HtmlWindow._setCallbackInfo(self, self, HtmlWindow)
    def Create(*args, **kwargs):
        """
        Create(self, Window parent, int id=-1, Point pos=DefaultPosition,
            Size size=DefaultSize, int style=HW_SCROLLBAR_AUTO,
            String name=HtmlWindowNameStr) -> bool
        """
        return _html.HtmlWindow_Create(*args, **kwargs)
    def _setCallbackInfo(*args, **kwargs):
        """_setCallbackInfo(self, PyObject self, PyObject _class)"""
        return _html.HtmlWindow__setCallbackInfo(*args, **kwargs)
    # --- content loading ----------------------------------------------------
    def SetPage(*args, **kwargs):
        """SetPage(self, String source) -> bool"""
        return _html.HtmlWindow_SetPage(*args, **kwargs)
    def LoadPage(*args, **kwargs):
        """LoadPage(self, String location) -> bool"""
        return _html.HtmlWindow_LoadPage(*args, **kwargs)
    def LoadFile(*args, **kwargs):
        """LoadFile(self, String filename) -> bool"""
        return _html.HtmlWindow_LoadFile(*args, **kwargs)
    def AppendToPage(*args, **kwargs):
        """AppendToPage(self, String source) -> bool"""
        return _html.HtmlWindow_AppendToPage(*args, **kwargs)
    def GetOpenedPage(*args, **kwargs):
        """GetOpenedPage(self) -> String"""
        return _html.HtmlWindow_GetOpenedPage(*args, **kwargs)
    def GetOpenedAnchor(*args, **kwargs):
        """GetOpenedAnchor(self) -> String"""
        return _html.HtmlWindow_GetOpenedAnchor(*args, **kwargs)
    def GetOpenedPageTitle(*args, **kwargs):
        """GetOpenedPageTitle(self) -> String"""
        return _html.HtmlWindow_GetOpenedPageTitle(*args, **kwargs)
    def SetRelatedFrame(*args, **kwargs):
        """SetRelatedFrame(self, Frame frame, String format)"""
        return _html.HtmlWindow_SetRelatedFrame(*args, **kwargs)
    def GetRelatedFrame(*args, **kwargs):
        """GetRelatedFrame(self) -> Frame"""
        return _html.HtmlWindow_GetRelatedFrame(*args, **kwargs)
    # NOTE: overloaded in C++, hence *args only (no keyword form).
    def SetRelatedStatusBar(*args):
        """
        SetRelatedStatusBar(self, int bar)
        SetRelatedStatusBar(self, StatusBar ?, int index=0)
        """
        return _html.HtmlWindow_SetRelatedStatusBar(*args)
    def SetFonts(*args, **kwargs):
        """SetFonts(self, String normal_face, String fixed_face, PyObject sizes=None)"""
        return _html.HtmlWindow_SetFonts(*args, **kwargs)
    def SetStandardFonts(*args, **kwargs):
        """SetStandardFonts(self, int size=-1, String normal_face=EmptyString, String fixed_face=EmptyString)"""
        return _html.HtmlWindow_SetStandardFonts(*args, **kwargs)
    def SetBorders(*args, **kwargs):
        """SetBorders(self, int b)"""
        return _html.HtmlWindow_SetBorders(*args, **kwargs)
    def SetBackgroundImage(*args, **kwargs):
        """SetBackgroundImage(self, Bitmap bmpBg)"""
        return _html.HtmlWindow_SetBackgroundImage(*args, **kwargs)
    def ReadCustomization(*args, **kwargs):
        """ReadCustomization(self, ConfigBase cfg, String path=EmptyString)"""
        return _html.HtmlWindow_ReadCustomization(*args, **kwargs)
    def WriteCustomization(*args, **kwargs):
        """WriteCustomization(self, ConfigBase cfg, String path=EmptyString)"""
        return _html.HtmlWindow_WriteCustomization(*args, **kwargs)
    # --- navigation history -------------------------------------------------
    def HistoryBack(*args, **kwargs):
        """HistoryBack(self) -> bool"""
        return _html.HtmlWindow_HistoryBack(*args, **kwargs)
    def HistoryForward(*args, **kwargs):
        """HistoryForward(self) -> bool"""
        return _html.HtmlWindow_HistoryForward(*args, **kwargs)
    def HistoryCanBack(*args, **kwargs):
        """HistoryCanBack(self) -> bool"""
        return _html.HtmlWindow_HistoryCanBack(*args, **kwargs)
    def HistoryCanForward(*args, **kwargs):
        """HistoryCanForward(self) -> bool"""
        return _html.HtmlWindow_HistoryCanForward(*args, **kwargs)
    def HistoryClear(*args, **kwargs):
        """HistoryClear(self)"""
        return _html.HtmlWindow_HistoryClear(*args, **kwargs)
    def GetInternalRepresentation(*args, **kwargs):
        """GetInternalRepresentation(self) -> HtmlContainerCell"""
        return _html.HtmlWindow_GetInternalRepresentation(*args, **kwargs)
    def GetParser(*args, **kwargs):
        """GetParser(self) -> HtmlWinParser"""
        return _html.HtmlWindow_GetParser(*args, **kwargs)
    def ScrollToAnchor(*args, **kwargs):
        """ScrollToAnchor(self, String anchor) -> bool"""
        return _html.HtmlWindow_ScrollToAnchor(*args, **kwargs)
    def HasAnchor(*args, **kwargs):
        """HasAnchor(self, String anchor) -> bool"""
        return _html.HtmlWindow_HasAnchor(*args, **kwargs)
    def AddFilter(*args, **kwargs):
        """AddFilter(HtmlFilter filter)"""
        return _html.HtmlWindow_AddFilter(*args, **kwargs)
    AddFilter = staticmethod(AddFilter)
    # --- selection ----------------------------------------------------------
    def SelectWord(*args, **kwargs):
        """SelectWord(self, Point pos)"""
        return _html.HtmlWindow_SelectWord(*args, **kwargs)
    def SelectLine(*args, **kwargs):
        """SelectLine(self, Point pos)"""
        return _html.HtmlWindow_SelectLine(*args, **kwargs)
    def SelectAll(*args, **kwargs):
        """SelectAll(self)"""
        return _html.HtmlWindow_SelectAll(*args, **kwargs)
    def SelectionToText(*args, **kwargs):
        """SelectionToText(self) -> String"""
        return _html.HtmlWindow_SelectionToText(*args, **kwargs)
    def ToText(*args, **kwargs):
        """ToText(self) -> String"""
        return _html.HtmlWindow_ToText(*args, **kwargs)
    # --- overridable event hooks --------------------------------------------
    def OnLinkClicked(*args, **kwargs):
        """OnLinkClicked(self, HtmlLinkInfo link)"""
        return _html.HtmlWindow_OnLinkClicked(*args, **kwargs)
    def OnSetTitle(*args, **kwargs):
        """OnSetTitle(self, String title)"""
        return _html.HtmlWindow_OnSetTitle(*args, **kwargs)
    def OnCellMouseHover(*args, **kwargs):
        """OnCellMouseHover(self, HtmlCell cell, int x, int y)"""
        return _html.HtmlWindow_OnCellMouseHover(*args, **kwargs)
    def OnCellClicked(*args, **kwargs):
        """OnCellClicked(self, HtmlCell cell, int x, int y, MouseEvent event) -> bool"""
        return _html.HtmlWindow_OnCellClicked(*args, **kwargs)
    def OnOpeningURL(*args, **kwargs):
        """OnOpeningURL(self, int type, String url, String redirect) -> int"""
        return _html.HtmlWindow_OnOpeningURL(*args, **kwargs)
    # Deprecated base_* wrappers kept for backwards compatibility; each just
    # forwards to the non-base_ method and warns via wx.deprecated.
    def base_OnLinkClicked(*args, **kw):
        return HtmlWindow.OnLinkClicked(*args, **kw)
    base_OnLinkClicked = wx.deprecated(base_OnLinkClicked,
        "Please use HtmlWindow.OnLinkClicked instead.")
    def base_OnSetTitle(*args, **kw):
        return HtmlWindow.OnSetTitle(*args, **kw)
    base_OnSetTitle = wx.deprecated(base_OnSetTitle,
        "Please use HtmlWindow.OnSetTitle instead.")
    def base_OnCellMouseHover(*args, **kw):
        return HtmlWindow.OnCellMouseHover(*args, **kw)
    base_OnCellMouseHover = wx.deprecated(base_OnCellMouseHover,
        "Please use HtmlWindow.OnCellMouseHover instead.")
    def base_OnCellClicked(*args, **kw):
        return HtmlWindow.OnCellClicked(*args, **kw)
    base_OnCellClicked = wx.deprecated(base_OnCellClicked,
        "Please use HtmlWindow.OnCellClicked instead.")
    def GetClassDefaultAttributes(*args, **kwargs):
        """
        GetClassDefaultAttributes(int variant=WINDOW_VARIANT_NORMAL) -> VisualAttributes
        Get the default attributes for this class. This is useful if you want
        to use the same font or colour in your own control as in a standard
        control -- which is a much better idea than hard coding specific
        colours or fonts which might look completely out of place on the
        user's system, especially if it uses themes.
        The variant parameter is only relevant under Mac currently and is
        ignore under other platforms. Under Mac, it will change the size of
        the returned font. See `wx.Window.SetWindowVariant` for more about
        this.
        """
        return _html.HtmlWindow_GetClassDefaultAttributes(*args, **kwargs)
    GetClassDefaultAttributes = staticmethod(GetClassDefaultAttributes)
    # Cursor-kind constants mirrored from the C++ HTMLCursor enum.
    HTMLCursor_Default = _html.HtmlWindow_HTMLCursor_Default
    HTMLCursor_Link = _html.HtmlWindow_HTMLCursor_Link
    HTMLCursor_Text = _html.HtmlWindow_HTMLCursor_Text
    def GetDefaultHTMLCursor(*args, **kwargs):
        """GetDefaultHTMLCursor(int type) -> Cursor"""
        return _html.HtmlWindow_GetDefaultHTMLCursor(*args, **kwargs)
    GetDefaultHTMLCursor = staticmethod(GetDefaultHTMLCursor)
    # Attribute-style access mapped onto the Get* methods above.
    InternalRepresentation = property(GetInternalRepresentation,doc="See `GetInternalRepresentation`")
    OpenedAnchor = property(GetOpenedAnchor,doc="See `GetOpenedAnchor`")
    OpenedPage = property(GetOpenedPage,doc="See `GetOpenedPage`")
    OpenedPageTitle = property(GetOpenedPageTitle,doc="See `GetOpenedPageTitle`")
    Parser = property(GetParser,doc="See `GetParser`")
    RelatedFrame = property(GetRelatedFrame,doc="See `GetRelatedFrame`")
# Register the Python proxy class with the SWIG runtime type system.
_html.HtmlWindow_swigregister(HtmlWindow)
def PreHtmlWindow(*args, **kwargs):
    """PreHtmlWindow() -> HtmlWindow"""
    # Two-phase-create factory: the C++ object is allocated now and the
    # window itself is realized later via Create().
    return _html.new_PreHtmlWindow(*args, **kwargs)
# Module-level form of the HtmlWindow.AddFilter staticmethod.
def HtmlWindow_AddFilter(*args, **kwargs):
    """HtmlWindow_AddFilter(HtmlFilter filter)"""
    return _html.HtmlWindow_AddFilter(*args, **kwargs)
# Module-level form of the HtmlWindow.GetClassDefaultAttributes staticmethod.
def HtmlWindow_GetClassDefaultAttributes(*args, **kwargs):
    """
    HtmlWindow_GetClassDefaultAttributes(int variant=WINDOW_VARIANT_NORMAL) -> VisualAttributes
    Get the default attributes for this class. This is useful if you want
    to use the same font or colour in your own control as in a standard
    control -- which is a much better idea than hard coding specific
    colours or fonts which might look completely out of place on the
    user's system, especially if it uses themes.
    The variant parameter is only relevant under Mac currently and is
    ignore under other platforms. Under Mac, it will change the size of
    the returned font. See `wx.Window.SetWindowVariant` for more about
    this.
    """
    return _html.HtmlWindow_GetClassDefaultAttributes(*args, **kwargs)
# Module-level form of the HtmlWindow.GetDefaultHTMLCursor staticmethod.
def HtmlWindow_GetDefaultHTMLCursor(*args, **kwargs):
    """HtmlWindow_GetDefaultHTMLCursor(int type) -> Cursor"""
    return _html.HtmlWindow_GetDefaultHTMLCursor(*args, **kwargs)
#---------------------------------------------------------------------------
# SWIG proxy for the C++ wxHtmlDCRenderer: renders HTML onto a wx.DC.
# All methods delegate to the _html extension module.
class HtmlDCRenderer(_core.Object):
    """Proxy of C++ HtmlDCRenderer class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlDCRenderer"""
        _html.HtmlDCRenderer_swiginit(self,_html.new_HtmlDCRenderer(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlDCRenderer
    __del__ = lambda self : None;
    def SetDC(*args):
        """
        SetDC(self, DC dc, double pixel_scale=1.0)
        SetDC(self, DC dc, double pixel_scale, double font_scale)
        """
        return _html.HtmlDCRenderer_SetDC(*args)
    def SetSize(*args, **kwargs):
        """SetSize(self, int width, int height)"""
        return _html.HtmlDCRenderer_SetSize(*args, **kwargs)
    def SetHtmlText(*args, **kwargs):
        """SetHtmlText(self, String html, String basepath=EmptyString, bool isdir=True)"""
        return _html.HtmlDCRenderer_SetHtmlText(*args, **kwargs)
    def SetFonts(*args, **kwargs):
        """SetFonts(self, String normal_face, String fixed_face, PyObject sizes=None)"""
        return _html.HtmlDCRenderer_SetFonts(*args, **kwargs)
    def SetStandardFonts(*args, **kwargs):
        """SetStandardFonts(self, int size=-1, String normal_face=EmptyString, String fixed_face=EmptyString)"""
        return _html.HtmlDCRenderer_SetStandardFonts(*args, **kwargs)
    def Render(*args, **kwargs):
        """
        Render(self, int x, int y, wxArrayInt known_pagebreaks, int from=0,
            int dont_render=False, int to=INT_MAX) -> int
        """
        return _html.HtmlDCRenderer_Render(*args, **kwargs)
    def GetTotalHeight(*args, **kwargs):
        """GetTotalHeight(self) -> int"""
        return _html.HtmlDCRenderer_GetTotalHeight(*args, **kwargs)
    TotalHeight = property(GetTotalHeight,doc="See `GetTotalHeight`")
# Register the proxy class with the SWIG runtime type system.
_html.HtmlDCRenderer_swigregister(HtmlDCRenderer)
# Page-selection flags for HtmlPrintout/HtmlEasyPrinting SetHeader/SetFooter.
PAGE_ODD = _html.PAGE_ODD
PAGE_EVEN = _html.PAGE_EVEN
PAGE_ALL = _html.PAGE_ALL
# SWIG proxy for the C++ wxHtmlPrintout: a Printout specialization for HTML.
class HtmlPrintout(_windows.Printout):
    """Proxy of C++ HtmlPrintout class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String title=HtmlPrintoutTitleStr) -> HtmlPrintout"""
        _html.HtmlPrintout_swiginit(self,_html.new_HtmlPrintout(*args, **kwargs))
    def SetHtmlText(*args, **kwargs):
        """SetHtmlText(self, String html, String basepath=EmptyString, bool isdir=True)"""
        return _html.HtmlPrintout_SetHtmlText(*args, **kwargs)
    def SetHtmlFile(*args, **kwargs):
        """SetHtmlFile(self, String htmlfile)"""
        return _html.HtmlPrintout_SetHtmlFile(*args, **kwargs)
    def SetHeader(*args, **kwargs):
        """SetHeader(self, String header, int pg=PAGE_ALL)"""
        return _html.HtmlPrintout_SetHeader(*args, **kwargs)
    def SetFooter(*args, **kwargs):
        """SetFooter(self, String footer, int pg=PAGE_ALL)"""
        return _html.HtmlPrintout_SetFooter(*args, **kwargs)
    def SetFonts(*args, **kwargs):
        """SetFonts(self, String normal_face, String fixed_face, PyObject sizes=None)"""
        return _html.HtmlPrintout_SetFonts(*args, **kwargs)
    def SetStandardFonts(*args, **kwargs):
        """SetStandardFonts(self, int size=-1, String normal_face=EmptyString, String fixed_face=EmptyString)"""
        return _html.HtmlPrintout_SetStandardFonts(*args, **kwargs)
    def SetMargins(*args, **kwargs):
        """
        SetMargins(self, float top=25.2, float bottom=25.2, float left=25.2,
            float right=25.2, float spaces=5)
        """
        return _html.HtmlPrintout_SetMargins(*args, **kwargs)
    # Defined as plain functions first, then rebound as staticmethods below.
    def AddFilter(*args, **kwargs):
        """AddFilter(wxHtmlFilter filter)"""
        return _html.HtmlPrintout_AddFilter(*args, **kwargs)
    AddFilter = staticmethod(AddFilter)
    def CleanUpStatics(*args, **kwargs):
        """CleanUpStatics()"""
        return _html.HtmlPrintout_CleanUpStatics(*args, **kwargs)
    CleanUpStatics = staticmethod(CleanUpStatics)
# Register the proxy class with the SWIG runtime type system.
_html.HtmlPrintout_swigregister(HtmlPrintout)
# Module-level form of the HtmlPrintout.AddFilter staticmethod.
def HtmlPrintout_AddFilter(*args, **kwargs):
    """HtmlPrintout_AddFilter(wxHtmlFilter filter)"""
    return _html.HtmlPrintout_AddFilter(*args, **kwargs)
# Module-level form of the HtmlPrintout.CleanUpStatics staticmethod.
def HtmlPrintout_CleanUpStatics(*args):
    """HtmlPrintout_CleanUpStatics()"""
    return _html.HtmlPrintout_CleanUpStatics(*args)
# SWIG proxy for the C++ wxHtmlEasyPrinting: convenience wrapper bundling
# printing, previewing and page setup for HTML documents.
class HtmlEasyPrinting(_core.Object):
    """Proxy of C++ HtmlEasyPrinting class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String name=HtmlPrintingTitleStr, Window parentWindow=None) -> HtmlEasyPrinting"""
        _html.HtmlEasyPrinting_swiginit(self,_html.new_HtmlEasyPrinting(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlEasyPrinting
    __del__ = lambda self : None;
    def PreviewFile(*args, **kwargs):
        """PreviewFile(self, String htmlfile) -> bool"""
        return _html.HtmlEasyPrinting_PreviewFile(*args, **kwargs)
    def PreviewText(*args, **kwargs):
        """PreviewText(self, String htmltext, String basepath=EmptyString) -> bool"""
        return _html.HtmlEasyPrinting_PreviewText(*args, **kwargs)
    def PrintFile(*args, **kwargs):
        """PrintFile(self, String htmlfile) -> bool"""
        return _html.HtmlEasyPrinting_PrintFile(*args, **kwargs)
    def PrintText(*args, **kwargs):
        """PrintText(self, String htmltext, String basepath=EmptyString) -> bool"""
        return _html.HtmlEasyPrinting_PrintText(*args, **kwargs)
    def PageSetup(*args, **kwargs):
        """PageSetup(self)"""
        return _html.HtmlEasyPrinting_PageSetup(*args, **kwargs)
    def SetHeader(*args, **kwargs):
        """SetHeader(self, String header, int pg=PAGE_ALL)"""
        return _html.HtmlEasyPrinting_SetHeader(*args, **kwargs)
    def SetFooter(*args, **kwargs):
        """SetFooter(self, String footer, int pg=PAGE_ALL)"""
        return _html.HtmlEasyPrinting_SetFooter(*args, **kwargs)
    def SetFonts(*args, **kwargs):
        """SetFonts(self, String normal_face, String fixed_face, PyObject sizes=None)"""
        return _html.HtmlEasyPrinting_SetFonts(*args, **kwargs)
    def SetStandardFonts(*args, **kwargs):
        """SetStandardFonts(self, int size=-1, String normal_face=EmptyString, String fixed_face=EmptyString)"""
        return _html.HtmlEasyPrinting_SetStandardFonts(*args, **kwargs)
    def GetPrintData(*args, **kwargs):
        """GetPrintData(self) -> PrintData"""
        return _html.HtmlEasyPrinting_GetPrintData(*args, **kwargs)
    def GetPageSetupData(*args, **kwargs):
        """GetPageSetupData(self) -> PageSetupDialogData"""
        return _html.HtmlEasyPrinting_GetPageSetupData(*args, **kwargs)
    def GetParentWindow(*args, **kwargs):
        """GetParentWindow(self) -> Window"""
        return _html.HtmlEasyPrinting_GetParentWindow(*args, **kwargs)
    def SetParentWindow(*args, **kwargs):
        """SetParentWindow(self, Window window)"""
        return _html.HtmlEasyPrinting_SetParentWindow(*args, **kwargs)
    def GetName(*args, **kwargs):
        """GetName(self) -> String"""
        return _html.HtmlEasyPrinting_GetName(*args, **kwargs)
    def SetName(*args, **kwargs):
        """SetName(self, String name)"""
        return _html.HtmlEasyPrinting_SetName(*args, **kwargs)
    # Python property aliases for the accessor methods above.
    PageSetupData = property(GetPageSetupData,doc="See `GetPageSetupData`")
    PrintData = property(GetPrintData,doc="See `GetPrintData`")
    ParentWindow = property(GetParentWindow,SetParentWindow)
    Name = property(GetName,SetName)
# Register the proxy class with the SWIG runtime type system.
_html.HtmlEasyPrinting_swigregister(HtmlEasyPrinting)
#---------------------------------------------------------------------------
# SWIG proxy for the C++ wxHtmlBookRecord: metadata describing one help book
# (its file, base path, title and start page).
class HtmlBookRecord(object):
    """Proxy of C++ HtmlBookRecord class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String bookfile, String basepath, String title, String start) -> HtmlBookRecord"""
        _html.HtmlBookRecord_swiginit(self,_html.new_HtmlBookRecord(*args, **kwargs))
    def GetBookFile(*args, **kwargs):
        """GetBookFile(self) -> String"""
        return _html.HtmlBookRecord_GetBookFile(*args, **kwargs)
    def GetTitle(*args, **kwargs):
        """GetTitle(self) -> String"""
        return _html.HtmlBookRecord_GetTitle(*args, **kwargs)
    def GetStart(*args, **kwargs):
        """GetStart(self) -> String"""
        return _html.HtmlBookRecord_GetStart(*args, **kwargs)
    def GetBasePath(*args, **kwargs):
        """GetBasePath(self) -> String"""
        return _html.HtmlBookRecord_GetBasePath(*args, **kwargs)
    def SetContentsRange(*args, **kwargs):
        """SetContentsRange(self, int start, int end)"""
        return _html.HtmlBookRecord_SetContentsRange(*args, **kwargs)
    def GetContentsStart(*args, **kwargs):
        """GetContentsStart(self) -> int"""
        return _html.HtmlBookRecord_GetContentsStart(*args, **kwargs)
    def GetContentsEnd(*args, **kwargs):
        """GetContentsEnd(self) -> int"""
        return _html.HtmlBookRecord_GetContentsEnd(*args, **kwargs)
    def SetTitle(*args, **kwargs):
        """SetTitle(self, String title)"""
        return _html.HtmlBookRecord_SetTitle(*args, **kwargs)
    def SetBasePath(*args, **kwargs):
        """SetBasePath(self, String path)"""
        return _html.HtmlBookRecord_SetBasePath(*args, **kwargs)
    def SetStart(*args, **kwargs):
        """SetStart(self, String start)"""
        return _html.HtmlBookRecord_SetStart(*args, **kwargs)
    def GetFullPath(*args, **kwargs):
        """GetFullPath(self, String page) -> String"""
        return _html.HtmlBookRecord_GetFullPath(*args, **kwargs)
    # Python property aliases for the accessor methods above.
    BasePath = property(GetBasePath,SetBasePath,doc="See `GetBasePath` and `SetBasePath`")
    BookFile = property(GetBookFile,doc="See `GetBookFile`")
    ContentsEnd = property(GetContentsEnd,doc="See `GetContentsEnd`")
    ContentsStart = property(GetContentsStart,doc="See `GetContentsStart`")
    FullPath = property(GetFullPath,doc="See `GetFullPath`")
    Start = property(GetStart,SetStart,doc="See `GetStart` and `SetStart`")
    Title = property(GetTitle,SetTitle,doc="See `GetTitle` and `SetTitle`")
# Register the proxy class with the SWIG runtime type system.
_html.HtmlBookRecord_swigregister(HtmlBookRecord)
# SWIG proxy for the C++ wxHtmlSearchStatus: iterates over keyword-search
# results in the help books. Instances are only created by the C++ side.
class HtmlSearchStatus(object):
    """Proxy of C++ HtmlSearchStatus class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def Search(*args, **kwargs):
        """Search(self) -> bool"""
        return _html.HtmlSearchStatus_Search(*args, **kwargs)
    def IsActive(*args, **kwargs):
        """IsActive(self) -> bool"""
        return _html.HtmlSearchStatus_IsActive(*args, **kwargs)
    def GetCurIndex(*args, **kwargs):
        """GetCurIndex(self) -> int"""
        return _html.HtmlSearchStatus_GetCurIndex(*args, **kwargs)
    def GetMaxIndex(*args, **kwargs):
        """GetMaxIndex(self) -> int"""
        return _html.HtmlSearchStatus_GetMaxIndex(*args, **kwargs)
    def GetName(*args, **kwargs):
        """GetName(self) -> String"""
        return _html.HtmlSearchStatus_GetName(*args, **kwargs)
    # Python property aliases for the accessor methods above.
    CurIndex = property(GetCurIndex,doc="See `GetCurIndex`")
    MaxIndex = property(GetMaxIndex,doc="See `GetMaxIndex`")
    Name = property(GetName,doc="See `GetName`")
# Register the proxy class with the SWIG runtime type system.
_html.HtmlSearchStatus_swigregister(HtmlSearchStatus)
# SWIG proxy for the C++ wxHtmlHelpData: holds the parsed contents/index of
# the registered help books.
class HtmlHelpData(object):
    """Proxy of C++ HtmlHelpData class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> HtmlHelpData"""
        _html.HtmlHelpData_swiginit(self,_html.new_HtmlHelpData(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlHelpData
    __del__ = lambda self : None;
    def SetTempDir(*args, **kwargs):
        """SetTempDir(self, String path)"""
        return _html.HtmlHelpData_SetTempDir(*args, **kwargs)
    def AddBook(*args, **kwargs):
        """AddBook(self, String book) -> bool"""
        return _html.HtmlHelpData_AddBook(*args, **kwargs)
    def FindPageByName(*args, **kwargs):
        """FindPageByName(self, String page) -> String"""
        return _html.HtmlHelpData_FindPageByName(*args, **kwargs)
    def FindPageById(*args, **kwargs):
        """FindPageById(self, int id) -> String"""
        return _html.HtmlHelpData_FindPageById(*args, **kwargs)
    def GetBookRecArray(*args, **kwargs):
        """GetBookRecArray(self) -> wxHtmlBookRecArray"""
        return _html.HtmlHelpData_GetBookRecArray(*args, **kwargs)
    BookRecArray = property(GetBookRecArray,doc="See `GetBookRecArray`")
# Register the proxy class with the SWIG runtime type system.
_html.HtmlHelpData_swigregister(HtmlHelpData)
# Style flags (HF_*) for the HtmlHelp window/frame/dialog/controller classes.
HF_TOOLBAR = _html.HF_TOOLBAR
HF_CONTENTS = _html.HF_CONTENTS
HF_INDEX = _html.HF_INDEX
HF_SEARCH = _html.HF_SEARCH
HF_BOOKMARKS = _html.HF_BOOKMARKS
HF_OPEN_FILES = _html.HF_OPEN_FILES
HF_PRINT = _html.HF_PRINT
HF_FLAT_TOOLBAR = _html.HF_FLAT_TOOLBAR
HF_MERGE_BOOKS = _html.HF_MERGE_BOOKS
HF_ICONS_BOOK = _html.HF_ICONS_BOOK
HF_ICONS_BOOK_CHAPTER = _html.HF_ICONS_BOOK_CHAPTER
HF_ICONS_FOLDER = _html.HF_ICONS_FOLDER
HF_DEFAULT_STYLE = _html.HF_DEFAULT_STYLE
HF_EMBEDDED = _html.HF_EMBEDDED
HF_DIALOG = _html.HF_DIALOG
HF_FRAME = _html.HF_FRAME
HF_MODAL = _html.HF_MODAL
# Widget IDs (ID_HTML_*) used by the help window's child controls.
ID_HTML_PANEL = _html.ID_HTML_PANEL
ID_HTML_BACK = _html.ID_HTML_BACK
ID_HTML_FORWARD = _html.ID_HTML_FORWARD
ID_HTML_UPNODE = _html.ID_HTML_UPNODE
ID_HTML_UP = _html.ID_HTML_UP
ID_HTML_DOWN = _html.ID_HTML_DOWN
ID_HTML_PRINT = _html.ID_HTML_PRINT
ID_HTML_OPENFILE = _html.ID_HTML_OPENFILE
ID_HTML_OPTIONS = _html.ID_HTML_OPTIONS
ID_HTML_BOOKMARKSLIST = _html.ID_HTML_BOOKMARKSLIST
ID_HTML_BOOKMARKSADD = _html.ID_HTML_BOOKMARKSADD
ID_HTML_BOOKMARKSREMOVE = _html.ID_HTML_BOOKMARKSREMOVE
ID_HTML_TREECTRL = _html.ID_HTML_TREECTRL
ID_HTML_INDEXPAGE = _html.ID_HTML_INDEXPAGE
ID_HTML_INDEXLIST = _html.ID_HTML_INDEXLIST
ID_HTML_INDEXTEXT = _html.ID_HTML_INDEXTEXT
ID_HTML_INDEXBUTTON = _html.ID_HTML_INDEXBUTTON
ID_HTML_INDEXBUTTONALL = _html.ID_HTML_INDEXBUTTONALL
ID_HTML_NOTEBOOK = _html.ID_HTML_NOTEBOOK
ID_HTML_SEARCHPAGE = _html.ID_HTML_SEARCHPAGE
ID_HTML_SEARCHTEXT = _html.ID_HTML_SEARCHTEXT
ID_HTML_SEARCHLIST = _html.ID_HTML_SEARCHLIST
ID_HTML_SEARCHBUTTON = _html.ID_HTML_SEARCHBUTTON
ID_HTML_SEARCHCHOICE = _html.ID_HTML_SEARCHCHOICE
ID_HTML_COUNTINFO = _html.ID_HTML_COUNTINFO
# SWIG proxy for the C++ wxHtmlHelpWindow: the embeddable help viewer widget
# (toolbar, contents/index/search panes plus an HtmlWindow).
class HtmlHelpWindow(_core.Window):
    """Proxy of C++ HtmlHelpWindow class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        __init__(self, Window parent, int ?, Point pos=DefaultPosition, Size size=DefaultSize,
            int style=wxTAB_TRAVERSAL|wxNO_BORDER,
            int helpStyle=HF_DEFAULT_STYLE,
            HtmlHelpData data=None) -> HtmlHelpWindow
        """
        _html.HtmlHelpWindow_swiginit(self,_html.new_HtmlHelpWindow(*args, **kwargs))
        # Link the C++ object back to this proxy for Original-Object-Return.
        self._setOORInfo(self)
    def Create(*args, **kwargs):
        """
        Create(self, Window parent, int id, Point pos=DefaultPosition, Size size=DefaultSize,
            int style=wxTAB_TRAVERSAL|wxNO_BORDER,
            int helpStyle=HF_DEFAULT_STYLE) -> bool
        """
        return _html.HtmlHelpWindow_Create(*args, **kwargs)
    def GetData(*args, **kwargs):
        """GetData(self) -> HtmlHelpData"""
        return _html.HtmlHelpWindow_GetData(*args, **kwargs)
    def GetController(*args, **kwargs):
        """GetController(self) -> HtmlHelpController"""
        return _html.HtmlHelpWindow_GetController(*args, **kwargs)
    def SetController(*args, **kwargs):
        """SetController(self, HtmlHelpController controller)"""
        return _html.HtmlHelpWindow_SetController(*args, **kwargs)
    def Display(*args, **kwargs):
        """Display(self, String x) -> bool"""
        return _html.HtmlHelpWindow_Display(*args, **kwargs)
    def DisplayID(*args, **kwargs):
        """DisplayID(self, int id) -> bool"""
        return _html.HtmlHelpWindow_DisplayID(*args, **kwargs)
    def DisplayContents(*args, **kwargs):
        """DisplayContents(self) -> bool"""
        return _html.HtmlHelpWindow_DisplayContents(*args, **kwargs)
    def DisplayIndex(*args, **kwargs):
        """DisplayIndex(self) -> bool"""
        return _html.HtmlHelpWindow_DisplayIndex(*args, **kwargs)
    def KeywordSearch(*args, **kwargs):
        """KeywordSearch(self, String keyword, wxHelpSearchMode mode=wxHELP_SEARCH_ALL) -> bool"""
        return _html.HtmlHelpWindow_KeywordSearch(*args, **kwargs)
    def UseConfig(*args, **kwargs):
        """UseConfig(self, ConfigBase config, String rootpath=wxEmptyString)"""
        return _html.HtmlHelpWindow_UseConfig(*args, **kwargs)
    def ReadCustomization(*args, **kwargs):
        """ReadCustomization(self, ConfigBase cfg, String path=wxEmptyString)"""
        return _html.HtmlHelpWindow_ReadCustomization(*args, **kwargs)
    def WriteCustomization(*args, **kwargs):
        """WriteCustomization(self, ConfigBase cfg, String path=wxEmptyString)"""
        return _html.HtmlHelpWindow_WriteCustomization(*args, **kwargs)
    def NotifyPageChanged(*args, **kwargs):
        """NotifyPageChanged(self)"""
        return _html.HtmlHelpWindow_NotifyPageChanged(*args, **kwargs)
    def RefreshLists(*args, **kwargs):
        """RefreshLists(self)"""
        return _html.HtmlHelpWindow_RefreshLists(*args, **kwargs)
    def GetHtmlWindow(*args, **kwargs):
        """GetHtmlWindow(self) -> HtmlWindow"""
        return _html.HtmlHelpWindow_GetHtmlWindow(*args, **kwargs)
    def GetSplitterWindow(*args, **kwargs):
        """GetSplitterWindow(self) -> SplitterWindow"""
        return _html.HtmlHelpWindow_GetSplitterWindow(*args, **kwargs)
    def GetToolBar(*args, **kwargs):
        """GetToolBar(self) -> wxToolBar"""
        return _html.HtmlHelpWindow_GetToolBar(*args, **kwargs)
    def GetCfgData(*args, **kwargs):
        """GetCfgData(self) -> wxHtmlHelpFrameCfg"""
        return _html.HtmlHelpWindow_GetCfgData(*args, **kwargs)
    def GetTreeCtrl(*args, **kwargs):
        """GetTreeCtrl(self) -> wxPyTreeCtrl"""
        return _html.HtmlHelpWindow_GetTreeCtrl(*args, **kwargs)
    # Python property aliases for the accessor methods above.
    CfgData = property(GetCfgData,doc="See `GetCfgData`")
    Controller = property(GetController,SetController,doc="See `GetController` and `SetController`")
    Data = property(GetData,doc="See `GetData`")
    HtmlWindow = property(GetHtmlWindow,doc="See `GetHtmlWindow`")
    SplitterWindow = property(GetSplitterWindow,doc="See `GetSplitterWindow`")
    ToolBar = property(GetToolBar,doc="See `GetToolBar`")
    TreeCtrl = property(GetTreeCtrl,doc="See `GetTreeCtrl`")
# Register the proxy class with the SWIG runtime type system.
_html.HtmlHelpWindow_swigregister(HtmlHelpWindow)
def PreHtmlHelpWindow(*args, **kwargs):
    """PreHtmlHelpWindow(HtmlHelpData data=None) -> HtmlHelpWindow

    Two-phase-create factory: allocates the C++ object now; the window is
    realized later with Create().
    """
    val = _html.new_PreHtmlHelpWindow(*args, **kwargs)
    # Bug fix: the generated code called self._setOORInfo(self), but `self`
    # does not exist in this module-level function and raised NameError on
    # every call. Register the freshly created window object instead, as the
    # other Pre* factories in wxPython do.
    val._setOORInfo(val)
    return val
# Event-type IDs for the Html cell/link events; bound below via PyEventBinder.
wxEVT_COMMAND_HTML_CELL_CLICKED = _html.wxEVT_COMMAND_HTML_CELL_CLICKED
wxEVT_COMMAND_HTML_CELL_HOVER = _html.wxEVT_COMMAND_HTML_CELL_HOVER
wxEVT_COMMAND_HTML_LINK_CLICKED = _html.wxEVT_COMMAND_HTML_LINK_CLICKED
# SWIG proxy for the C++ wxHtmlCellEvent: fired on cell click/hover.
class HtmlCellEvent(_core.CommandEvent):
    """Proxy of C++ HtmlCellEvent class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        __init__(self, EventType commandType, int id, HtmlCell cell, Point pt,
            MouseEvent ev) -> HtmlCellEvent
        """
        _html.HtmlCellEvent_swiginit(self,_html.new_HtmlCellEvent(*args, **kwargs))
    def GetCell(*args, **kwargs):
        """GetCell(self) -> HtmlCell"""
        return _html.HtmlCellEvent_GetCell(*args, **kwargs)
    def GetPoint(*args, **kwargs):
        """GetPoint(self) -> Point"""
        return _html.HtmlCellEvent_GetPoint(*args, **kwargs)
    def GetMouseEvent(*args, **kwargs):
        """GetMouseEvent(self) -> MouseEvent"""
        return _html.HtmlCellEvent_GetMouseEvent(*args, **kwargs)
    def SetLinkClicked(*args, **kwargs):
        """SetLinkClicked(self, bool linkclicked)"""
        return _html.HtmlCellEvent_SetLinkClicked(*args, **kwargs)
    def GetLinkClicked(*args, **kwargs):
        """GetLinkClicked(self) -> bool"""
        return _html.HtmlCellEvent_GetLinkClicked(*args, **kwargs)
# Register the proxy class with the SWIG runtime type system.
_html.HtmlCellEvent_swigregister(HtmlCellEvent)
# SWIG proxy for the C++ wxHtmlLinkEvent: fired when a hyperlink is clicked.
class HtmlLinkEvent(_core.CommandEvent):
    """Proxy of C++ HtmlLinkEvent class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, int id, HtmlLinkInfo linkinfo) -> HtmlLinkEvent"""
        _html.HtmlLinkEvent_swiginit(self,_html.new_HtmlLinkEvent(*args, **kwargs))
    def GetLinkInfo(*args, **kwargs):
        """GetLinkInfo(self) -> HtmlLinkInfo"""
        return _html.HtmlLinkEvent_GetLinkInfo(*args, **kwargs)
# Register the proxy class with the SWIG runtime type system.
_html.HtmlLinkEvent_swigregister(HtmlLinkEvent)
# Event binders (the trailing 1 is the expected number of event IDs).
EVT_HTML_CELL_CLICKED = wx.PyEventBinder( wxEVT_COMMAND_HTML_CELL_CLICKED, 1 )
EVT_HTML_CELL_HOVER = wx.PyEventBinder( wxEVT_COMMAND_HTML_CELL_HOVER, 1 )
EVT_HTML_LINK_CLICKED = wx.PyEventBinder( wxEVT_COMMAND_HTML_LINK_CLICKED, 1 )
# SWIG proxy for the C++ wxHtmlHelpFrame: a top-level frame hosting an
# HtmlHelpWindow, plus pre-refactor compatibility methods that forward to it.
class HtmlHelpFrame(_windows.Frame):
    """Proxy of C++ HtmlHelpFrame class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        __init__(self, Window parent, int ?, String title=EmptyString, int style=wxHF_DEFAULTSTYLE,
            HtmlHelpData data=None,
            ConfigBase config=None, String rootpath=EmptyString) -> HtmlHelpFrame
        """
        _html.HtmlHelpFrame_swiginit(self,_html.new_HtmlHelpFrame(*args, **kwargs))
        # Link the C++ object back to this proxy for Original-Object-Return.
        self._setOORInfo(self)
    def Create(*args, **kwargs):
        """
        Create(self, Window parent, int id, String title=EmptyString, int style=HF_DEFAULT_STYLE,
            ConfigBase config=None,
            String rootpath=EmptyString) -> bool
        """
        return _html.HtmlHelpFrame_Create(*args, **kwargs)
    def GetData(*args, **kwargs):
        """GetData(self) -> HtmlHelpData"""
        return _html.HtmlHelpFrame_GetData(*args, **kwargs)
    def SetTitleFormat(*args, **kwargs):
        """SetTitleFormat(self, String format)"""
        return _html.HtmlHelpFrame_SetTitleFormat(*args, **kwargs)
    def AddGrabIfNeeded(*args, **kwargs):
        """AddGrabIfNeeded(self)"""
        return _html.HtmlHelpFrame_AddGrabIfNeeded(*args, **kwargs)
    def SetShouldPreventAppExit(*args, **kwargs):
        """SetShouldPreventAppExit(self, bool enable)"""
        return _html.HtmlHelpFrame_SetShouldPreventAppExit(*args, **kwargs)
    def GetController(*args, **kwargs):
        """GetController(self) -> HtmlHelpController"""
        return _html.HtmlHelpFrame_GetController(*args, **kwargs)
    def SetController(*args, **kwargs):
        """SetController(self, HtmlHelpController controller)"""
        return _html.HtmlHelpFrame_SetController(*args, **kwargs)
    def GetHelpWindow(*args, **kwargs):
        """GetHelpWindow(self) -> HtmlHelpWindow"""
        return _html.HtmlHelpFrame_GetHelpWindow(*args, **kwargs)
    # For compatibility from before the refactor: forward to the help window.
    def Display(self, x):
        return self.GetHelpWindow().Display(x)
    def DisplayID(self, x):
        # Bug fix: previously forwarded the *builtin* `id` function instead of
        # the `x` argument, so the requested page ID was never passed on.
        return self.GetHelpWindow().DisplayID(x)
    def DisplayContents(self):
        return self.GetHelpWindow().DisplayContents()
    def DisplayIndex(self):
        return self.GetHelpWindow().DisplayIndex()
    def KeywordSearch(self, keyword):
        return self.GetHelpWindow().KeywordSearch(keyword)
    def UseConfig(self, config, rootpath=""):
        return self.GetHelpWindow().UseConfig(config, rootpath)
    def ReadCustomization(self, config, rootpath=""):
        return self.GetHelpWindow().ReadCustomization(config, rootpath)
    def WriteCustomization(self, config, rootpath=""):
        return self.GetHelpWindow().WriteCustomization(config, rootpath)
    # Python property aliases for the accessor methods above.
    Controller = property(GetController,SetController,doc="See `GetController` and `SetController`")
    Data = property(GetData,doc="See `GetData`")
    HelpWindow = property(GetHelpWindow,doc="See `GetHelpWindow`")
# Register the proxy class with the SWIG runtime type system.
_html.HtmlHelpFrame_swigregister(HtmlHelpFrame)
def PreHtmlHelpFrame(*args, **kwargs):
    """PreHtmlHelpFrame(HtmlHelpData data=None) -> HtmlHelpFrame

    Two-phase-create factory: allocates the C++ object now; the frame is
    realized later with Create().
    """
    val = _html.new_PreHtmlHelpFrame(*args, **kwargs)
    # Bug fix: the generated code called self._setOORInfo(self), but `self`
    # does not exist in this module-level function and raised NameError on
    # every call. Register the freshly created frame object instead.
    val._setOORInfo(val)
    return val
# SWIG proxy for the C++ wxHtmlHelpDialog: a dialog hosting an HtmlHelpWindow.
class HtmlHelpDialog(_windows.Dialog):
    """Proxy of C++ HtmlHelpDialog class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        __init__(self, Window parent, int ?, String title=EmptyString, int style=HF_DEFAULT_STYLE,
            HtmlHelpData data=None) -> HtmlHelpDialog
        """
        _html.HtmlHelpDialog_swiginit(self,_html.new_HtmlHelpDialog(*args, **kwargs))
        # Link the C++ object back to this proxy for Original-Object-Return.
        self._setOORInfo(self)
    def Create(*args, **kwargs):
        """Create(self, Window parent, int id, String title=EmptyString, int style=HF_DEFAULT_STYLE) -> bool"""
        return _html.HtmlHelpDialog_Create(*args, **kwargs)
    def GetData(*args, **kwargs):
        """GetData(self) -> HtmlHelpData"""
        return _html.HtmlHelpDialog_GetData(*args, **kwargs)
    def GetController(*args, **kwargs):
        """GetController(self) -> HtmlHelpController"""
        return _html.HtmlHelpDialog_GetController(*args, **kwargs)
    def SetController(*args, **kwargs):
        """SetController(self, HtmlHelpController controller)"""
        return _html.HtmlHelpDialog_SetController(*args, **kwargs)
    def GetHelpWindow(*args, **kwargs):
        """GetHelpWindow(self) -> HtmlHelpWindow"""
        return _html.HtmlHelpDialog_GetHelpWindow(*args, **kwargs)
    def SetTitleFormat(*args, **kwargs):
        """SetTitleFormat(self, String format)"""
        return _html.HtmlHelpDialog_SetTitleFormat(*args, **kwargs)
    # Python property aliases for the accessor methods above.
    Controller = property(GetController,SetController,doc="See `GetController` and `SetController`")
    Data = property(GetData,doc="See `GetData`")
    HelpWindow = property(GetHelpWindow,doc="See `GetHelpWindow`")
# Register the proxy class with the SWIG runtime type system.
_html.HtmlHelpDialog_swigregister(HtmlHelpDialog)
def PreHtmlHelpDialog(*args, **kwargs):
    """PreHtmlHelpDialog(HtmlHelpData data=None) -> HtmlHelpDialog

    Two-phase-create factory: allocates the C++ object now; the dialog is
    realized later with Create().
    """
    val = _html.new_PreHtmlHelpDialog(*args, **kwargs)
    # Bug fix: the generated code called self._setOORInfo(self), but `self`
    # does not exist in this module-level function and raised NameError on
    # every call. Register the freshly created dialog object instead.
    val._setOORInfo(val)
    return val
# SWIG proxy for the C++ wxHelpControllerBase: abstract base of all help
# controllers. Instances are only created by the C++ side / subclasses.
class HelpControllerBase(_core.Object):
    """Proxy of C++ HelpControllerBase class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def Initialize(*args):
        """
        Initialize(self, String file, int server) -> bool
        Initialize(self, String file) -> bool
        """
        return _html.HelpControllerBase_Initialize(*args)
    def SetViewer(*args, **kwargs):
        """SetViewer(self, String viewer, long flags=0)"""
        return _html.HelpControllerBase_SetViewer(*args, **kwargs)
    def LoadFile(*args, **kwargs):
        """LoadFile(self, String file=wxEmptyString) -> bool"""
        return _html.HelpControllerBase_LoadFile(*args, **kwargs)
    def DisplayContents(*args, **kwargs):
        """DisplayContents(self) -> bool"""
        return _html.HelpControllerBase_DisplayContents(*args, **kwargs)
    def DisplayContextPopup(*args, **kwargs):
        """DisplayContextPopup(self, int contextId) -> bool"""
        return _html.HelpControllerBase_DisplayContextPopup(*args, **kwargs)
    def DisplayTextPopup(*args, **kwargs):
        """DisplayTextPopup(self, String text, Point pos) -> bool"""
        return _html.HelpControllerBase_DisplayTextPopup(*args, **kwargs)
    def DisplaySection(*args):
        """
        DisplaySection(self, int sectionNo) -> bool
        DisplaySection(self, String section) -> bool
        """
        return _html.HelpControllerBase_DisplaySection(*args)
    def DisplayBlock(*args, **kwargs):
        """DisplayBlock(self, long blockNo) -> bool"""
        return _html.HelpControllerBase_DisplayBlock(*args, **kwargs)
    def KeywordSearch(*args, **kwargs):
        """KeywordSearch(self, String k, wxHelpSearchMode mode=wxHELP_SEARCH_ALL) -> bool"""
        return _html.HelpControllerBase_KeywordSearch(*args, **kwargs)
    def SetFrameParameters(*args, **kwargs):
        """
        SetFrameParameters(self, String title, Size size, Point pos=DefaultPosition,
            bool newFrameEachTime=False)
        """
        return _html.HelpControllerBase_SetFrameParameters(*args, **kwargs)
    def Quit(*args, **kwargs):
        """Quit(self) -> bool"""
        return _html.HelpControllerBase_Quit(*args, **kwargs)
    def OnQuit(*args, **kwargs):
        """OnQuit(self)"""
        return _html.HelpControllerBase_OnQuit(*args, **kwargs)
    def SetParentWindow(*args, **kwargs):
        """SetParentWindow(self, Window win)"""
        return _html.HelpControllerBase_SetParentWindow(*args, **kwargs)
    def GetParentWindow(*args, **kwargs):
        """GetParentWindow(self) -> Window"""
        return _html.HelpControllerBase_GetParentWindow(*args, **kwargs)
    ParentWindow = property(GetParentWindow,SetParentWindow,doc="See `GetParentWindow` and `SetParentWindow`")
# Register the proxy class with the SWIG runtime type system.
_html.HelpControllerBase_swigregister(HelpControllerBase)
# SWIG proxy for the C++ wxHtmlHelpController: manages help books and the
# frame/dialog/window used to display them.
class HtmlHelpController(HelpControllerBase):
    """Proxy of C++ HtmlHelpController class"""
    # `thisown` tracks whether Python owns (and must delete) the C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, int style=HF_DEFAULT_STYLE, Window parentWindow=None) -> HtmlHelpController"""
        _html.HtmlHelpController_swiginit(self,_html.new_HtmlHelpController(*args, **kwargs))
    __swig_destroy__ = _html.delete_HtmlHelpController
    __del__ = lambda self : None;
    def SetShouldPreventAppExit(*args, **kwargs):
        """SetShouldPreventAppExit(self, bool enable)"""
        return _html.HtmlHelpController_SetShouldPreventAppExit(*args, **kwargs)
    def GetHelpWindow(*args, **kwargs):
        """GetHelpWindow(self) -> HtmlHelpWindow"""
        return _html.HtmlHelpController_GetHelpWindow(*args, **kwargs)
    def SetHelpWindow(*args, **kwargs):
        """SetHelpWindow(self, HtmlHelpWindow helpWindow)"""
        return _html.HtmlHelpController_SetHelpWindow(*args, **kwargs)
    def GetFrame(*args, **kwargs):
        """GetFrame(self) -> HtmlHelpFrame"""
        return _html.HtmlHelpController_GetFrame(*args, **kwargs)
    def GetDialog(*args, **kwargs):
        """GetDialog(self) -> HtmlHelpDialog"""
        return _html.HtmlHelpController_GetDialog(*args, **kwargs)
    def SetTitleFormat(*args, **kwargs):
        """SetTitleFormat(self, String format)"""
        return _html.HtmlHelpController_SetTitleFormat(*args, **kwargs)
    def SetTempDir(*args, **kwargs):
        """SetTempDir(self, String path)"""
        return _html.HtmlHelpController_SetTempDir(*args, **kwargs)
    def AddBook(*args, **kwargs):
        """AddBook(self, String book, int show_wait_msg=False) -> bool"""
        return _html.HtmlHelpController_AddBook(*args, **kwargs)
    def Display(*args, **kwargs):
        """Display(self, String x)"""
        return _html.HtmlHelpController_Display(*args, **kwargs)
    def DisplayID(*args, **kwargs):
        """DisplayID(self, int id)"""
        return _html.HtmlHelpController_DisplayID(*args, **kwargs)
    def DisplayContents(*args, **kwargs):
        """DisplayContents(self)"""
        return _html.HtmlHelpController_DisplayContents(*args, **kwargs)
    def DisplayIndex(*args, **kwargs):
        """DisplayIndex(self)"""
        return _html.HtmlHelpController_DisplayIndex(*args, **kwargs)
    def KeywordSearch(*args, **kwargs):
        """KeywordSearch(self, String keyword) -> bool"""
        return _html.HtmlHelpController_KeywordSearch(*args, **kwargs)
    def UseConfig(*args, **kwargs):
        """UseConfig(self, ConfigBase config, String rootpath=EmptyString)"""
        return _html.HtmlHelpController_UseConfig(*args, **kwargs)
    def ReadCustomization(*args, **kwargs):
        """ReadCustomization(self, ConfigBase cfg, String path=EmptyString)"""
        return _html.HtmlHelpController_ReadCustomization(*args, **kwargs)
    def WriteCustomization(*args, **kwargs):
        """WriteCustomization(self, ConfigBase cfg, String path=EmptyString)"""
        return _html.HtmlHelpController_WriteCustomization(*args, **kwargs)
    def MakeModalIfNeeded(*args, **kwargs):
        """MakeModalIfNeeded(self)"""
        return _html.HtmlHelpController_MakeModalIfNeeded(*args, **kwargs)
    def FindTopLevelWindow(*args, **kwargs):
        """FindTopLevelWindow(self) -> Window"""
        return _html.HtmlHelpController_FindTopLevelWindow(*args, **kwargs)
    # Python property aliases for the accessor methods above.
    Dialog = property(GetDialog,doc="See `GetDialog`")
    Frame = property(GetFrame,doc="See `GetFrame`")
    HelpWindow = property(GetHelpWindow,SetHelpWindow,doc="See `GetHelpWindow` and `SetHelpWindow`")
# Register the proxy class with the SWIG runtime type system.
_html.HtmlHelpController_swigregister(HtmlHelpController)
class HtmlModalHelp(object):
    """Proxy of C++ HtmlModalHelp class"""
    # The membership flag tracks whether Python owns the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        __init__(self, Window parent, String helpFile, String topic=wxEmptyString,
            int style=wxHF_DEFAULT_STYLE|wxHF_DIALOG|wxHF_MODAL) -> HtmlModalHelp
        """
        # Constructing the object immediately shows the modal help window.
        _html.HtmlModalHelp_swiginit(self,_html.new_HtmlModalHelp(*args, **kwargs))
# Register the proxy class with the SWIG runtime type system.
_html.HtmlModalHelp_swigregister(HtmlModalHelp)
|
DaniilLeksin/gc
|
wx/html.py
|
Python
|
apache-2.0
| 86,366
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
"""
This module implements a friendly (well, friendlier) interface between the raw JSON
responses from JIRA and the Resource/dict abstractions provided by this library. Users
will construct a JIRA object as described below. Full API documentation can be found
at: https://jira-python.readthedocs.org/en/latest/
"""
from functools import wraps
import imghdr
import mimetypes
import copy
import os
import re
import string
import tempfile
import logging
import json
import warnings
import pprint
import sys
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from six import string_types, integer_types
# six.moves does not play well with pyinstaller, see https://github.com/pycontribs/jira/issues/38
# from six.moves import html_parser
if sys.version_info < (3, 0, 0):
import HTMLParser as html_parser
else:
import html.parser as html_parser
import requests
try:
from requests_toolbelt import MultipartEncoder
except:
pass
# JIRA specific resources
from jira.resources import Resource, Issue, Comment, Project, Attachment, Component, Dashboard, Filter, Votes, Watchers, \
Worklog, IssueLink, IssueLinkType, IssueType, Priority, Version, Role, Resolution, SecurityLevel, Status, User, \
CustomFieldOption, RemoteLink
# GreenHopper specific resources
from jira.resources import GreenHopperResource, Board, Sprint
from jira.resilientsession import ResilientSession
from jira import __version__
from jira.utils import threaded_requests, json_loads, JIRAError, CaseInsensitiveDict
try:
from random import SystemRandom
random = SystemRandom()
except ImportError:
import random
# warnings.simplefilter('default')
# encoding = sys.getdefaultencoding()
# if encoding != 'UTF8':
# warnings.warn("Python default encoding is '%s' instead of 'UTF8' which means that there is a big change of having problems. Possible workaround http://stackoverflow.com/a/17628350/99834" % encoding)
def translate_resource_args(func):
    """Decorator that replaces Issue and Project positional arguments with
    their ``key`` before calling the wrapped function.

    Keyword arguments are passed through untouched.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        translated = [arg.key if isinstance(arg, (Issue, Project)) else arg
                      for arg in args]
        return func(*translated, **kwargs)
    return wrapper
class ResultList(list):
    """A ``list`` subclass that also carries the server-side ``total`` count.

    ``total`` defaults to the length of the local items when the server does
    not report a separate total.
    """
    def __init__(self, iterable=None, _total=None):
        super(ResultList, self).__init__(() if iterable is None else iterable)
        # total reflects the server's count, which may exceed len(self).
        self.total = len(self) if _total is None else _total
class JIRA(object):
"""
User interface to JIRA.
Clients interact with JIRA by constructing an instance of this object and calling its methods. For addressable
resources in JIRA -- those with "self" links -- an appropriate subclass of :py:class:`Resource` will be returned
with customized ``update()`` and ``delete()`` methods, along with attribute access to fields. This means that calls
of the form ``issue.fields.summary`` will be resolved into the proper lookups to return the JSON value at that
mapping. Methods that do not return resources will return a dict constructed from the JSON response or a scalar
value; see each method's documentation for details on what that method returns.
"""
DEFAULT_OPTIONS = {
"server": "http://localhost:2990/jira",
"rest_path": "api",
"rest_api_version": "2",
"verify": True,
"resilient": True,
"async": False,
"client_cert": None,
"check_update": True,
"headers": {
'X-Atlassian-Token': 'no-check',
'Cache-Control': 'no-cache',
# 'Accept': 'application/json;charset=UTF-8', # default for REST
'Content-Type': 'application/json', # ;charset=UTF-8',
# 'Accept': 'application/json', # default for REST
#'Pragma': 'no-cache',
#'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT'
}
}
checked_version = False
JIRA_BASE_URL = '{server}/rest/api/{rest_api_version}/{path}'
AGILE_BASE_URL = '{server}/rest/greenhopper/1.0/{path}'
    # NOTE(review): 'async' became a reserved keyword in Python 3.7; this
    # signature only parses on Python 2 / <= 3.6 interpreters.
    def __init__(self, server=None, options=None, basic_auth=None, oauth=None, validate=None, async=False,
                 logging=True, max_retries=3):
        """
        Construct a JIRA client instance.
        Without any arguments, this client will connect anonymously to the JIRA instance
        started by the Atlassian Plugin SDK from one of the 'atlas-run', ``atlas-debug``,
        or ``atlas-run-standalone`` commands. By default, this instance runs at
        ``http://localhost:2990/jira``. The ``options`` argument can be used to set the JIRA instance to use.
        Authentication is handled with the ``basic_auth`` argument. If authentication is supplied (and is
        accepted by JIRA), the client will remember it for subsequent requests.
        For quick command line access to a server, see the ``jirashell`` script included with this distribution.
        The easiest way to instantiate is using j = JIRA("https://jira.atlasian.com")
        :param options: Specify the server and properties this client will use. Use a dict with any
            of the following properties:
            * server -- the server address and context path to use. Defaults to ``http://localhost:2990/jira``.
            * rest_path -- the root REST path to use. Defaults to ``api``, where the JIRA REST resources live.
            * rest_api_version -- the version of the REST resources under rest_path to use. Defaults to ``2``.
            * verify -- Verify SSL certs. Defaults to ``True``.
            * client_cert -- a tuple of (cert,key) for the requests library for client side SSL
        :param basic_auth: A tuple of username and password to use when establishing a session via HTTP BASIC
            authentication.
        :param oauth: A dict of properties for OAuth authentication. The following properties are required:
            * access_token -- OAuth access token for the user
            * access_token_secret -- OAuth access token secret to sign with the key
            * consumer_key -- key of the OAuth application link defined in JIRA
            * key_cert -- private key file to sign requests with (should be the pair of the public key supplied to
            JIRA in the OAuth application link)
        :param validate: If true it will validate your credentials first. Remember that if you are accesing JIRA
            as anononymous it will fail to instanciate.
        :param async: To enable async requests for those actions where we implemented it, like issue update() or delete().
            Obviously this means that you cannot rely on the return code when this is enabled.
        """
        if options is None:
            options = {}
        # Legacy call style: JIRA({'server': url}) passed the options dict as
        # the first positional argument.
        if server and hasattr(server, 'keys'):
            warnings.warn(
                "Old API usage, use JIRA(url) or JIRA(options={'server': url}, when using dictionary always use named parameters.",
                DeprecationWarning)
            options = server
            server = None
        if server:
            options['server'] = server
        if async:
            options['async'] = async
        # The 'logging' parameter shadows the logging module inside this
        # method; see the globals()['logging'] workaround below.
        self.logging = logging
        self._options = copy.copy(JIRA.DEFAULT_OPTIONS)
        self._options.update(options)
        # Rip off trailing slash since all urls depend on that
        if self._options['server'].endswith('/'):
            self._options['server'] = self._options['server'][:-1]
        self._try_magic()
        # Pick exactly one authentication strategy; anonymous otherwise.
        if oauth:
            self._create_oauth_session(oauth)
        elif basic_auth:
            self._create_http_basic_session(*basic_auth)
            self._session.headers.update(self._options['headers'])
        else:
            verify = self._options['verify']
            self._session = ResilientSession()
            self._session.verify = verify
            self._session.headers.update(self._options['headers'])
        self._session.max_retries = max_retries
        if validate:
            # This will raise an Exception if you are not allowed to login.
            # It's better to fail faster than later.
            self.session()
        # We need version in order to know what API calls are available or not
        si = self.server_info()
        try:
            self._version = tuple(si['versionNumbers'])
        except Exception as e:
            # globals() lookup because the 'logging' parameter shadows the
            # logging module in this scope.
            globals()['logging'].error("invalid server_info: %s", si)
            raise e
        if self._options['check_update'] and not JIRA.checked_version:
            self._check_update_()
            JIRA.checked_version = True
def _check_update_(self):
# check if the current version of the library is outdated
try:
data = requests.get("http://pypi.python.org/pypi/jira/json", timeout=2.001).json()
released_version = data['info']['version']
if released_version > __version__:
warnings.warn("You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % (
__version__, released_version))
except requests.RequestException:
pass
except Exception as e:
logging.warning(e)
def __del__(self):
session = getattr(self, "_session", None)
if session is not None:
if sys.version_info < (3, 4, 0): # workaround for https://github.com/kennethreitz/requests/issues/2303
session.close()
def _check_for_html_error(self, content):
# TODO: Make it return errors when content is a webpage with errors
# JIRA has the bad habbit of returning errors in pages with 200 and
# embedding the error in a huge webpage.
if '<!-- SecurityTokenMissing -->' in content:
logging.warning("Got SecurityTokenMissing")
raise JIRAError("SecurityTokenMissing: %s" % content)
return False
return True
# Information about this client
def client_info(self):
"""Get the server this client is connected to."""
return self._options['server']
# Universal resource loading
def find(self, resource_format, ids=None):
"""
Get a Resource object for any addressable resource on the server.
This method is a universal resource locator for any RESTful resource in JIRA. The
argument ``resource_format`` is a string of the form ``resource``, ``resource/{0}``,
``resource/{0}/sub``, ``resource/{0}/sub/{1}``, etc. The format placeholders will be
populated from the ``ids`` argument if present. The existing authentication session
will be used.
The return value is an untyped Resource object, which will not support specialized
:py:meth:`.Resource.update` or :py:meth:`.Resource.delete` behavior. Moreover, it will
not know to return an issue Resource if the client uses the resource issue path. For this
reason, it is intended to support resources that are not included in the standard
Atlassian REST API.
:param resource_format: the subpath to the resource string
:param ids: values to substitute in the ``resource_format`` string
:type ids: tuple or None
"""
resource = Resource(resource_format, self._options, self._session)
resource.find(ids)
return resource
def async_do(self, size=10):
"""
This will execute all async jobs and wait for them to finish. By default it will run on 10 threads.
size: number of threads to run on.
:return:
"""
if hasattr(self._session, '_async_jobs'):
logging.info("Executing async %s jobs found in queue by using %s threads..." % (
len(self._session._async_jobs), size))
threaded_requests.map(self._session._async_jobs, size=size)
# Application properties
# non-resource
def application_properties(self, key=None):
"""
Return the mutable server application properties.
:param key: the single property to return a value for
"""
params = {}
if key is not None:
params['key'] = key
return self._get_json('application-properties', params=params)
def set_application_property(self, key, value):
"""
Set the application property.
:param key: key of the property to set
:param value: value to assign to the property
"""
url = self._options['server'] + \
'/rest/api/2/application-properties/' + key
payload = {
'id': key,
'value': value
}
r = self._session.put(
url, data=json.dumps(payload))
def applicationlinks(self, cached=True):
"""
List of application links
:return: json
"""
# if cached, return the last result
if cached and hasattr(self, '_applicationlinks'):
return self._applicationlinks
#url = self._options['server'] + '/rest/applinks/latest/applicationlink'
url = self._options['server'] + \
'/rest/applinks/latest/listApplicationlinks'
r = self._session.get(url)
o = json_loads(r)
if 'list' in o:
self._applicationlinks = o['list']
else:
self._applicationlinks = []
return self._applicationlinks
# Attachments
def attachment(self, id):
"""Get an attachment Resource from the server for the specified ID."""
return self._find_for_resource(Attachment, id)
# non-resource
def attachment_meta(self):
"""Get the attachment metadata."""
return self._get_json('attachment/meta')
    @translate_resource_args
    def add_attachment(self, issue, attachment, filename=None):
        """
        Attach an attachment to an issue and returns a Resource for it.
        The client will *not* attempt to open or validate the attachment; it expects a file-like object to be ready
        for its use. The user is still responsible for tidying up (e.g., closing the file, killing the socket, etc.)
        :param issue: the issue to attach the attachment to
        :param attachment: file-like object to attach to the issue, also works if it is a string with the filename.
        :param filename: optional name for the attached file. If omitted, the file object's ``name`` attribute
            is used. If you aquired the file-like object by any other method than ``open()``, make sure
            that a name is specified in one way or the other.
        :rtype: an Attachment Resource
        """
        # A plain string is treated as a path and opened in binary mode.
        # NOTE(review): a file opened here is never explicitly closed.
        if isinstance(attachment, string_types):
            attachment = open(attachment, "rb")
        if hasattr(attachment, 'read') and hasattr(attachment, 'mode') and attachment.mode != 'rb':
            logging.warning(
                "%s was not opened in 'rb' mode, attaching file may fail." % attachment.name)
        # TODO: Support attaching multiple files at once?
        url = self._get_url('issue/' + str(issue) + '/attachments')
        fname = filename
        if not fname:
            fname = os.path.basename(attachment.name)
        # Prefer the streaming MultipartEncoder when requests_toolbelt was
        # importable at module load; otherwise fall back to requests' own
        # multipart support, which buffers the whole file in memory.
        if 'MultipartEncoder' not in globals():
            method = 'old'
            r = self._session.post(
                url,
                files={
                    'file': (fname, attachment, 'application/octet-stream')},
                headers=CaseInsensitiveDict({'content-type': None, 'X-Atlassian-Token': 'nocheck'}))
        else:
            method = 'MultipartEncoder'
            # retry_data lets the resilient session rebuild the single-use
            # encoder when the request must be retried.
            def file_stream():
                return MultipartEncoder(
                    fields={
                        'file': (fname, attachment, 'application/octet-stream')}
                )
            m = file_stream()
            r = self._session.post(
                url, data=m, headers=CaseInsensitiveDict({'content-type': m.content_type, 'X-Atlassian-Token': 'nocheck'}), retry_data=file_stream)
        attachment = Attachment(self._options, self._session, json_loads(r)[0])
        # A zero-size attachment almost certainly means the upload failed.
        if attachment.size == 0:
            raise JIRAError("Added empty attachment via %s method?!: r: %s\nattachment: %s" % (method, r, attachment))
        return attachment
# Components
def component(self, id):
"""
Get a component Resource from the server.
:param id: ID of the component to get
"""
return self._find_for_resource(Component, id)
@translate_resource_args
def create_component(self, name, project, description=None, leadUserName=None, assigneeType=None,
isAssigneeTypeValid=False):
"""
Create a component inside a project and return a Resource for it.
:param name: name of the component
:param project: key of the project to create the component in
:param description: a description of the component
:param leadUserName: the username of the user responsible for this component
:param assigneeType: see the ComponentBean.AssigneeType class for valid values
:param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable
"""
data = {
'name': name,
'project': project,
'isAssigneeTypeValid': isAssigneeTypeValid
}
if description is not None:
data['description'] = description
if leadUserName is not None:
data['leadUserName'] = leadUserName
if assigneeType is not None:
data['assigneeType'] = assigneeType
url = self._get_url('component')
r = self._session.post(
url, data=json.dumps(data))
component = Component(self._options, self._session, raw=json_loads(r))
return component
def component_count_related_issues(self, id):
"""
Get the count of related issues for a component.
:type id: integer
:param id: ID of the component to use
"""
return self._get_json('component/' + id + '/relatedIssueCounts')['issueCount']
# Custom field options
def custom_field_option(self, id):
"""
Get a custom field option Resource from the server.
:param id: ID of the custom field to use
"""
return self._find_for_resource(CustomFieldOption, id)
# Dashboards
def dashboards(self, filter=None, startAt=0, maxResults=20):
"""
Return a ResultList of Dashboard resources and a ``total`` count.
:param filter: either "favourite" or "my", the type of dashboards to return
:param startAt: index of the first dashboard to return
:param maxResults: maximum number of dashboards to return. The total number of
results is always available in the ``total`` attribute of the returned ResultList.
"""
params = {}
if filter is not None:
params['filter'] = filter
params['startAt'] = startAt
params['maxResults'] = maxResults
r_json = self._get_json('dashboard', params=params)
dashboards = [Dashboard(self._options, self._session, raw_dash_json)
for raw_dash_json in r_json['dashboards']]
return ResultList(dashboards, r_json['total'])
def dashboard(self, id):
"""
Get a dashboard Resource from the server.
:param id: ID of the dashboard to get.
"""
return self._find_for_resource(Dashboard, id)
# Fields
# non-resource
def fields(self):
"""Return a list of all issue fields."""
return self._get_json('field')
# Filters
def filter(self, id):
"""
Get a filter Resource from the server.
:param id: ID of the filter to get.
"""
return self._find_for_resource(Filter, id)
def favourite_filters(self):
"""Get a list of filter Resources which are the favourites of the currently authenticated user."""
r_json = self._get_json('filter/favourite')
filters = [Filter(self._options, self._session, raw_filter_json)
for raw_filter_json in r_json]
return filters
def create_filter(self, name=None, description=None,
jql=None, favourite=None):
"""
Create a new filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if jql is not None:
data['jql'] = jql
if favourite is not None:
data['favourite'] = favourite
url = self._get_url('filter')
r = self._session.post(
url, data=json.dumps(data))
raw_filter_json = json_loads(r)
return Filter(self._options, self._session, raw=raw_filter_json)
# Groups
# non-resource
def groups(self, query=None, exclude=None, maxResults=None):
"""
Return a list of groups matching the specified criteria.
Keyword arguments:
query -- filter groups by name with this string
exclude -- filter out groups by name with this string
maxResults -- maximum results to return. defaults to system property jira.ajax.autocomplete.limit (20)
"""
params = {}
if query is not None:
params['query'] = query
if exclude is not None:
params['exclude'] = exclude
if maxResults is not None:
params['maxResults'] = maxResults
return self._get_json('groups/picker', params=params)
    def group_members(self, group):
        """
        Return a hash or users with their information. Requires JIRA 6.0 or will raise NotImplemented.

        :param group: name of the group to list members of
        """
        if self._version < (6, 0, 0):
            raise NotImplementedError(
                "Group members is not implemented in JIRA before version 6.0, upgrade the instance, if possible.")
        # The 'group' resource with expand=users returns only a window of
        # members; page through 50-user windows until all are collected.
        params = {'groupname': group, 'expand': "users"}
        r = self._get_json('group', params=params)
        size = r['users']['size']
        end_index = r['users']['end-index']
        while end_index < size - 1:
            params = {'groupname': group, 'expand': "users[%s:%s]" % (
                end_index + 1, end_index + 50)}
            r2 = self._get_json('group', params=params)
            for user in r2['users']['items']:
                r['users']['items'].append(user)
            end_index = r2['users']['end-index']
            size = r['users']['size']
        # Flatten the accumulated pages to {username: {fullname, email, active}}.
        result = {}
        for user in r['users']['items']:
            result[user['name']] = {'fullname': user['displayName'], 'email': user['emailAddress'],
                                    'active': user['active']}
        return result
def add_group(self, groupname):
'''
Creates a new group in JIRA.
:param groupname: The name of the group you wish to create.
:return: Boolean - True if succesfull.
'''
url = self._options['server'] + '/rest/api/latest/group'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['name'] = groupname
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def remove_group(self, groupname):
'''
Deletes a group from the JIRA instance.
:param groupname: The group to be deleted from the JIRA instance.
:return: Boolean. Returns True on success.
'''
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
url = self._options['server'] + '/rest/api/latest/group'
x = {'groupname': groupname}
self._session.delete(url, params=x)
return True
# Issues
def issue(self, id, fields=None, expand=None):
"""
Get an issue Resource from the server.
:param id: ID or key of the issue to get
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# this allows us to pass Issue objects to issue()
if type(id) == Issue:
return id
issue = Issue(self._options, self._session)
params = {}
if fields is not None:
params['fields'] = fields
if expand is not None:
params['expand'] = expand
issue.find(id, params=params)
return issue
def create_issue(self, fields=None, prefetch=True, **fieldargs):
"""
Create a new issue and return an issue Resource for it.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored.
By default, the client will immediately reload the issue Resource created by this method in order to return
a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument.
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in a new issue. This information is available through the 'createmeta' method. Further examples are
available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
:param prefetch: whether to reload the created issue Resource so that all of its data is present in the value\
returned from this method
"""
data = {}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
p = data['fields']['project']
if isinstance(p, string_types) or isinstance(p, integer_types):
data['fields']['project'] = {'id': self.project(p).id}
url = self._get_url('issue')
r = self._session.post(url, data=json.dumps(data))
raw_issue_json = json_loads(r)
if 'key' not in raw_issue_json:
raise JIRAError(r.status_code, request=r)
if prefetch:
return self.issue(raw_issue_json['key'])
else:
return Issue(self._options, self._session, raw=raw_issue_json)
def createmeta(self, projectKeys=None, projectIds=[], issuetypeIds=None, issuetypeNames=None, expand=None):
"""
Gets the metadata required to create issues, optionally filtered by projects and issue types.
:param projectKeys: keys of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectIds.
:param projectIds: IDs of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectKeys.
:param issuetypeIds: IDs of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeNames.
:param issuetypeNames: Names of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeIds.
:param expand: extra information to fetch inside each resource.
"""
params = {}
if projectKeys is not None:
params['projectKeys'] = projectKeys
if projectIds is not None:
if isinstance(projectIds, string_types):
projectIds = projectIds.split(',')
params['projectIds'] = projectIds
if issuetypeIds is not None:
params['issuetypeIds'] = issuetypeIds
if issuetypeNames is not None:
params['issuetypeNames'] = issuetypeNames
if expand is not None:
params['expand'] = expand
return self._get_json('issue/createmeta', params)
# non-resource
@translate_resource_args
def assign_issue(self, issue, assignee):
"""
Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic.
:param issue: the issue to assign
:param assignee: the user to assign the issue to
"""
url = self._options['server'] + \
'/rest/api/2/issue/' + str(issue) + '/assignee'
payload = {'name': assignee}
r = self._session.put(
url, data=json.dumps(payload))
@translate_resource_args
def comments(self, issue):
"""
Get a list of comment Resources.
:param issue: the issue to get comments from
"""
r_json = self._get_json('issue/' + str(issue) + '/comment')
comments = [Comment(self._options, self._session, raw_comment_json)
for raw_comment_json in r_json['comments']]
return comments
@translate_resource_args
def comment(self, issue, comment):
"""
Get a comment Resource from the server for the specified ID.
:param issue: ID or key of the issue to get the comment from
:param comment: ID of the comment to get
"""
return self._find_for_resource(Comment, (issue, comment))
@translate_resource_args
def add_comment(self, issue, body, visibility=None):
"""
Add a comment from the current authenticated user on the specified issue and return a Resource for it.
The issue identifier and comment body are required.
:param issue: ID or key of the issue to add the comment to
:param body: Text of the comment to add
:param visibility: a dict containing two entries: "type" and "value". "type" is 'role' (or 'group' if the JIRA\
server has configured comment visibility for groups) and 'value' is the name of the role (or group) to which\
viewing of this comment will be restricted.
"""
data = {
'body': body
}
if visibility is not None:
data['visibility'] = visibility
url = self._get_url('issue/' + str(issue) + '/comment')
r = self._session.post(
url, data=json.dumps(data))
comment = Comment(self._options, self._session, raw=json_loads(r))
return comment
# non-resource
@translate_resource_args
def editmeta(self, issue):
"""
Get the edit metadata for an issue.
:param issue: the issue to get metadata for
"""
return self._get_json('issue/' + str(issue) + '/editmeta')
@translate_resource_args
def remote_links(self, issue):
"""
Get a list of remote link Resources from an issue.
:param issue: the issue to get remote links from
"""
r_json = self._get_json('issue/' + str(issue) + '/remotelink')
remote_links = [RemoteLink(
self._options, self._session, raw_remotelink_json) for raw_remotelink_json in r_json]
return remote_links
@translate_resource_args
def remote_link(self, issue, id):
"""
Get a remote link Resource from the server.
:param issue: the issue holding the remote link
:param id: ID of the remote link
"""
return self._find_for_resource(RemoteLink, (issue, id))
    # removed the @translate_resource_args because it prevents us from finding
    # information for building a proper link
    def add_remote_link(self, issue, destination, globalId=None, application=None, relationship=None):
        """
        Add a remote link from an issue to an external application and returns a remote link Resource
        for it. ``object`` should be a dict containing at least ``url`` to the linked external URL and
        ``title`` to display for the link inside JIRA.
        For definitions of the allowable fields for ``object`` and the keyword arguments ``globalId``, ``application``
        and ``relationship``, see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
        :param issue: the issue to add the remote link to
        :param destination: the link details to add (see the above link for details)
        :param globalId: unique ID for the link (see the above link for details)
        :param application: application information for the link (see the above link for details)
        :param relationship: relationship description for the link (see the above link for details)
        """
        # Deliberate: this endpoint is known-broken upstream; warn callers.
        warnings.warn(
            "broken: see https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551",
            Warning)
        data = {}
        # Linking to another Issue (possibly on another JIRA instance): derive
        # object/globalId/application from the destination issue itself.
        if type(destination) == Issue:
            data['object'] = {
                'title': str(destination),
                'url': destination.permalink()
            }
            # Match the destination's server against the configured
            # application links to build the globalId.
            for x in self.applicationlinks():
                if x['application']['displayUrl'] == destination._options['server']:
                    data['globalId'] = "appId=%s&issueId=%s" % (
                        x['application']['id'], destination.raw['id'])
                    data['application'] = {
                        'name': x['application']['name'], 'type': "com.atlassian.jira"}
                    break
            if 'globalId' not in data:
                raise NotImplementedError(
                    "Unable to identify the issue to link to.")
        else:
            # Plain dict destination: caller supplies the link details.
            if globalId is not None:
                data['globalId'] = globalId
            if application is not None:
                data['application'] = application
            data['object'] = destination
        if relationship is not None:
            data['relationship'] = relationship
        # check if the link comes from one of the configured application links
        # NOTE(review): this second loop overwrites globalId/application using
        # this client's own server URL and assumes destination has a .raw
        # attribute, which a plain dict does not -- confirm before relying on it.
        for x in self.applicationlinks():
            if x['application']['displayUrl'] == self._options['server']:
                data['globalId'] = "appId=%s&issueId=%s" % (
                    x['application']['id'], destination.raw['id'])
                data['application'] = {
                    'name': x['application']['name'], 'type': "com.atlassian.jira"}
                break
        url = self._get_url('issue/' + str(issue) + '/remotelink')
        r = self._session.post(
            url, data=json.dumps(data))
        remote_link = RemoteLink(
            self._options, self._session, raw=json_loads(r))
        return remote_link
# non-resource
@translate_resource_args
def transitions(self, issue, id=None, expand=None):
"""
Get a list of the transitions available on the specified issue to the current user.
:param issue: ID or key of the issue to get the transitions from
:param id: if present, get only the transition matching this ID
:param expand: extra information to fetch inside each transition
"""
params = {}
if id is not None:
params['transitionId'] = id
if expand is not None:
params['expand'] = expand
return self._get_json('issue/' + str(issue) + '/transitions', params=params)['transitions']
def find_transitionid_by_name(self, issue, transition_name):
"""
Get a transitionid available on the specified issue to the current user.
Look at https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074 for json reference
:param issue: ID or key of the issue to get the transitions from
:param trans_name: iname of transition we are looking for
"""
transitions_json = self.transitions(issue)
id = None
for transition in transitions_json:
if transition["name"].lower() == transition_name.lower():
id = transition["id"]
break
return id
@translate_resource_args
def transition_issue(self, issue, transition, fields=None, comment=None, **fieldargs):
# TODO: Support update verbs (same as issue.update())
"""
Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when performing the transition.
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId
}
}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
@translate_resource_args
def votes(self, issue):
    """
    Return the Votes Resource associated with an issue.

    :param issue: ID or key of the issue whose votes are requested
    """
    return self._find_for_resource(Votes, issue)
@translate_resource_args
def add_vote(self, issue):
    """
    Cast a vote on an issue as the current authenticated user.

    :param issue: ID or key of the issue to vote on
    """
    self._session.post(self._get_url('issue/{0}/votes'.format(issue)))
@translate_resource_args
def remove_vote(self, issue):
    """
    Withdraw the current authenticated user's vote from an issue.

    :param issue: ID or key of the issue to remove the vote from
    """
    self._session.delete(self._get_url('issue/{0}/votes'.format(issue)))
@translate_resource_args
def watchers(self, issue):
    """
    Return the Watchers Resource for an issue.

    :param issue: ID or key of the issue whose watchers are requested
    """
    return self._find_for_resource(Watchers, issue)
@translate_resource_args
def add_watcher(self, issue, watcher):
    """
    Append a user to the watchers list of an issue.

    :param issue: ID or key of the issue to watch
    :param watcher: username of the user to add as a watcher
    """
    url = self._get_url('issue/{0}/watchers'.format(issue))
    self._session.post(url, data=json.dumps(watcher))
@translate_resource_args
def remove_watcher(self, issue, watcher):
    """
    Drop a user from the watchers list of an issue and return the server response.

    :param issue: ID or key of the issue to stop watching
    :param watcher: username of the user to remove from the watchers list
    """
    url = self._get_url('issue/{0}/watchers'.format(issue))
    return self._session.delete(url, params={'username': watcher})
@translate_resource_args
def worklogs(self, issue):
    """
    Return all worklog Resources recorded on an issue.

    :param issue: ID or key of the issue to fetch worklogs for
    """
    r_json = self._get_json('issue/{0}/worklog'.format(issue))
    return [Worklog(self._options, self._session, raw)
            for raw in r_json['worklogs']]
@translate_resource_args
def worklog(self, issue, id):
    """
    Return a single worklog Resource.

    :param issue: ID or key of the issue the worklog belongs to
    :param id: ID of the worklog to fetch
    """
    return self._find_for_resource(Worklog, (issue, id))
@translate_resource_args
def add_worklog(self, issue, timeSpent=None, timeSpentSeconds=None, adjustEstimate=None,
                newEstimate=None, reduceBy=None, comment=None, started=None, user=None):
    """
    Add a new worklog entry on an issue and return a Resource for it.

    :param issue: the issue to add the worklog to
    :param timeSpent: a worklog entry with this amount of time spent, e.g. "2d"
    :param timeSpentSeconds: worklog time expressed in seconds
    :param adjustEstimate: (optional) allows the user to provide specific instructions to update the remaining
        time estimate of the issue. The value can either be ``new``, ``leave``, ``manual`` or ``auto`` (default).
    :param newEstimate: the new value for the remaining estimate field. e.g. "2d"
    :param reduceBy: the amount to reduce the remaining estimate by e.g. "2d"
    :param started: Moment when the work is logged, if not specified will default to now
    :param comment: optional worklog comment
    :param user: optional author to record; when no ``comment`` is given it is also
        written into the comment field (see note below)
    """
    # estimate-adjustment instructions travel as query parameters
    params = {}
    if adjustEstimate is not None:
        params['adjustEstimate'] = adjustEstimate
    if newEstimate is not None:
        params['newEstimate'] = newEstimate
    if reduceBy is not None:
        params['reduceBy'] = reduceBy
    # the worklog entry itself travels as the JSON body
    data = {}
    if timeSpent is not None:
        data['timeSpent'] = timeSpent
    if timeSpentSeconds is not None:
        data['timeSpentSeconds'] = timeSpentSeconds
    if comment is not None:
        data['comment'] = comment
    elif user:
        # we log user inside comment as it doesn't always work
        data['comment'] = user
    if started is not None:
        # based on REST Browser it needs: "2014-06-03T08:21:01.273+0000"
        # NOTE(review): a naive datetime yields an empty %z here -- presumably
        # callers pass timezone-aware values; confirm
        data['started'] = started.strftime("%Y-%m-%dT%H:%M:%S.000%z")
    if user is not None:
        data['author'] = {"name": user,
                          'self': self.JIRA_BASE_URL + '/rest/api/2/user?username=' + user,
                          'displayName': user,
                          'active': False
                          }
        data['updateAuthor'] = data['author']
    # TODO: report bug to Atlassian: author and updateAuthor parameters are
    # ignored.
    url = self._get_url('issue/{0}/worklog'.format(issue))
    r = self._session.post(url, params=params, data=json.dumps(data))
    return Worklog(self._options, self._session, json_loads(r))
# Issue links
@translate_resource_args
def create_issue_link(self, type, inwardIssue, outwardIssue, comment=None):
    """
    Create a link between two issues.

    :param type: the type of link to create
    :param inwardIssue: the issue to link from
    :param outwardIssue: the issue to link to
    :param comment: a comment to add to the issues with the link. Should be a dict containing ``body``
        and ``visibility`` fields: ``body`` being the text of the comment and ``visibility`` being a dict containing
        two entries: ``type`` and ``value``. ``type`` is ``role`` (or ``group`` if the JIRA server has configured
        comment visibility for groups) and ``value`` is the name of the role (or group) to which viewing of this
        comment will be restricted.
    """
    # let's see if we have the right issue link 'type' and fix it if needed.
    # BUGFIX: the original checked hasattr(self, '_cached_issuetypes') but
    # stored '_cached_issue_link_types', so the cache never took effect and
    # link types were re-fetched on every call.
    if not hasattr(self, '_cached_issue_link_types'):
        self._cached_issue_link_types = self.issue_link_types()
    if type not in self._cached_issue_link_types:
        for lt in self._cached_issue_link_types:
            if lt.outward == type:
                # the outward description was given; use the canonical name
                type = lt.name
                break
            elif lt.inward == type:
                # the inward description was given; swap the issues to match
                type = lt.name
                inwardIssue, outwardIssue = outwardIssue, inwardIssue
                break
    data = {
        'type': {
            'name': type
        },
        'inwardIssue': {
            'key': inwardIssue
        },
        'outwardIssue': {
            'key': outwardIssue
        },
        'comment': comment
    }
    url = self._get_url('issueLink')
    r = self._session.post(
        url, data=json.dumps(data))
def issue_link(self, id):
    """
    Return an IssueLink Resource by its ID.

    :param id: ID of the issue link to retrieve
    """
    return self._find_for_resource(IssueLink, id)
# Issue link types
def issue_link_types(self):
    """Return the list of issue link type Resources defined on the server."""
    r_json = self._get_json('issueLinkType')
    return [IssueLinkType(self._options, self._session, raw)
            for raw in r_json['issueLinkTypes']]
def issue_link_type(self, id):
    """
    Return an IssueLinkType Resource by its ID.

    :param id: ID of the issue link type to retrieve
    """
    return self._find_for_resource(IssueLinkType, id)
# Issue types
def issue_types(self):
    """Return the list of issue type Resources defined on the server."""
    return [IssueType(self._options, self._session, raw)
            for raw in self._get_json('issuetype')]
def issue_type(self, id):
    """
    Return an IssueType Resource by its ID.

    :param id: ID of the issue type to retrieve
    """
    return self._find_for_resource(IssueType, id)
# User permissions
# non-resource
def my_permissions(self, projectKey=None, projectId=None, issueKey=None, issueId=None):
    """
    Return a dict of all permissions available to the current user on the server.

    :param projectKey: restrict returned permissions to the given project key
    :param projectId: restrict returned permissions to the given project ID
    :param issueKey: restrict returned permissions to the given issue key
    :param issueId: restrict returned permissions to the given issue ID
    """
    params = {}
    # only forward filters the caller actually supplied
    for name, value in (('projectKey', projectKey), ('projectId', projectId),
                        ('issueKey', issueKey), ('issueId', issueId)):
        if value is not None:
            params[name] = value
    return self._get_json('mypermissions', params=params)
# Priorities
def priorities(self):
    """Return the list of priority Resources defined on the server."""
    return [Priority(self._options, self._session, raw)
            for raw in self._get_json('priority')]
def priority(self, id):
    """
    Return a Priority Resource by its ID.

    :param id: ID of the priority to retrieve
    """
    return self._find_for_resource(Priority, id)
# Projects
def projects(self):
    """Return the project Resources visible to the current authenticated user."""
    return [Project(self._options, self._session, raw)
            for raw in self._get_json('project')]
def project(self, id):
    """
    Return a Project Resource by ID or key.

    :param id: ID or key of the project to retrieve
    """
    return self._find_for_resource(Project, id)
# non-resource
@translate_resource_args
def project_avatars(self, project):
    """
    Return a dict of the avatars visible to the current user for a project.

    :param project: ID or key of the project whose avatars are requested
    """
    return self._get_json('project/' + project + '/avatars')
@translate_resource_args
def create_temp_project_avatar(self, project, filename, size, avatar_img, contentType=None, auto_confirm=False):
    """
    Register an image file as a project avatar. The avatar created is temporary and must be confirmed before it can
    be used.

    Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
    autodetect the picture's content type: this mechanism relies on libmagic and will not work out of the box
    on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
    support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
    type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)

    This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
    dict should be saved and passed to :py:meth:`confirm_project_avatar` to finish the avatar creation process. If
    you want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the 'auto_confirm'
    argument with a truthy value and :py:meth:`confirm_project_avatar` will be called for you before this method
    returns.

    :param project: ID or key of the project to create the avatar in
    :param filename: name of the avatar file (must exist locally; its on-disk size overrides ``size``)
    :param size: size of the avatar file
    :param avatar_img: file-like object holding the avatar
    :param contentType: explicit specification for the avatar image's content-type
    :param boolean auto_confirm: whether to automatically confirm the temporary avatar by calling
        :py:meth:`confirm_project_avatar` with the return value of this method.
    """
    # trust the actual on-disk size over the caller-supplied value
    size_from_file = os.path.getsize(filename)
    if size != size_from_file:
        size = size_from_file
    params = {
        'filename': filename,
        'size': size
    }
    # disable XSRF checking for this upload endpoint
    headers = {'X-Atlassian-Token': 'no-check'}
    if contentType is not None:
        headers['content-type'] = contentType
    else:
        # try to detect content-type, this may return None
        headers['content-type'] = self._get_mime_type(avatar_img)
    url = self._get_url('project/' + project + '/avatar/temporary')
    r = self._session.post(
        url, params=params, headers=headers, data=avatar_img)
    cropping_properties = json_loads(r)
    if auto_confirm:
        return self.confirm_project_avatar(project, cropping_properties)
    else:
        return cropping_properties
@translate_resource_args
def confirm_project_avatar(self, project, cropping_properties):
    """
    Confirm a temporary project avatar, applying the given cropping.

    Use this after a successful :py:meth:`create_temp_project_avatar` to finalize
    the avatar; pass that method's return value as ``cropping_properties``.

    :param project: ID or key of the project to confirm the avatar in
    :param cropping_properties: dict of cropping properties from :py:meth:`create_temp_project_avatar`
    """
    url = self._get_url('project/' + project + '/avatar')
    r = self._session.post(url, data=json.dumps(cropping_properties))
    return json_loads(r)
@translate_resource_args
def set_project_avatar(self, project, avatar):
    """
    Select the avatar to use for a project.

    :param project: ID or key of the project to set the avatar on
    :param avatar: ID of the avatar to select
    """
    url = self._get_url('project/' + project + '/avatar')
    self._set_avatar(None, url, avatar)
@translate_resource_args
def delete_project_avatar(self, project, avatar):
    """
    Remove an avatar from a project.

    :param project: ID or key of the project to delete the avatar from
    :param avatar: ID of the avatar to delete
    """
    self._session.delete(self._get_url('project/' + project + '/avatar/' + avatar))
@translate_resource_args
def project_components(self, project):
    """
    Return the component Resources defined on a project.

    :param project: ID or key of the project to list components for
    """
    r_json = self._get_json('project/' + project + '/components')
    return [Component(self._options, self._session, raw) for raw in r_json]
@translate_resource_args
def project_versions(self, project):
    """
    Return the version Resources defined on a project.

    :param project: ID or key of the project to list versions for
    """
    r_json = self._get_json('project/' + project + '/versions')
    return [Version(self._options, self._session, raw) for raw in r_json]
# non-resource
@translate_resource_args
def project_roles(self, project):
    """
    Return a dict mapping role names to their resource locations for a project.

    :param project: ID or key of the project to list roles for
    """
    return self._get_json('project/' + project + '/role')
@translate_resource_args
def project_role(self, project, id):
    """
    Return a Role Resource for a project.

    :param project: ID or key of the project the role belongs to
    :param id: ID of the role to retrieve
    """
    return self._find_for_resource(Role, (project, id))
# Resolutions
def resolutions(self):
    """Return the list of resolution Resources defined on the server."""
    return [Resolution(self._options, self._session, raw)
            for raw in self._get_json('resolution')]
def resolution(self, id):
    """
    Return a Resolution Resource by its ID.

    :param id: ID of the resolution to retrieve
    """
    return self._find_for_resource(Resolution, id)
# Search
def search_issues(self, jql_str, startAt=0, maxResults=50, validate_query=True, fields=None, expand=None,
                  json_result=None):
    """
    Get a ResultList of issue Resources matching a JQL search string.

    :param jql_str: the JQL search string to use
    :param startAt: index of the first issue to return
    :param maxResults: maximum number of issues to return. Total number of results
        is available in the ``total`` attribute of the returned ResultList.
        If maxResults evaluates as False, it will try to get all issues in batches of 50.
    :param validate_query: whether the server should validate the JQL before running it
    :param fields: comma-separated string of issue fields to include in the results
    :param expand: extra information to fetch inside each resource
    :param json_result: if truthy, return the raw decoded JSON of the first page
        instead of a ResultList of Issue objects
    """
    # TODO what to do about the expand, which isn't related to the issues?
    infinite = False
    maxi = 50  # batch size used when fetching all results
    idx = 0
    if fields is None:
        fields = []
    # If None is passed as parameter, this fetch all issues from the query
    if not maxResults:
        maxResults = maxi
        infinite = True
    search_params = {
        "jql": jql_str,
        "startAt": startAt,
        "maxResults": maxResults,
        "validateQuery": validate_query,
        "fields": fields,
        "expand": expand
    }
    if json_result:
        return self._get_json('search', params=search_params)

    resource = self._get_json('search', params=search_params)
    issues = [Issue(self._options, self._session, raw_issue_json)
              for raw_issue_json in resource['issues']]
    cnt = len(issues)
    total = resource['total']
    if infinite:
        # keep paging until the server returns a short (non-full) batch
        while cnt == maxi:
            idx += maxi
            search_params["startAt"] = idx
            resource = self._get_json('search', params=search_params)
            issue_batch = [Issue(self._options, self._session, raw_issue_json) for raw_issue_json in
                           resource['issues']]
            issues.extend(issue_batch)
            cnt = len(issue_batch)
    return ResultList(issues, total)
# Security levels
def security_level(self, id):
    """
    Return a SecurityLevel Resource by its ID.

    :param id: ID of the security level to retrieve
    """
    return self._find_for_resource(SecurityLevel, id)
# Server info
# non-resource
def server_info(self):
    """Return a dict of information about this JIRA server instance."""
    return self._get_json('serverInfo')
# Status
def statuses(self):
    """Return the list of status Resources defined on the server."""
    return [Status(self._options, self._session, raw)
            for raw in self._get_json('status')]
def status(self, id):
    """
    Return a Status Resource by its ID.

    :param id: ID of the status to retrieve
    """
    return self._find_for_resource(Status, id)
# Users
def user(self, id, expand=None):
    """
    Return a User Resource from the server.

    :param id: ID of the user to retrieve
    :param expand: extra information to fetch inside the resource
    """
    params = {} if expand is None else {'expand': expand}
    user = User(self._options, self._session)
    user.find(id, params=params)
    return user
def search_assignable_users_for_projects(self, username, projectKeys, startAt=0, maxResults=50):
    """
    Return the user Resources matching the search string who can be assigned
    issues in the given projects.

    :param username: string to match usernames against
    :param projectKeys: comma-separated list of project keys to check for issue assignment permissions
    :param startAt: index of the first user to return
    :param maxResults: maximum number of users to return
    """
    query = {
        'username': username,
        'projectKeys': projectKeys,
        'startAt': startAt,
        'maxResults': maxResults
    }
    r_json = self._get_json('user/assignable/multiProjectSearch', params=query)
    return [User(self._options, self._session, raw) for raw in r_json]
def search_assignable_users_for_issues(self, username, project=None, issueKey=None, expand=None, startAt=0,
                                       maxResults=50):
    """
    Return the user Resources matching the search string who may create or be
    assigned issues.

    Specify a project to find users eligible to create issues in it; specify an
    issue key to find users eligible to be assigned to that issue.

    :param username: string to match usernames against
    :param project: filter returned users by permission in this project (expected if a result will be used to
        create an issue)
    :param issueKey: filter returned users by this issue (expected if a result will be used to edit this issue)
    :param expand: extra information to fetch inside each resource
    :param startAt: index of the first user to return
    :param maxResults: maximum number of users to return
    """
    query = {
        'username': username,
        'startAt': startAt,
        'maxResults': maxResults,
    }
    # only forward the optional filters the caller supplied
    for name, value in (('project', project), ('issueKey', issueKey), ('expand', expand)):
        if value is not None:
            query[name] = value
    r_json = self._get_json('user/assignable/search', query)
    return [User(self._options, self._session, raw) for raw in r_json]
# non-resource
def user_avatars(self, username):
    """
    Return a dict of the avatars defined for a user.

    :param username: username of the user whose avatars are requested
    """
    return self._get_json('user/avatars', params={'username': username})
def create_temp_user_avatar(self, user, filename, size, avatar_img, contentType=None, auto_confirm=False):
    """
    Register an image file as a user avatar. The avatar created is temporary and must be confirmed before it can
    be used.

    Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
    autodetect the picture's content type: this mechanism relies on ``libmagic`` and will not work out of the box
    on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
    support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
    type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)

    This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
    dict should be saved and passed to :py:meth:`confirm_user_avatar` to finish the avatar creation process. If you
    want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the ``auto_confirm``
    argument with a truthy value and :py:meth:`confirm_user_avatar` will be called for you before this method
    returns.

    :param user: user to register the avatar for
    :param filename: name of the avatar file (must exist locally; its on-disk size overrides ``size``)
    :param size: size of the avatar file
    :param avatar_img: file-like object containing the avatar
    :param contentType: explicit specification for the avatar image's content-type
    :param auto_confirm: whether to automatically confirm the temporary avatar by calling
        :py:meth:`confirm_user_avatar` with the return value of this method.
    """
    # trust the actual on-disk size over the caller-supplied value
    size_from_file = os.path.getsize(filename)
    if size != size_from_file:
        size = size_from_file
    params = {
        'username': user,
        'filename': filename,
        'size': size
    }
    # disable XSRF checking for this upload endpoint
    headers = {'X-Atlassian-Token': 'no-check'}
    if contentType is not None:
        headers['content-type'] = contentType
    else:
        # try to detect content-type, this may return None
        headers['content-type'] = self._get_mime_type(avatar_img)
    url = self._get_url('user/avatar/temporary')
    r = self._session.post(
        url, params=params, headers=headers, data=avatar_img)
    cropping_properties = json_loads(r)
    if auto_confirm:
        return self.confirm_user_avatar(user, cropping_properties)
    else:
        return cropping_properties
def confirm_user_avatar(self, user, cropping_properties):
    """
    Confirm a temporary user avatar, applying the given cropping.

    Use this after a successful :py:meth:`create_temp_user_avatar` to finalize
    the avatar; pass that method's return value as ``cropping_properties``.

    :param user: the user to confirm the avatar for
    :param cropping_properties: dict of cropping properties from :py:meth:`create_temp_user_avatar`
    """
    url = self._get_url('user/avatar')
    r = self._session.post(url, params={'username': user},
                           data=json.dumps(cropping_properties))
    return json_loads(r)
def set_user_avatar(self, username, avatar):
    """
    Select the avatar to use for a user.

    :param username: the user to set the avatar for
    :param avatar: ID of the avatar to select
    """
    self._set_avatar({'username': username}, self._get_url('user/avatar'), avatar)
def delete_user_avatar(self, username, avatar):
    """
    Remove an avatar from a user.

    :param username: the user to delete the avatar from
    :param avatar: ID of the avatar to remove
    """
    url = self._get_url('user/avatar/' + avatar)
    self._session.delete(url, params={'username': username})
def search_users(self, user, startAt=0, maxResults=50, includeActive=True, includeInactive=False):
    """
    Return the user Resources matching the given search string.

    :param user: string to match usernames, display names or email addresses against
    :param startAt: index of the first user to return
    :param maxResults: maximum number of users to return
    :param includeActive: whether active users are included in the results
    :param includeInactive: whether inactive users are included in the results
    """
    query = {
        'username': user,
        'startAt': startAt,
        'maxResults': maxResults,
        'includeActive': includeActive,
        'includeInactive': includeInactive
    }
    r_json = self._get_json('user/search', params=query)
    return [User(self._options, self._session, raw) for raw in r_json]
def search_allowed_users_for_issue(self, user, issueKey=None, projectKey=None, startAt=0, maxResults=50):
    """
    Return the user Resources matching a username string who have browse
    permission for the given issue or project.

    :param user: string to match usernames against
    :param issueKey: find users with browse permission for this issue
    :param projectKey: find users with browse permission for this project
    :param startAt: index of the first user to return
    :param maxResults: maximum number of users to return
    """
    query = {
        'username': user,
        'startAt': startAt,
        'maxResults': maxResults,
    }
    # only forward the optional filters the caller supplied
    for name, value in (('issueKey', issueKey), ('projectKey', projectKey)):
        if value is not None:
            query[name] = value
    r_json = self._get_json('user/viewissue/search', query)
    return [User(self._options, self._session, raw) for raw in r_json]
# Versions
@translate_resource_args
def create_version(self, name, project, description=None, releaseDate=None, startDate=None, archived=False,
                   released=False):
    """
    Create a version in a project and return a Resource for it.

    :param name: name of the version to create
    :param project: key of the project to create the version in
    :param description: a description of the version
    :param releaseDate: the release date assigned to the version
    :param startDate: the start date for the version
    :param archived: whether the version is created in the archived state
    :param released: whether the version is created in the released state
    """
    payload = {
        'name': name,
        'project': project,
        'archived': archived,
        'released': released
    }
    # optional fields are only sent when supplied
    for key, value in (('description', description), ('releaseDate', releaseDate),
                       ('startDate', startDate)):
        if value is not None:
            payload[key] = value
    r = self._session.post(self._get_url('version'), data=json.dumps(payload))
    return Version(self._options, self._session, raw=json_loads(r))
def move_version(self, id, after=None, position=None):
    """
    Move a version within a project's ordered version list and return a new
    version Resource for it. One, but not both, of ``after`` and ``position``
    must be specified.

    :param id: ID of the version to move
    :param after: the self attribute of a version to place the specified version after (that is, higher in the list)
    :param position: the absolute position to move this version to: must be one of ``First``, ``Last``,
        ``Earlier``, or ``Later``
    """
    payload = {}
    if after is not None:
        payload['after'] = after
    elif position is not None:
        payload['position'] = position
    r = self._session.post(self._get_url('version/' + id + '/move'),
                           data=json.dumps(payload))
    return Version(self._options, self._session, raw=json_loads(r))
def version(self, id, expand=None):
    """
    Return a Version Resource.

    :param id: ID of the version to retrieve
    :param expand: extra information to fetch inside the resource
    """
    params = {} if expand is None else {'expand': expand}
    version = Version(self._options, self._session)
    version.find(id, params=params)
    return version
def version_count_related_issues(self, id):
    """
    Return a dict with the counts of issues fixed by and affected by a version.

    :param id: ID of the version to count issues for
    """
    r_json = self._get_json('version/' + id + '/relatedIssueCounts')
    # 'self' is just a URL back-reference, not an addressable count -- drop it
    del r_json['self']
    return r_json
def version_count_unresolved_issues(self, id):
    """
    Return the number of unresolved issues targeting a version.

    :param id: ID of the version to count issues for
    """
    r_json = self._get_json('version/' + id + '/unresolvedIssueCount')
    return r_json['issuesUnresolvedCount']
# Session authentication
def session(self):
    """Return a User Resource describing the current authenticated session."""
    url = '{server}/rest/auth/1/session'.format(**self._options)
    auth = self._session.auth
    if type(auth) is tuple:
        # basic auth is configured: establish the session by posting credentials
        credentials = {'username': auth[0], 'password': auth[1]}
        r = self._session.post(url, data=json.dumps(credentials))
    else:
        r = self._session.get(url)
    return User(self._options, self._session, json_loads(r))
def kill_session(self):
    """Destroy the session of the current authenticated user."""
    self._session.delete(self._options['server'] + '/rest/auth/latest/session')
# Websudo
def kill_websudo(self):
    """Destroy the user's current WebSudo session."""
    self._session.delete(self._options['server'] + '/rest/auth/1/websudo')
# Utilities
def _create_http_basic_session(self, username, password):
    """Build the ResilientSession used for HTTP basic authentication."""
    session = ResilientSession()
    session.verify = self._options['verify']
    session.auth = (username, password)
    session.cert = self._options['client_cert']
    self._session = session
def _create_oauth_session(self, oauth):
    """Build the ResilientSession used for OAuth1 (RSA signature) authentication."""
    verify = self._options['verify']
    from requests_oauthlib import OAuth1
    from oauthlib.oauth1 import SIGNATURE_RSA
    auth = OAuth1(
        oauth['consumer_key'],
        rsa_key=oauth['key_cert'],
        signature_method=SIGNATURE_RSA,
        resource_owner_key=oauth['access_token'],
        resource_owner_secret=oauth['access_token_secret']
    )
    session = ResilientSession()
    session.verify = verify
    session.auth = auth
    self._session = session
def _set_avatar(self, params, url, avatar):
    """PUT the chosen avatar ID to ``url``; shared helper for user and project avatars."""
    payload = json.dumps({'id': avatar})
    self._session.put(url, params=params, data=payload)
def _get_url(self, path, base=JIRA_BASE_URL):
    """
    Build the full URL for a REST API path.

    :param path: the REST resource path, e.g. ``'issue/ABC-1'``
    :param base: format template for the URL; defaults to the standard API base
    """
    # BUGFIX: operate on a copy -- the original called update() on
    # self._options itself, leaking the last-requested 'path' into the
    # shared options dict used everywhere else.
    options = dict(self._options)
    options['path'] = path
    return base.format(**options)
def _get_json(self, path, params=None, base=JIRA_BASE_URL):
    """
    GET a REST resource and return its decoded JSON payload.

    :param path: the REST resource path to fetch
    :param params: optional query parameters for the request
    :param base: format template for the URL
    """
    r = self._session.get(self._get_url(path, base), params=params)
    try:
        return json_loads(r)
    except ValueError as e:
        # log the undecodable body to aid debugging, then re-raise
        logging.error("%s\n%s" % (e, r.text))
        raise e
def _find_for_resource(self, resource_cls, ids, expand=None):
    """
    Instantiate ``resource_cls`` and populate it from the server.

    :param resource_cls: Resource subclass to instantiate
    :param ids: ID (or tuple of IDs) identifying the resource
    :param expand: extra information to fetch inside the resource
    """
    params = {} if expand is None else {'expand': expand}
    resource = resource_cls(self._options, self._session)
    resource.find(id=ids, params=params)
    return resource
def _try_magic(self):
    """Initialize ``self._magic`` with a libmagic MIME detector, or None when unavailable."""
    try:
        import magic
        import weakref
    except ImportError:
        self._magic = None
        return
    try:
        detector = magic.Magic(flags=magic.MAGIC_MIME_TYPE)
    except (TypeError, AttributeError):
        # an incompatible 'magic' module (e.g. python-magic) is installed
        self._magic = None
    else:
        # close the libmagic handle when this client instance is collected
        self._magic_weakref = weakref.ref(self, lambda _: detector.close())
        self._magic = detector
def _get_mime_type(self, buff):
    """
    Guess the MIME type of an in-memory image buffer.

    Uses libmagic when available, otherwise falls back to imghdr + mimetypes.
    Returns None when the type cannot be determined.
    """
    if self._magic is not None:
        return self._magic.id_buffer(buff)
    try:
        guessed = mimetypes.guess_type("f." + imghdr.what(0, buff))
    except (IOError, TypeError):
        logging.warning("Couldn't detect content type of avatar image"
                        ". Specify the 'contentType' parameter explicitly.")
        return None
    return guessed[0]
def email_user(self, user, body, title="JIRA Notification"):
    """
    Send a custom email to a user via the ScriptRunner canned-script endpoint.

    :param user: username of the recipient; the address is looked up on the server
    :param body: email body template text
    :param title: email subject template text
    """
    url = self._options['server'] + \
        '/secure/admin/groovy/CannedScriptRunner.jspa'
    # NOTE: the original dict listed 'cannedScript' twice with the same value;
    # the duplicate key has been removed (the later entry won anyway).
    payload = {
        'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
        'cannedScriptArgs_FIELD_CONDITION': '',
        'cannedScriptArgs_FIELD_EMAIL_TEMPLATE': body,
        'cannedScriptArgs_FIELD_EMAIL_SUBJECT_TEMPLATE': title,
        'cannedScriptArgs_FIELD_EMAIL_FORMAT': 'TEXT',
        'cannedScriptArgs_FIELD_TO_ADDRESSES': self.user(user).emailAddress,
        'cannedScriptArgs_FIELD_TO_USER_FIELDS': '',
        'cannedScriptArgs_FIELD_INCLUDE_ATTACHMENTS': 'FIELD_INCLUDE_ATTACHMENTS_NONE',
        'cannedScriptArgs_FIELD_FROM': '',
        'cannedScriptArgs_FIELD_PREVIEW_ISSUE': '',
        'id': '',
        'Preview': 'Preview',
    }
    r = self._session.post(
        url, headers=self._options['headers'], data=payload)
    # close the debug dump file instead of leaking the handle
    with open("/tmp/jira_email_user_%s.html" % user, "w") as f:
        f.write(r.text)
def rename_user(self, old_user, new_user):
    """
    Rename a JIRA user. Current implementation relies on third party plugin but in the future it may use embedded JIRA functionality.

    :param old_user: string with username login
    :param new_user: string with username login
    :returns: on the plugin path, the plugin's output message on success, or False on failure;
        on the REST path (JIRA >= 6.0) nothing is returned
    """
    if self._version >= (6, 0, 0):
        # JIRA 6+ supports renaming through the core REST API
        url = self._options['server'] + '/rest/api/2/user'
        payload = {
            "name": new_user,
        }
        params = {
            'username': old_user
        }

        # raw displayName
        logging.debug("renaming %s" % self.user(old_user).emailAddress)

        r = self._session.put(url, params=params,
                              data=json.dumps(payload))
    else:
        # old implementation needed the ScripRunner plugin
        # merge only when the target account already exists
        merge = "true"
        try:
            self.user(new_user)
        except:
            merge = "false"

        url = self._options[
            'server'] + '/secure/admin/groovy/CannedScriptRunner.jspa#result'
        payload = {
            "cannedScript": "com.onresolve.jira.groovy.canned.admin.RenameUser",
            "cannedScriptArgs_FIELD_FROM_USER_ID": old_user,
            "cannedScriptArgs_FIELD_TO_USER_ID": new_user,
            "cannedScriptArgs_FIELD_MERGE": merge,
            "id": "",
            "RunCanned": "Run",
        }

        # raw displayName
        logging.debug("renaming %s" % self.user(old_user).emailAddress)

        r = self._session.post(
            url, headers=self._options['headers'], data=payload)
        if r.status_code == 404:
            logging.error(
                "In order to be able to use rename_user() you need to install Script Runner plugin. See https://marketplace.atlassian.com/plugins/com.onresolve.jira.groovy.groovyrunner")
            return False
        if r.status_code != 200:
            logging.error(r.status_code)

        # NOTE(review): the regexes below run against r.content, which is bytes
        # under Python 3 -- presumably this code targets Python 2; confirm
        if re.compile("XSRF Security Token Missing").search(r.content):
            logging.fatal(
                "Reconfigure JIRA and disable XSRF in order to be able call this. See https://developer.atlassian.com/display/JIRADEV/Form+Token+Handling")
            return False

        # keep a debug dump of the plugin's response page
        open("/tmp/jira_rename_user_%s_to%s.html" %
             (old_user, new_user), "w").write(r.content)

        msg = r.status_code
        m = re.search("<span class=\"errMsg\">(.*)<\/span>", r.content)
        if m:
            msg = m.group(1)
            logging.error(msg)
            return False
        # <span class="errMsg">Target user ID must exist already for a merge</span>
        # extract the plugin's hidden output message from the result page
        p = re.compile("type=\"hidden\" name=\"cannedScriptArgs_Hidden_output\" value=\"(.*?)\"\/>",
                       re.MULTILINE | re.DOTALL)
        m = p.search(r.content)
        if m:
            h = html_parser.HTMLParser()
            msg = h.unescape(m.group(1))
            logging.info(msg)

        # let's check if the user still exists
        try:
            self.user(old_user)
        except:
            logging.error("User %s does not exists." % old_user)
            return msg

        logging.error(msg)
        logging.error(
            "User %s does still exists after rename, that's clearly a problem." % old_user)
        return False
def delete_user(self, username):
url = self._options['server'] + \
'/rest/api/latest/user/?username=%s' % username
r = self._session.delete(url)
if 200 <= r.status_code <= 299:
return True
else:
logging.error(r.status_code)
return False
def reindex(self, force=False, background=True):
"""
Start jira re-indexing. Returns True if reindexing is in progress or not needed, or False.
If you call reindex() without any parameters it will perform a backfround reindex only if JIRA thinks it should do it.
:param force: reindex even if JIRA doesn'tt say this is needed, False by default.
:param background: reindex inde background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if not r.text.find("To perform the re-index now, please go to the") and force is False:
return True
if r.text.find('All issues are being re-indexed'):
logging.warning("JIRA re-indexing is already running.")
return True # still reindexing is considered still a success
if r.text.find('To perform the re-index now, please go to the') or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False
def backup(self, filename='backup.zip'):
"""
Will call jira export to backup as zipped xml. Returning with success does not mean that the backup process finished.
"""
url = self._options['server'] + '/secure/admin/XmlBackup.jspa'
payload = {'filename': filename}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 200:
return True
else:
logging.warning(
'Got %s response from calling backup.' % r.status_code)
return r.status_code
def current_user(self):
if not hasattr(self, '_serverInfo') or 'username' not in self._serverInfo:
url = self._get_url('serverInfo')
r = self._session.get(url, headers=self._options['headers'])
r_json = json_loads(r)
if 'x-ausername' in r.headers:
r_json['username'] = r.headers['x-ausername']
else:
r_json['username'] = None
self._serverInfo = r_json
# del r_json['self'] # this isn't really an addressable resource
return self._serverInfo['username']
def delete_project(self, pid):
"""
Project can be id, project key or project name. It will return False if it fails.
"""
found = False
try:
if not str(int(pid)) == pid:
found = True
except Exception as e:
r_json = self._get_json('project')
for e in r_json:
if e['key'] == pid or e['name'] == pid:
pid = e['id']
found = True
break
if not found:
logging.error("Unable to recognize project `%s`" % pid)
return False
url = self._options['server'] + '/secure/admin/DeleteProject.jspa'
payload = {'pid': pid, 'Delete': 'Delete', 'confirm': 'true'}
r = self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
if r.status_code == 200:
return self._check_for_html_error(r.text)
else:
logging.warning(
'Got %s response from calling delete_project.' % r.status_code)
return r.status_code
def create_project(self, key, name=None, assignee=None):
"""
Key is mandatory and has to match JIRA project key requirements, usually only 2-10 uppercase characters.
If name is not specified it will use the key value.
If assignee is not specified it will use current user.
The returned value should evaluate to False if it fails otherwise it will be the new project id.
"""
if assignee is None:
assignee = self.current_user()
if name is None:
name = key
if key.upper() != key or not key.isalpha() or len(key) < 2 or len(key) > 10:
logging.error(
'key parameter is not all uppercase alphanumeric of length between 2 and 10')
return False
url = self._options['server'] + \
'/rest/project-templates/1.0/templates'
r = self._session.get(url)
j = json_loads(r)
template_key = None
templates = []
for template in j['projectTemplates']:
templates.append(template['name'])
if template['name'] in ['JIRA Classic', 'JIRA Default Schemes']:
template_key = template['projectTemplateModuleCompleteKey']
break
if not template_key:
raise JIRAError(
"Unable to find a suitable project template to use. Found only: " + ', '.join(templates))
payload = {'name': name,
'key': key,
'keyEdited': 'false',
#'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',
#'permissionScheme': '',
'projectTemplateWebItemKey': template_key,
'projectTemplateModuleKey': template_key,
'lead': assignee,
#'assigneeType': '2',
}
headers = CaseInsensitiveDict(
{'Content-Type': 'application/x-www-form-urlencoded'})
r = self._session.post(url, data=payload, headers=headers)
if r.status_code == 200:
r_json = json_loads(r)
return r_json
f = tempfile.NamedTemporaryFile(
suffix='.html', prefix='python-jira-error-create-project-', delete=False)
f.write(r.text)
if self.logging:
logging.error(
"Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % (
f.name, r.status_code))
return False
def add_user(self, username, email, directoryId=1, password=None, fullname=None, sendEmail=False, active=True):
fullname = username
# TODO: default the directoryID to the first directory in jira instead
# of 1 which is the internal one.
url = self._options['server'] + '/rest/api/latest/user'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['displayName'] = fullname
x['emailAddress'] = email
x['name'] = username
if password:
x['password'] = password
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def add_user_to_group(self, username, group):
'''
Adds a user to an existing group.
:param username: Username that will be added to specified group.
:param group: Group that the user will be added to.
:return: Boolean, True for success, false for failure.
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': group}
y = {'name': username}
payload = json.dumps(y)
self._session.post(url, params=x, data=payload)
return True
def remove_user_from_group(self, username, groupname):
'''
Removes a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
:return:
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True
# Experimental
# Experimental support for iDalko Grid, expect API to change as it's using private APIs currently
# https://support.idalko.com/browse/IGRID-1017
def get_igrid(self, issueid, customfield, schemeid):
url = self._options['server'] + '/rest/idalko-igrid/1.0/datagrid/data'
if str(customfield).isdigit():
customfield = "customfield_%s" % customfield
params = {
#'_mode':'view',
'_issueId': issueid,
'_fieldId': customfield,
'_confSchemeId': schemeid,
#'validate':True,
#'_search':False,
#'rows':100,
#'page':1,
#'sidx':'DEFAULT',
#'sord':'asc',
}
r = self._session.get(
url, headers=self._options['headers'], params=params)
return json_loads(r)
# Jira Agile specific methods (GreenHopper)
"""
Define the functions that interact with GreenHopper.
"""
@translate_resource_args
def boards(self):
"""
Get a list of board GreenHopperResources.
"""
r_json = self._get_json(
'rapidviews/list', base=self.AGILE_BASE_URL)
boards = [Board(self._options, self._session, raw_boards_json)
for raw_boards_json in r_json['views']]
return boards
@translate_resource_args
def sprints(self, id, extended=False):
"""
Get a list of sprint GreenHopperResources.
:param id: the board to get sprints from
:param extended: fetch additional information like startDate, endDate, completeDate,
much slower because it requires an additional requests for each sprint
:rtype: dict
>>> { "id": 893,
>>> "name": "iteration.5",
>>> "state": "FUTURE",
>>> "linkedPagesCount": 0,
>>> "startDate": "None",
>>> "endDate": "None",
>>> "completeDate": "None",
>>> "remoteLinks": []
>>> }
"""
r_json = self._get_json('sprintquery/%s?includeHistoricSprints=true&includeFutureSprints=true' % id,
base=self.AGILE_BASE_URL)
if extended:
sprints = []
for raw_sprints_json in r_json['sprints']:
r_json = self._get_json(
'sprint/%s/edit/model' % raw_sprints_json['id'], base=self.AGILE_BASE_URL)
sprints.append(
Sprint(self._options, self._session, r_json['sprint']))
else:
sprints = [Sprint(self._options, self._session, raw_sprints_json)
for raw_sprints_json in r_json['sprints']]
return sprints
def sprints_by_name(self, id, extended=False):
sprints = {}
for s in self.sprints(id, extended=extended):
if s.name not in sprints:
sprints[s.name] = s.raw
else:
raise (Exception(
"Fatal error, duplicate Sprint Name (%s) found on board %s." % (s.name, id)))
return sprints
def update_sprint(self, id, name=None, startDate=None, endDate=None):
payload = {}
if name:
payload['name'] = name
if startDate:
payload['startDate'] = startDate
if endDate:
payload['startDate'] = endDate
# if state:
# payload['state']=state
url = self._get_url('sprint/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
return json_loads(r)
def completed_issues(self, board_id, sprint_id):
"""
Return the completed issues for ``board_id`` and ``sprint_id``.
:param board_id: the board retrieving issues from
:param sprint_id: the sprint retieving issues from
"""
# TODO need a better way to provide all the info from the sprintreport
# incompletedIssues went to backlog but not it not completed
# issueKeysAddedDuringSprint used to mark some with a * ?
# puntedIssues are for scope change?
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['completedIssues']]
return issues
def completedIssuesEstimateSum(self, board_id, sprint_id):
"""
Return the total completed points this sprint.
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['completedIssuesEstimateSum']['value']
    def incompleted_issues(self, board_id, sprint_id):
        """
        Return the issues NOT completed during the sprint
        (reads ``incompletedIssues`` from the sprint report).

        :param board_id: the board to retrieve issues from
        :param sprint_id: the sprint to retrieve issues from
        """
        r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
                                base=self.AGILE_BASE_URL)
        issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
                  r_json['contents']['incompletedIssues']]
        return issues
def sprint_info(self, board_id, sprint_id):
"""
Return the information about a sprint.
:param board_id: the board retrieving issues from
:param sprint_id: the sprint retieving issues from
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['sprint']
# TODO: remove this as we do have Board.delete()
def delete_board(self, id):
"""
Deletes an agile board.
:param id:
:return:
"""
payload = {}
url = self._get_url(
'rapidview/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.delete(
url, data=json.dumps(payload))
def create_board(self, name, project_ids, preset="scrum"):
"""
Create a new board for the ``project_ids``.
:param name: name of the board
:param project_ids: the projects to create the board in
:param preset: what preset to use for this board
:type preset: 'kanban', 'scrum', 'diy'
"""
payload = {}
if isinstance(project_ids, string_types):
ids = []
for p in project_ids.split(','):
ids.append(self.project(p).id)
project_ids = ','.join(ids)
payload['name'] = name
if isinstance(project_ids, string_types):
project_ids = project_ids.split(',')
payload['projectIds'] = project_ids
payload['preset'] = preset
url = self._get_url(
'rapidview/create/presets', base=self.AGILE_BASE_URL)
r = self._session.post(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Board(self._options, self._session, raw=raw_issue_json)
def create_sprint(self, name, board_id, startDate=None, endDate=None):
"""
Create a new sprint for the ``board_id``.
:param name: name of the sprint
:param board_id: the board to add the sprint to
"""
url = self._get_url(
'sprint/%s' % board_id, base=self.AGILE_BASE_URL)
r = self._session.post(
url)
raw_issue_json = json_loads(r)
""" now r contains something like:
{
"id": 742,
"name": "Sprint 89",
"state": "FUTURE",
"linkedPagesCount": 0,
"startDate": "None",
"endDate": "None",
"completeDate": "None",
"remoteLinks": []
}"""
payload = {'name': name}
if startDate:
payload["startDate"] = startDate
if endDate:
payload["endDate"] = endDate
url = self._get_url(
'sprint/%s' % raw_issue_json['id'], base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Sprint(self._options, self._session, raw=raw_issue_json)
# TODO: broken, this API does not exsit anymore and we need to use
# issue.update() to perform this operaiton
def add_issues_to_sprint(self, sprint_id, issue_keys):
"""
Add the issues in ``issue_keys`` to the ``sprint_id``. The sprint must
be started but not completed.
If a sprint was completed, then have to also edit the history of the
issue so that it was added to the sprint before it was completed,
preferably before it started. A completed sprint's issues also all have
a resolution set before the completion date.
If a sprint was not started, then have to edit the marker and copy the
rank of each issue too.
:param sprint_id: the sprint to add issues to
:param issue_keys: the issues to add to the sprint
"""
data = {}
data['issueKeys'] = issue_keys
url = self._get_url('sprint/%s/issues/add' %
(sprint_id), base=self.AGILE_BASE_URL)
r = self._session.put(url, data=json.dumps(data))
def add_issues_to_epic(self, epic_id, issue_keys, ignore_epics=True):
"""
Add the issues in ``issue_keys`` to the ``epic_id``.
:param epic_id: the epic to add issues to
:param issue_keys: the issues to add to the epic
:param ignore_epics: ignore any issues listed in ``issue_keys`` that are epics
"""
data = {}
data['issueKeys'] = issue_keys
data['ignoreEpics'] = ignore_epics
url = self._get_url('epics/%s/add' %
epic_id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
def rank(self, issue, next_issue):
"""
Rank an issue before another using the default Ranking field, the one named 'Rank'.
:param issue: issue key of the issue to be ranked before the second one.
:param next_issue: issue key of the second issue.
"""
# {"issueKeys":["ANERDS-102"],"rankBeforeKey":"ANERDS-94","rankAfterKey":"ANERDS-7","customFieldId":11431}
if not self._rank:
for field in self.fields():
if field['name'] == 'Rank' and field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-global-rank":
self._rank = field['schema']['customId']
data = {
"issueKeys": [issue], "rankBeforeKey": next_issue, "customFieldId": self._rank}
url = self._get_url('rank', base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
class GreenHopper(JIRA):
    # Deprecated thin wrapper: GreenHopper functionality was merged into JIRA.
    # NOTE(review): `async` became a reserved keyword in Python 3.7, so this
    # signature only parses on older interpreters — confirm the supported
    # Python versions before touching this parameter name.
    def __init__(self, options=None, basic_auth=None, oauth=None, async=None):
        warnings.warn(
            "GreenHopper() class is deprecated, just use JIRA() instead.", DeprecationWarning)
        self._rank = None
        JIRA.__init__(
            self, options=options, basic_auth=basic_auth, oauth=oauth, async=async)
|
awurster/jira
|
jira/client.py
|
Python
|
bsd-2-clause
| 100,625
|
"""Clears the Cache"""
from django.core.cache import cache
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """
    Management command that flushes Django's default cache backend.
    """
    help = "Clears the Cache"
    def handle(self, **options):
        """Clear every entry from the default cache and report on stdout."""
        cache.clear()
        self.stdout.write('Cleared cache\n')
|
podiobooks/podiobooks
|
podiobooks/management/commands/clear_cache.py
|
Python
|
gpl-3.0
| 345
|
# -*- coding: utf-8 -*-
SFDC_COUNTRIES = {
'af': 'Afghanistan',
'ax': 'Aland Islands',
'al': 'Albania',
'dz': 'Algeria',
'as': 'American Samoa',
'ad': 'Andorra',
'ao': 'Angola',
'ai': 'Anguilla',
'aq': 'Antarctica',
'ag': 'Antigua and Barbuda',
'ar': 'Argentina',
'am': 'Armenia',
'aw': 'Aruba',
'au': 'Australia',
'at': 'Austria',
'az': 'Azerbaijan',
'bs': 'Bahamas',
'bh': 'Bahrain',
'bd': 'Bangladesh',
'bb': 'Barbados',
'by': 'Belarus',
'be': 'Belgium',
'bz': 'Belize',
'bj': 'Benin',
'bm': 'Bermuda',
'bt': 'Bhutan',
'bo': 'Bolivia, Plurinational State of',
'bq': 'Bonaire, Sint Eustatius and Saba',
'ba': 'Bosnia and Herzegovina',
'bw': 'Botswana',
'bv': 'Bouvet Island',
'br': 'Brazil',
'io': 'British Indian Ocean Territory',
'bn': 'Brunei Darussalam',
'bg': 'Bulgaria',
'bf': 'Burkina Faso',
'bi': 'Burundi',
'kh': 'Cambodia',
'cm': 'Cameroon',
'ca': 'Canada',
'cv': 'Cape Verde',
'ky': 'Cayman Islands',
'cf': 'Central African Republic',
'td': 'Chad',
'cl': 'Chile',
'cn': 'China',
'cx': 'Christmas Island',
'cc': 'Cocos (Keeling) Islands',
'co': 'Colombia',
'km': 'Comoros',
'cg': 'Congo',
'cd': 'Congo, the Democratic Republic of the',
'ck': 'Cook Islands',
'cr': 'Costa Rica',
'ci': 'Cote d\'Ivoire',
'hr': 'Croatia',
'cu': 'Cuba',
'cw': 'Curaçao',
'cy': 'Cyprus',
'cz': 'Czech Republic',
'dk': 'Denmark',
'dj': 'Djibouti',
'dm': 'Dominica',
'do': 'Dominican Republic',
'ec': 'Ecuador',
'eg': 'Egypt',
'sv': 'El Salvador',
'gq': 'Equatorial Guinea',
'er': 'Eritrea',
'ee': 'Estonia',
'et': 'Ethiopia',
'fk': 'Falkland Islands (Malvinas)',
'fo': 'Faroe Islands',
'fj': 'Fiji',
'fi': 'Finland',
'fr': 'France',
'gf': 'French Guiana',
'pf': 'French Polynesia',
'tf': 'French Southern Territories',
'ga': 'Gabon',
'gm': 'Gambia',
'ge': 'Georgia',
'de': 'Germany',
'gh': 'Ghana',
'gi': 'Gibraltar',
'gr': 'Greece',
'gl': 'Greenland',
'gd': 'Grenada',
'gp': 'Guadeloupe',
'gu': 'Guam',
'gt': 'Guatemala',
'gg': 'Guernsey',
'gn': 'Guinea',
'gw': 'Guinea-Bissau',
'gy': 'Guyana',
'ht': 'Haiti',
'hm': 'Heard Island and McDonald Islands',
'va': 'Holy See (Vatican City State)',
'hn': 'Honduras',
'hk': 'Hong Kong',
'hu': 'Hungary',
'is': 'Iceland',
'in': 'India',
'id': 'Indonesia',
'ir': 'Iran, Islamic Republic of',
'iq': 'Iraq',
'ie': 'Ireland',
'im': 'Isle of Man',
'il': 'Israel',
'it': 'Italy',
'jm': 'Jamaica',
'jp': 'Japan',
'je': 'Jersey',
'jo': 'Jordan',
'kz': 'Kazakhstan',
'ke': 'Kenya',
'ki': 'Kiribati',
'kp': 'Korea, Democratic People\'s Republic of',
'kr': 'Korea, Republic of',
'kw': 'Kuwait',
'kg': 'Kyrgyzstan',
'la': 'Lao People\'s Democratic Republic',
'lv': 'Latvia',
'lb': 'Lebanon',
'ls': 'Lesotho',
'lr': 'Liberia',
'ly': 'Libya',
'li': 'Liechtenstein',
'lt': 'Lithuania',
'lu': 'Luxembourg',
'mo': 'Macao',
'mk': 'Macedonia, the former Yugoslav Republic of',
'mg': 'Madagascar',
'mw': 'Malawi',
'my': 'Malaysia',
'mv': 'Maldives',
'ml': 'Mali',
'mt': 'Malta',
'mh': 'Marshall Islands',
'mq': 'Martinique',
'mr': 'Mauritania',
'mu': 'Mauritius',
'yt': 'Mayotte',
'mx': 'Mexico',
'fm': 'Micronesia',
'md': 'Moldova, Republic of',
'mc': 'Monaco',
'mn': 'Mongolia',
'me': 'Montenegro',
'ms': 'Montserrat',
'ma': 'Morocco',
'mz': 'Mozambique',
'mm': 'Myanmar',
'na': 'Namibia',
'nr': 'Nauru',
'np': 'Nepal',
'nl': 'Netherlands',
'an': 'Netherlands Antilles',
'nc': 'New Caledonia',
'nz': 'New Zealand',
'ni': 'Nicaragua',
'ne': 'Niger',
'ng': 'Nigeria',
'nu': 'Niue',
'nf': 'Norfolk Island',
'mp': 'Northern Mariana Islands',
'no': 'Norway',
'om': 'Oman',
'pk': 'Pakistan',
'pw': 'Palau',
'ps': 'Palestine',
'pa': 'Panama',
'pg': 'Papua New Guinea',
'py': 'Paraguay',
'pe': 'Peru',
'ph': 'Philippines',
'pn': 'Pitcairn',
'pl': 'Poland',
'pt': 'Portugal',
'pr': 'Puerto Rico',
'qa': 'Qatar',
're': 'Reunion',
'ro': 'Romania',
'ru': 'Russian Federation',
'rw': 'Rwanda',
'bl': 'Saint Barthélemy',
'sh': 'Saint Helena, Ascension and Tristan da Cunha',
'kn': 'Saint Kitts and Nevis',
'lc': 'Saint Lucia',
'mf': 'Saint Martin (French part)',
'pm': 'Saint Pierre and Miquelon',
'vc': 'Saint Vincent and the Grenadines',
'ws': 'Samoa',
'sm': 'San Marino',
'st': 'Sao Tome and Principe',
'sa': 'Saudi Arabia',
'sn': 'Senegal',
'rs': 'Serbia',
'sc': 'Seychelles',
'sl': 'Sierra Leone',
'sg': 'Singapore',
'sx': 'Sint Maarten (Dutch part)',
'sk': 'Slovakia',
'si': 'Slovenia',
'sb': 'Solomon Islands',
'so': 'Somalia',
'za': 'South Africa',
'gs': 'South Georgia and the South Sandwich Islands',
'ss': 'South Sudan',
'es': 'Spain',
'lk': 'Sri Lanka',
'sd': 'Sudan',
'sr': 'Suriname',
'sj': 'Svalbard and Jan Mayen',
'sz': 'Swaziland',
'se': 'Sweden',
'ch': 'Switzerland',
'sy': 'Syrian Arab Republic',
'tw': 'Taiwan',
'tj': 'Tajikistan',
'tz': 'Tanzania, United Republic of',
'th': 'Thailand',
'tl': 'Timor-Leste',
'tg': 'Togo',
'tk': 'Tokelau',
'to': 'Tonga',
'tt': 'Trinidad and Tobago',
'tn': 'Tunisia',
'tr': 'Turkey',
'tm': 'Turkmenistan',
'tc': 'Turks and Caicos Islands',
'tv': 'Tuvalu',
'vi': 'U.S. Virgin Islands',
'ug': 'Uganda',
'ua': 'Ukraine',
'ae': 'United Arab Emirates',
'gb': 'United Kingdom',
'us': 'United States',
'um': 'United States Minor Outlying Islands',
'uy': 'Uruguay',
'uz': 'Uzbekistan',
'vu': 'Vanuatu',
've': 'Venezuela, Bolivarian Republic of',
'vn': 'Viet Nam',
'vg': 'Virgin Islands, British',
'wf': 'Wallis and Futuna',
'eh': 'Western Sahara',
'ye': 'Yemen',
'zm': 'Zambia',
'zw': 'Zimbabwe',
}
SFDC_COUNTRIES_LIST = list(SFDC_COUNTRIES.keys())
COUNTRY_CODES_MAP = {
'afg': 'af',
'ala': 'ax',
'alb': 'al',
'dza': 'dz',
'asm': 'as',
'and': 'ad',
'ago': 'ao',
'aia': 'ai',
'ata': 'aq',
'atg': 'ag',
'arg': 'ar',
'arm': 'am',
'abw': 'aw',
'aus': 'au',
'aut': 'at',
'aze': 'az',
'bhs': 'bs',
'bhr': 'bh',
'bgd': 'bd',
'brb': 'bb',
'blr': 'by',
'bel': 'be',
'blz': 'bz',
'ben': 'bj',
'bmu': 'bm',
'btn': 'bt',
'bol': 'bo',
'bih': 'ba',
'bwa': 'bw',
'bvt': 'bv',
'bra': 'br',
'vgb': 'vg',
'iot': 'io',
'brn': 'bn',
'bgr': 'bg',
'bfa': 'bf',
'bdi': 'bi',
'khm': 'kh',
'cmr': 'cm',
'can': 'ca',
'cpv': 'cv',
'cym': 'ky',
'caf': 'cf',
'tcd': 'td',
'chl': 'cl',
'chn': 'cn',
'hkg': 'hk',
'mac': 'mo',
'cxr': 'cx',
'cck': 'cc',
'col': 'co',
'com': 'km',
'cog': 'cg',
'cod': 'cd',
'cok': 'ck',
'cri': 'cr',
'civ': 'ci',
'hrv': 'hr',
'cub': 'cu',
'cyp': 'cy',
'cze': 'cz',
'dnk': 'dk',
'dji': 'dj',
'dma': 'dm',
'dom': 'do',
'ecu': 'ec',
'egy': 'eg',
'slv': 'sv',
'gnq': 'gq',
'eri': 'er',
'est': 'ee',
'eth': 'et',
'flk': 'fk',
'fro': 'fo',
'fji': 'fj',
'fin': 'fi',
'fra': 'fr',
'guf': 'gf',
'pyf': 'pf',
'atf': 'tf',
'gab': 'ga',
'gmb': 'gm',
'geo': 'ge',
'deu': 'de',
'gha': 'gh',
'gib': 'gi',
'grc': 'gr',
'grl': 'gl',
'grd': 'gd',
'glp': 'gp',
'gum': 'gu',
'gtm': 'gt',
'ggy': 'gg',
'gin': 'gn',
'gnb': 'gw',
'guy': 'gy',
'hti': 'ht',
'hmd': 'hm',
'vat': 'va',
'hnd': 'hn',
'hun': 'hu',
'isl': 'is',
'ind': 'in',
'idn': 'id',
'irn': 'ir',
'irq': 'iq',
'irl': 'ie',
'imn': 'im',
'isr': 'il',
'ita': 'it',
'jam': 'jm',
'jpn': 'jp',
'jey': 'je',
'jor': 'jo',
'kaz': 'kz',
'ken': 'ke',
'kir': 'ki',
'prk': 'kp',
'kor': 'kr',
'kwt': 'kw',
'kgz': 'kg',
'lao': 'la',
'lva': 'lv',
'lbn': 'lb',
'lso': 'ls',
'lbr': 'lr',
'lby': 'ly',
'lie': 'li',
'ltu': 'lt',
'lux': 'lu',
'mkd': 'mk',
'mdg': 'mg',
'mwi': 'mw',
'mys': 'my',
'mdv': 'mv',
'mli': 'ml',
'mlt': 'mt',
'mhl': 'mh',
'mtq': 'mq',
'mrt': 'mr',
'mus': 'mu',
'myt': 'yt',
'mex': 'mx',
'fsm': 'fm',
'mda': 'md',
'mco': 'mc',
'mng': 'mn',
'mne': 'me',
'msr': 'ms',
'mar': 'ma',
'moz': 'mz',
'mmr': 'mm',
'nam': 'na',
'nru': 'nr',
'npl': 'np',
'nld': 'nl',
'ant': 'an',
'ncl': 'nc',
'nzl': 'nz',
'nic': 'ni',
'ner': 'ne',
'nga': 'ng',
'niu': 'nu',
'nfk': 'nf',
'mnp': 'mp',
'nor': 'no',
'omn': 'om',
'pak': 'pk',
'plw': 'pw',
'pse': 'ps',
'pan': 'pa',
'png': 'pg',
'pry': 'py',
'per': 'pe',
'phl': 'ph',
'pcn': 'pn',
'pol': 'pl',
'prt': 'pt',
'pri': 'pr',
'qat': 'qa',
'reu': 're',
'rou': 'ro',
'rus': 'ru',
'rwa': 'rw',
'blm': 'bl',
'shn': 'sh',
'kna': 'kn',
'lca': 'lc',
'maf': 'mf',
'spm': 'pm',
'vct': 'vc',
'wsm': 'ws',
'smr': 'sm',
'stp': 'st',
'sau': 'sa',
'sen': 'sn',
'srb': 'rs',
'syc': 'sc',
'sle': 'sl',
'sgp': 'sg',
'svk': 'sk',
'svn': 'si',
'slb': 'sb',
'som': 'so',
'zaf': 'za',
'sgs': 'gs',
'ssd': 'ss',
'esp': 'es',
'lka': 'lk',
'sdn': 'sd',
'sur': 'sr',
'sjm': 'sj',
'swz': 'sz',
'swe': 'se',
'che': 'ch',
'syr': 'sy',
'twn': 'tw',
'tjk': 'tj',
'tza': 'tz',
'tha': 'th',
'tls': 'tl',
'tgo': 'tg',
'tkl': 'tk',
'ton': 'to',
'tto': 'tt',
'tun': 'tn',
'tur': 'tr',
'tkm': 'tm',
'tca': 'tc',
'tuv': 'tv',
'uga': 'ug',
'ukr': 'ua',
'are': 'ae',
'gbr': 'gb',
'usa': 'us',
'umi': 'um',
'ury': 'uy',
'uzb': 'uz',
'vut': 'vu',
'ven': 've',
'vnm': 'vn',
'vir': 'vi',
'wlf': 'wf',
'esh': 'eh',
'yem': 'ye',
'zmb': 'zm',
'zwe': 'zw',
}
def convert_country_3_to_2(ccode):
    """Map an ISO 3166-1 alpha-3 country code to its alpha-2 equivalent.

    Lookup is case-insensitive; returns None for unknown codes.
    """
    return COUNTRY_CODES_MAP.get(ccode.lower())
|
glogiotatidis/basket
|
basket/news/country_codes.py
|
Python
|
mpl-2.0
| 10,229
|
# -*- coding: utf-8 -*-
"""The :class:`Schema` class, including its metaclass and options (class Meta)."""
from __future__ import absolute_import, unicode_literals
from collections import defaultdict, Mapping
import copy
import datetime as dt
import decimal
import inspect
import json
import types
import uuid
import warnings
from collections import namedtuple
from functools import partial
from marshmallow import base, fields, utils, class_registry, marshalling
from marshmallow.compat import (with_metaclass, iteritems, text_type,
binary_type, OrderedDict)
from marshmallow.orderedset import OrderedSet
from marshmallow.decorators import (PRE_DUMP, POST_DUMP, PRE_LOAD, POST_LOAD,
VALIDATES, VALIDATES_SCHEMA)
#: Return type of :meth:`Schema.dump` including serialized data and errors
MarshalResult = namedtuple('MarshalResult', ['data', 'errors'])
#: Return type of :meth:`Schema.load`, including deserialized data and errors
UnmarshalResult = namedtuple('UnmarshalResult', ['data', 'errors'])
def _get_fields(attrs, field_class, pop=False, ordered=False):
    """Get fields from a class. If ordered=True, fields will sorted by creation index.

    :param attrs: Mapping of class attributes
    :param type field_class: Base field class
    :param bool pop: Remove matching fields
    :param bool ordered: Sort the result by each field's declaration order
    """
    extract = attrs.pop if pop else attrs.get
    matched = [
        (name, extract(name))
        for name, value in list(iteritems(attrs))
        if utils.is_instance_or_subclass(value, field_class)
    ]
    if ordered:
        # _creation_index records declaration order across all field instances.
        matched.sort(key=lambda pair: pair[1]._creation_index)
    return matched
# This function allows Schemas to inherit from non-Schema classes and ensures
# inheritance according to the MRO
def _get_fields_by_mro(klass, field_class, ordered=False):
    """Collect fields from a class, following its method resolution order. The
    class itself is excluded from the search; only its parents are checked. Get
    fields from ``_declared_fields`` if available, else use ``__dict__``.

    :param type klass: Class whose fields to retrieve
    :param type field_class: Base field class
    """
    collected = []
    # Walk the MRO from the most distant ancestor downwards (the class itself,
    # mro[0], is excluded) so inherited fields keep the correct order.
    for parent in inspect.getmro(klass)[:0:-1]:
        source = getattr(parent, '_declared_fields', parent.__dict__)
        collected.extend(_get_fields(source, field_class, ordered=ordered))
    return collected
class SchemaMeta(type):
    """Metaclass for the Schema class. Binds the declared fields to
    a ``_declared_fields`` attribute, which is a dictionary mapping attribute
    names to field objects. Also sets the ``opts`` class attribute, which is
    the Schema class's ``class Meta`` options.
    """
    # Class-level callback lists that must not be shared between classes.
    FUNC_LISTS = ('__validators__', '__data_handlers__', '__preprocessors__')
    def __new__(mcs, name, bases, attrs):
        meta = attrs.get('Meta')
        ordered = getattr(meta, 'ordered', False)
        if not ordered:
            # Inherit 'ordered' option
            # Warning: We loop through bases instead of MRO because we don't
            # yet have access to the class object
            # (i.e. can't call super before we have fields)
            for base_ in bases:
                if hasattr(base_, 'Meta') and hasattr(base_.Meta, 'ordered'):
                    ordered = base_.Meta.ordered
                    break
            else:
                ordered = False
        # pop=True removes field attributes from the class body so they only
        # live in _declared_fields, not as plain class attributes.
        cls_fields = _get_fields(attrs, base.FieldABC, pop=True, ordered=ordered)
        klass = super(SchemaMeta, mcs).__new__(mcs, name, bases, attrs)
        inherited_fields = _get_fields_by_mro(klass, base.FieldABC)
        # Use getattr rather than attrs['Meta'] so that we get inheritance for free
        meta = getattr(klass, 'Meta')
        # Set klass.opts in __new__ rather than __init__ so that it is accessible in
        # get_declared_fields
        klass.opts = klass.OPTIONS_CLASS(meta)
        # Add fields specified in the `include` class Meta option
        cls_fields += list(klass.opts.include.items())
        dict_cls = OrderedDict if ordered else dict
        # Assign _declared_fields on class
        klass._declared_fields = mcs.get_declared_fields(
            klass=klass,
            cls_fields=cls_fields,
            inherited_fields=inherited_fields,
            dict_cls=dict_cls
        )
        return klass
    @classmethod
    def get_declared_fields(mcs, klass, cls_fields, inherited_fields, dict_cls):
        """Returns a dictionary of field_name => `Field` pairs declared on the class.
        This is exposed mainly so that plugins can add additional fields, e.g. fields
        computed from class Meta options.

        :param type klass: The class object.
        :param dict cls_fields: The fields declared on the class, including those added
            by the ``include`` class Meta option.
        :param dict inherited_fields: Inherited fields.
        :param type dict_cls: Either `dict` or `OrderedDict`, depending on whether
            the user specified `ordered=True`.
        """
        return dict_cls(inherited_fields + cls_fields)
    # NOTE: self is the class object
    def __init__(self, name, bases, attrs):
        super(SchemaMeta, self).__init__(name, bases, attrs)
        class_registry.register(name, self)
        self._copy_func_attrs()
        self._resolve_processors()
    def _copy_func_attrs(self):
        """Copy non-shareable class function lists

        Need to copy validators, data handlers, and preprocessors lists so they
        are not shared among subclasses and ancestors.
        """
        for attr in self.FUNC_LISTS:
            attr_copy = copy.copy(getattr(self, attr))
            setattr(self, attr, attr_copy)
    def _resolve_processors(self):
        """Add in the decorated processors

        By doing this after constructing the class, we let standard inheritance
        do all the hard work.
        """
        mro = inspect.getmro(self)
        self.__processors__ = defaultdict(list)
        for attr_name in dir(self):
            # Need to look up the actual descriptor, not whatever might be
            # bound to the class. This needs to come from the __dict__ of the
            # declaring class.
            for parent in mro:
                try:
                    attr = parent.__dict__[attr_name]
                except KeyError:
                    continue
                else:
                    break
            else:
                # In case we didn't find the attribute and didn't break above.
                # We should never hit this - it's just here for completeness
                # to exclude the possibility of attr being undefined.
                continue
            try:
                processor_tags = attr.__marshmallow_tags__
            except AttributeError:
                continue
            for tag in processor_tags:
                # Use name here so we can get the bound method later, in case
                # the processor was a descriptor or something.
                self.__processors__[tag].append(attr_name)
class SchemaOpts(object):
    """class Meta options for the :class:`Schema`. Defines defaults."""
    def __init__(self, meta):
        # Options that must be sequences are validated explicitly.
        self.fields = getattr(meta, 'fields', ())
        if not isinstance(self.fields, (list, tuple)):
            raise ValueError("`fields` option must be a list or tuple.")
        self.additional = getattr(meta, 'additional', ())
        if not isinstance(self.additional, (list, tuple)):
            raise ValueError("`additional` option must be a list or tuple.")
        if self.fields and self.additional:
            raise ValueError("Cannot set both `fields` and `additional` options"
                             " for the same Schema.")
        self.exclude = getattr(meta, 'exclude', ())
        if not isinstance(self.exclude, (list, tuple)):
            raise ValueError("`exclude` must be a list or tuple.")
        # skip_missing is obsolete; warn if a Meta still declares it.
        if hasattr(meta, 'skip_missing'):
            warnings.warn(
                'The skip_missing option is no longer necessary. Missing inputs passed to '
                'Schema.dump will be excluded from the serialized output by default.',
                UserWarning
            )
        # Simple options read straight off Meta with a default value.
        for option, default in (
                ('strict', False),
                ('dateformat', None),
                ('json_module', json),
                ('ordered', False),
                ('index_errors', True),
                ('include', {}),
                ('load_only', ()),
                ('dump_only', ()),
        ):
            setattr(self, option, getattr(meta, option, default))
class BaseSchema(base.SchemaABC):
    """Base schema class with which to define custom schemas.

    Example usage:

    .. code-block:: python

        import datetime as dt
        from marshmallow import Schema, fields

        class Album(object):
            def __init__(self, title, release_date):
                self.title = title
                self.release_date = release_date

        class AlbumSchema(Schema):
            title = fields.Str()
            release_date = fields.Date()

        # Or, equivalently
        class AlbumSchema2(Schema):
            class Meta:
                fields = ("title", "release_date")

        album = Album("Beggars Banquet", dt.date(1968, 12, 6))
        schema = AlbumSchema()
        data, errors = schema.dump(album)
        data  # {'release_date': '1968-12-06', 'title': 'Beggars Banquet'}

    :param dict extra: A dict of extra attributes to bind to the serialized result.
    :param tuple only: A list or tuple of fields to serialize. If `None`, all
        fields will be serialized.
    :param tuple exclude: A list or tuple of fields to exclude from the
        serialized result.
    :param str prefix: Optional prefix that will be prepended to all the
        serialized field names.
    :param bool strict: If `True`, raise errors if invalid data are passed in
        instead of failing silently and storing the errors.
    :param bool many: Should be set to `True` if ``obj`` is a collection
        so that the object will be serialized to a list.
    :param bool skip_missing: If `True`, don't include key:value pairs in
        serialized results if ``value`` is `None`.
    :param dict context: Optional context passed to :class:`fields.Method` and
        :class:`fields.Function` fields.
    :param tuple load_only: A list or tuple of fields to skip during serialization
    :param tuple dump_only: A list or tuple of fields to skip during
        deserialization, read-only fields
    """
    # Mapping of Python types -> Field classes, used for implicit field
    # creation when `class Meta` lists field names that are not declared.
    TYPE_MAPPING = {
        text_type: fields.String,
        binary_type: fields.String,
        dt.datetime: fields.DateTime,
        float: fields.Float,
        bool: fields.Boolean,
        tuple: fields.Raw,
        list: fields.Raw,
        set: fields.Raw,
        int: fields.Integer,
        uuid.UUID: fields.UUID,
        dt.time: fields.Time,
        dt.date: fields.Date,
        dt.timedelta: fields.TimeDelta,
        decimal.Decimal: fields.Decimal,
    }
    # Class that processes `class Meta` options; subclasses may override.
    OPTIONS_CLASS = SchemaOpts
    #: Custom error handler function. May be `None`.
    __error_handler__ = None
    # NOTE: The below class attributes must initially be `None` so that
    # every subclass references a different list of functions
    #: List of registered post-processing functions.
    __data_handlers__ = None
    #: List of registered schema-level validation functions.
    __validators__ = None
    #: List of registered pre-processing functions.
    __preprocessors__ = None
    #: Function used to get values of an object.
    __accessor__ = None

    class Meta(object):
        """Options object for a Schema.

        Example usage: ::

            class Meta:
                fields = ("id", "email", "date_created")
                exclude = ("password", "secret_attribute")

        Available options:

        - ``fields``: Tuple or list of fields to include in the serialized result.
        - ``additional``: Tuple or list of fields to include *in addition* to the
            explicitly declared fields. ``additional`` and ``fields`` are
            mutually-exclusive options.
        - ``include``: Dictionary of additional fields to include in the schema. It is
            usually better to define fields as class variables, but you may need to
            use this option, e.g., if your fields are Python keywords. May be an
            `OrderedDict`.
        - ``exclude``: Tuple or list of fields to exclude in the serialized result.
        - ``dateformat``: Date format for all DateTime fields that do not have their
            date format explicitly specified.
        - ``strict``: If `True`, raise errors during marshalling rather than
            storing them.
        - ``json_module``: JSON module to use for `loads` and `dumps`.
            Defaults to the ``json`` module in the stdlib.
        - ``ordered``: If `True`, order serialization output according to the
            order in which fields were declared. Output of `Schema.dump` will be a
            `collections.OrderedDict`.
        - ``index_errors``: If `True`, errors dictionaries will include the index
            of invalid items in a collection.
        - ``load_only``: Tuple or list of fields to exclude from serialized results.
        - ``dump_only``: Tuple or list of fields to exclude from deserialization

        .. versionchanged:: 2.0.0
            `__preprocessors__` and `__data_handlers__` are deprecated. Use
            `marshmallow.decorators.pre_load` and `marshmallow.decorators.post_dump` instead.
        """
        pass

    def __init__(self, extra=None, only=(), exclude=(), prefix='', strict=False,
                 many=False, context=None, load_only=(), dump_only=()):
        # copy declared fields from metaclass
        self.declared_fields = copy.deepcopy(self._declared_fields)
        self.many = many
        self.only = only
        self.exclude = exclude
        self.prefix = prefix
        # Constructor arguments take precedence; an empty/falsy argument
        # falls back to the corresponding `class Meta` option.
        self.strict = strict or self.opts.strict
        self.ordered = self.opts.ordered
        self.load_only = set(load_only) or set(self.opts.load_only)
        self.dump_only = set(dump_only) or set(self.opts.dump_only)
        #: Dictionary mapping field_names -> :class:`Field` objects
        self.fields = self.dict_class()
        #: Callable marshalling object
        self._marshal = marshalling.Marshaller(
            prefix=self.prefix
        )
        #: Callable unmarshalling object
        self._unmarshal = marshalling.Unmarshaller()
        self.extra = extra
        self.context = context or {}
        self._update_fields(many=many)

    def __repr__(self):
        return '<{ClassName}(many={self.many}, strict={self.strict})>'.format(
            ClassName=self.__class__.__name__, self=self
        )

    def _postprocess(self, data, many, obj):
        """Apply `extra` attributes, the error handler, and legacy data
        handlers to serialized `data` (mutates and/or replaces `data`)."""
        if self.extra:
            if many:
                for each in data:
                    each.update(self.extra)
            else:
                data.update(self.extra)
        if self._marshal.errors and callable(self.__error_handler__):
            self.__error_handler__(self._marshal.errors, obj)
        # invoke registered callbacks
        # NOTE: these callbacks will mutate the data
        if self.__data_handlers__:
            for callback in self.__data_handlers__:
                if callable(callback):
                    data = callback(self, data, obj)
        return data

    @property
    def dict_class(self):
        """Dict type used for results: `OrderedDict` when `ordered` is set."""
        return OrderedDict if self.ordered else dict

    @property
    def set_class(self):
        """Set type used for field-name bookkeeping; ordered when `ordered`."""
        return OrderedSet if self.ordered else set

    ##### Handler decorators #####

    @classmethod
    def error_handler(cls, func):
        """Decorator that registers an error handler function for the schema.
        The function receives the :class:`Schema` instance, a dictionary of errors,
        and the serialized object (if serializing data) or data dictionary (if
        deserializing data) as arguments.

        Example: ::

            class UserSchema(Schema):
                email = fields.Email()

            @UserSchema.error_handler
            def handle_errors(schema, errors, obj):
                raise ValueError('An error occurred while marshalling {}'.format(obj))

            user = User(email='invalid')
            UserSchema().dump(user)  # => raises ValueError
            UserSchema().load({'email': 'bademail'})  # raises ValueError

        .. versionadded:: 0.7.0
        """
        cls.__error_handler__ = func
        return func

    @classmethod
    def data_handler(cls, func):
        """Decorator that registers a post-processing function.
        The function receives the :class:`Schema` instance, the serialized
        data, and the original object as arguments and should return the
        processed data.

        .. versionadded:: 0.7.0
        .. deprecated:: 2.0.0
            Use `marshmallow.post_dump` instead.
        """
        warnings.warn(
            'Schema.data_handler is deprecated. Use the marshmallow.post_dump decorator '
            'instead.', category=DeprecationWarning
        )
        # Lazily create the per-class list (class attribute starts as None).
        cls.__data_handlers__ = cls.__data_handlers__ or []
        cls.__data_handlers__.append(func)
        return func

    @classmethod
    def validator(cls, func):
        """Decorator that registers a schema validation function to be applied during
        deserialization. The function receives the :class:`Schema` instance and the
        input data as arguments and should return `False` if validation fails.

        .. versionadded:: 1.0
        .. deprecated:: 2.0.0
            Use `marshmallow.validates_schema <marshmallow.decorators.validates_schema>` instead.
        """
        warnings.warn(
            'Schema.validator is deprecated. Use the marshmallow.validates_schema decorator '
            'instead.', category=DeprecationWarning
        )
        cls.__validators__ = cls.__validators__ or []
        cls.__validators__.append(func)
        return func

    @classmethod
    def preprocessor(cls, func):
        """Decorator that registers a preprocessing function to be applied during
        deserialization. The function receives the :class:`Schema` instance and the
        input data as arguments and should return the modified dictionary of data.

        .. versionadded:: 1.0
        .. deprecated:: 2.0.0
            Use `marshmallow.pre_load` instead.
        """
        warnings.warn(
            'Schema.preprocessor is deprecated. Use the marshmallow.pre_load decorator '
            'instead.', category=DeprecationWarning
        )
        cls.__preprocessors__ = cls.__preprocessors__ or []
        cls.__preprocessors__.append(func)
        return func

    @classmethod
    def accessor(cls, func):
        """Decorator that registers a function for pulling values from an object
        to serialize. The function receives the :class:`Schema` instance, the
        ``key`` of the value to get, the ``obj`` to serialize, and an optional
        ``default`` value.
        """
        cls.__accessor__ = func
        return func

    ##### Serialization/Deserialization API #####

    def dump(self, obj, many=None, update_fields=True, **kwargs):
        """Serialize an object to native Python data types according to this
        Schema's fields.

        :param obj: The object to serialize.
        :param bool many: Whether to serialize `obj` as a collection. If `None`, the value
            for `self.many` is used.
        :param bool update_fields: Whether to update the schema's field classes. Typically
            set to `True`, but may be `False` when serializing a homogenous collection.
            This parameter is used by `fields.Nested` to avoid multiple updates.
        :return: A tuple of the form (``data``, ``errors``)
        :rtype: `MarshalResult`, a `collections.namedtuple`

        .. versionadded:: 1.0.0
        """
        many = self.many if many is None else bool(many)
        if not many and utils.is_collection(obj) and not utils.is_keyed_tuple(obj):
            warnings.warn('Implicit collection handling is deprecated. Set '
                          'many=True to serialize a collection.',
                          category=DeprecationWarning)
        # Generators can only be iterated once; materialize before marshalling.
        if isinstance(obj, types.GeneratorType):
            obj = list(obj)
        if update_fields:
            self._update_fields(obj, many=many)
        # Pre-dump processors may replace obj entirely.
        obj = self._invoke_dump_processors(PRE_DUMP, obj, many)
        preresult = self._marshal(
            obj,
            self.fields,
            many=many,
            strict=self.strict,
            accessor=self.__accessor__,
            dict_class=self.dict_class,
            index_errors=self.opts.index_errors,
            **kwargs
        )
        result = self._postprocess(preresult, many, obj=obj)
        errors = self._marshal.errors
        result = self._invoke_dump_processors(POST_DUMP, result, many)
        return MarshalResult(result, errors)

    def dumps(self, obj, many=None, update_fields=True, *args, **kwargs):
        """Same as :meth:`dump`, except return a JSON-encoded string.

        :param obj: The object to serialize.
        :param bool many: Whether to serialize `obj` as a collection. If `None`, the value
            for `self.many` is used.
        :param bool update_fields: Whether to update the schema's field classes. Typically
            set to `True`, but may be `False` when serializing a homogenous collection.
            This parameter is used by `fields.Nested` to avoid multiple updates.
        :return: A tuple of the form (``data``, ``errors``)
        :rtype: `MarshalResult`, a `collections.namedtuple`

        .. versionadded:: 1.0.0
        """
        deserialized, errors = self.dump(obj, many=many, update_fields=update_fields)
        # Extra args/kwargs are forwarded to the configured JSON module's dumps.
        ret = self.opts.json_module.dumps(deserialized, *args, **kwargs)
        return MarshalResult(ret, errors)

    def load(self, data, many=None):
        """Deserialize a data structure to an object defined by this Schema's
        fields and :meth:`make_object`.

        :param dict data: The data to deserialize.
        :param bool many: Whether to deserialize `data` as a collection. If `None`, the
            value for `self.many` is used.
        :return: A tuple of the form (``data``, ``errors``)
        :rtype: `UnmarshalResult`, a `collections.namedtuple`

        .. versionadded:: 1.0.0
        """
        result, errors = self._do_load(data, many, postprocess=True)
        return UnmarshalResult(data=result, errors=errors)

    def loads(self, json_data, many=None, *args, **kwargs):
        """Same as :meth:`load`, except it takes a JSON string as input.

        :param str json_data: A JSON string of the data to deserialize.
        :param bool many: Whether to deserialize `obj` as a collection. If `None`, the
            value for `self.many` is used.
        :return: A tuple of the form (``data``, ``errors``)
        :rtype: `UnmarshalResult`, a `collections.namedtuple`

        .. versionadded:: 1.0.0
        """
        data = self.opts.json_module.loads(json_data, *args, **kwargs)
        return self.load(data, many=many)

    def validate(self, data, many=None):
        """Validate `data` against the schema, returning a dictionary of
        validation errors.

        :param dict data: The data to validate.
        :param bool many: Whether to validate `data` as a collection. If `None`, the
            value for `self.many` is used.
        :return: A dictionary of validation errors.
        :rtype: dict

        .. versionadded:: 1.1.0
        """
        # Load without postprocessing; only the accumulated errors are wanted.
        _, errors = self._do_load(data, many, postprocess=False)
        return errors

    def make_object(self, data):
        """Override-able method that defines how to create the final deserialization
        output. Defaults to noop (i.e. just return ``data`` as is).

        :param dict data: The deserialized data.

        .. versionadded:: 1.0.0
        """
        return data

    ##### Private Helpers #####

    def _do_load(self, data, many=None, postprocess=True):
        """Deserialize `data`, returning the deserialized result and a dictonary of
        validation errors.

        :param data: The data to deserialize.
        :param bool many: Whether to deserialize `data` as a collection. If `None`, the
            value for `self.many` is used.
        :param bool postprocess: Whether to postprocess the data with `make_object`.
        :return: A tuple of the form (`data`, `errors`)
        """
        many = self.many if many is None else bool(many)
        data = self._invoke_load_processors(PRE_LOAD, data, many)
        # Bind self as the first argument of validators and preprocessors
        if self.__validators__:
            validators = [partial(func, self)
                          for func in self.__validators__]
        else:
            validators = []
        if self.__preprocessors__:
            preprocessors = [partial(func, self)
                             for func in self.__preprocessors__]
        else:
            preprocessors = []
        postprocess_funcs = [self.make_object] if postprocess else []
        result = self._unmarshal(
            data,
            self.fields,
            many=many,
            strict=self.strict,
            validators=validators,
            preprocess=preprocessors,
            postprocess=postprocess_funcs,
            dict_class=self.dict_class,
            index_errors=self.opts.index_errors,
        )
        self._invoke_field_validators(data=result, many=many)
        # Run schema-level validators: raw (whole-input) ones first, then
        # the per-item ones.
        self._invoke_validators(raw=True, data=result, original_data=data, many=many)
        self._invoke_validators(raw=False, data=result, original_data=data, many=many)
        errors = self._unmarshal.errors
        if errors and callable(self.__error_handler__):
            self.__error_handler__(errors, data)
        result = self._invoke_load_processors(POST_LOAD, result, many)
        return result, errors

    def _update_fields(self, obj=None, many=False):
        """Update fields based on the passed in object."""
        if self.only:
            # Return only fields specified in fields option
            field_names = self.set_class(self.only)
        elif self.opts.fields:
            # Return fields specified in fields option
            field_names = self.set_class(self.opts.fields)
        elif self.opts.additional:
            # Return declared fields + additional fields
            field_names = (self.set_class(self.declared_fields.keys()) |
                           self.set_class(self.opts.additional))
        else:
            field_names = self.set_class(self.declared_fields.keys())
        # If "exclude" option or param is specified, remove those fields
        excludes = set(self.opts.exclude) | set(self.exclude)
        if excludes:
            field_names = field_names - excludes
        ret = self.__filter_fields(field_names, obj, many=many)
        # Set parents
        self.__set_field_attrs(ret)
        self.fields = ret
        return self.fields

    def __set_field_attrs(self, fields_dict):
        """Update fields with values from schema.

        Also set field load_only and dump_only values if field_name was
        specified in ``class Meta``.
        """
        for field_name, field_obj in iteritems(fields_dict):
            try:
                if field_name in self.load_only:
                    field_obj.load_only = True
                if field_name in self.dump_only:
                    field_obj.dump_only = True
                field_obj._add_to_schema(field_name, self)
            except TypeError:
                # field declared as a class, not an instance
                if (isinstance(field_obj, type) and
                        issubclass(field_obj, base.FieldABC)):
                    msg = ('Field for "{0}" must be declared as a '
                           'Field instance, not a class. '
                           'Did you mean "fields.{1}()"?'
                           .format(field_name, field_obj.__name__))
                    raise TypeError(msg)
        return fields_dict

    def __filter_fields(self, field_names, obj, many=False):
        """Return only those field_name:field_obj pairs specified by
        ``field_names``.

        :param set field_names: Field names to include in the final
            return dictionary.
        :returns: An dict of field_name:field_obj pairs.
        """
        if obj and many:
            try:  # Homogeneous collection
                # Use the first item as a prototype for implicit field types.
                obj_prototype = next(iter(obj))
            except StopIteration:  # Nothing to serialize
                return self.declared_fields
            obj = obj_prototype
        ret = self.dict_class()
        for key in field_names:
            if key in self.declared_fields:
                ret[key] = self.declared_fields[key]
            else:  # Implicit field creation (class Meta 'fields' or 'additional')
                if obj:
                    attribute_type = None
                    try:
                        if isinstance(obj, Mapping):
                            attribute_type = type(obj[key])
                        else:
                            attribute_type = type(getattr(obj, key))
                    except (AttributeError, KeyError) as err:
                        err_type = type(err)
                        raise err_type(
                            '"{0}" is not a valid field for {1}.'.format(key, obj))
                    field_obj = self.TYPE_MAPPING.get(attribute_type, fields.Field)()
                else:  # Object is None
                    field_obj = fields.Field()
                # map key -> field (default to Raw)
                ret[key] = field_obj
        return ret

    def _invoke_dump_processors(self, tag_name, data, many):
        # The raw post-dump processors may do things like add an envelope, so
        # invoke those after invoking the non-raw processors which will expect
        # to get a list of items.
        data = self._invoke_processors(tag_name, raw=False, data=data, many=many)
        data = self._invoke_processors(tag_name, raw=True, data=data, many=many)
        return data

    def _invoke_load_processors(self, tag_name, data, many):
        # This has to invert the order of the dump processors, so run the raw
        # processors first.
        data = self._invoke_processors(tag_name, raw=True, data=data, many=many)
        data = self._invoke_processors(tag_name, raw=False, data=data, many=many)
        return data

    def _invoke_field_validators(self, data, many):
        """Run methods decorated with `validates(field_name)` against the
        corresponding values in `data`; errors accumulate on `self._unmarshal`."""
        for attr_name in self.__processors__[(VALIDATES, False)]:
            validator = getattr(self, attr_name)
            validator_kwargs = validator.__marshmallow_kwargs__[(VALIDATES, False)]
            field_name = validator_kwargs['field_name']
            try:
                field_obj = self.fields[field_name]
            except KeyError:
                raise ValueError('"{0}" field does not exist.'.format(field_name))
            if many:
                for idx, item in enumerate(data):
                    try:
                        value = item[field_name]
                    except KeyError:
                        # Field absent from this item; nothing to validate.
                        pass
                    else:
                        self._unmarshal.call_and_store(
                            getter_func=validator,
                            data=value,
                            field_name=field_name,
                            field_obj=field_obj,
                            index=(idx if self.opts.index_errors else None)
                        )
            else:
                try:
                    value = data[field_name]
                except KeyError:
                    pass
                else:
                    self._unmarshal.call_and_store(
                        getter_func=validator,
                        data=value,
                        field_name=field_name,
                        field_obj=field_obj
                    )

    def _invoke_validators(self, raw, data, original_data, many):
        """Run methods decorated with `validates_schema` (matching `raw`);
        errors accumulate on `self._unmarshal`. Always returns None."""
        for attr_name in self.__processors__[(VALIDATES_SCHEMA, raw)]:
            validator = getattr(self, attr_name)
            validator_kwargs = validator.__marshmallow_kwargs__[(VALIDATES_SCHEMA, raw)]
            pass_original = validator_kwargs.get('pass_original', False)
            if raw:
                # Raw validators see the whole input and receive `many`.
                validator = partial(validator, many=many)
            if many:
                for idx, item in enumerate(data):
                    self._unmarshal._run_validator(validator,
                        item, original_data, self.fields, strict=self.strict, many=many,
                        index=idx, pass_original=pass_original)
            else:
                self._unmarshal._run_validator(validator,
                    data, original_data, self.fields, strict=self.strict, many=many,
                    pass_original=pass_original)
        return None

    def _invoke_processors(self, tag_name, raw, data, many):
        """Run all methods registered under (tag_name, raw); a processor's
        None return keeps the previous data (see `utils.if_none`)."""
        for attr_name in self.__processors__[(tag_name, raw)]:
            # This will be a bound method.
            processor = getattr(self, attr_name)
            # It's probably not worth the extra LoC to hoist this branch out of
            # the loop.
            if raw:
                data = utils.if_none(processor(data, many), data)
            elif many:
                data = [utils.if_none(processor(item), item) for item in data]
            else:
                data = utils.if_none(processor(data), data)
        return data
# Public Schema class: BaseSchema combined with SchemaMeta (which collects
# declared fields and decorated processor methods), spelled with
# ``with_metaclass`` for Python 2/3 compatibility.
class Schema(with_metaclass(SchemaMeta, BaseSchema)):
    # Re-export the base docstring so ``help(Schema)`` shows full usage docs.
    __doc__ = BaseSchema.__doc__
|
VladimirPal/marshmallow
|
marshmallow/schema.py
|
Python
|
mit
| 34,135
|
#!/usr/bin/env python
import sys
import argparse
import regrws.convert
import regrws.payload.org
import regrws.method.org
try:
from apikey import APIKEY
except ImportError:
APIKEY = None
description = 'Create ARIN recipient ORG from template'
epilog = 'API key can be omitted if APIKEY is defined in apikey.py'
arg_parser = argparse.ArgumentParser(description=description, epilog=epilog)
arg_parser.add_argument('-k', '--key', help='ARIN API key',
required=False if APIKEY else True, dest='api_key')
arg_parser.add_argument('-s', '--source-address', help='Source IP address')
arg_parser.add_argument('net_handle', metavar='NET_HANDLE')
arg_parser.add_argument('-t', '--test', action='store_true',
help='Test mode: omit actual RESTful call')
arg_parser.add_argument('template_file', metavar='TEMPLATE_FILE')
args = arg_parser.parse_args()
if args.api_key:
APIKEY = args.api_key
with open(args.template_file, 'r') as fh:
template = fh.readlines()
parser = regrws.convert.DictFromTemplate(template)
converter = regrws.convert.PayloadFromTemplateDict(parser.run(),
regrws.payload.org.org)
payload_in = converter.run()
if args.test:
payload_in.export(sys.stderr, 0)
else:
session = regrws.restful.Session(APIKEY, args.source_address)
method = regrws.method.org.Create(session, args.net_handle)
try:
payload_out = method.call(payload_in)
except regrws.restful.RegRwsError as exception:
print exception.args
|
RhubarbSin/arin-reg-rws
|
org_create.py
|
Python
|
mit
| 1,557
|
import bisect
import difflib
import sys
import warnings
import rope.base.oi.doa
import rope.base.oi.objectinfo
import rope.base.oi.soa
from rope.base import ast, exceptions, taskhandle, utils, stdmods
from rope.base.exceptions import ModuleNotFoundError
from rope.base.pyobjectsdef import PyModule, PyPackage, PyClass
import rope.base.resources
import rope.base.resourceobserver
from rope.base import builtins
class PyCore(object):
    """Project-wide entry point for Python analysis in rope.

    Resolves module names to resources, builds and caches `PyModule` /
    `PyPackage` objects, and drives static (SOA) and dynamic (DOA) object
    analysis.
    """

    def __init__(self, project):
        self.project = project
        self._init_resource_observer()
        # Callbacks run whenever a resource's cached data must be invalidated.
        self.cache_observers = []
        self.module_cache = _ModuleCache(self)
        self.extension_cache = _ExtensionCache(self)
        self.object_info = rope.base.oi.objectinfo.ObjectInfoManager(project)
        self._init_python_files()
        self._init_automatic_soa()
        self._init_source_folders()

    def _init_python_files(self):
        """Compile the optional `python_files` name patterns from prefs."""
        self.python_matcher = None
        patterns = self.project.prefs.get('python_files', None)
        if patterns is not None:
            self.python_matcher = rope.base.resources._ResourceMatcher()
            self.python_matcher.set_patterns(patterns)

    def _init_resource_observer(self):
        """Invalidate cached module data when watched resources change."""
        callback = self._invalidate_resource_cache
        observer = rope.base.resourceobserver.ResourceObserver(
            changed=callback, moved=callback, removed=callback)
        self.observer = rope.base.resourceobserver.FilteredResourceObserver(observer)
        self.project.add_observer(self.observer)

    def _init_source_folders(self):
        """Resolve the `source_folders` preference paths into resources."""
        self._custom_source_folders = []
        for path in self.project.prefs.get('source_folders', []):
            folder = self.project.get_resource(path)
            self._custom_source_folders.append(folder)

    def _init_automatic_soa(self):
        """If enabled, re-run SOA on changed scopes after every file change."""
        if not self.automatic_soa:
            return
        callback = self._file_changed_for_soa
        observer = rope.base.resourceobserver.ResourceObserver(
            changed=callback, moved=callback, removed=callback)
        self.project.add_observer(observer)

    @property
    def automatic_soa(self):
        # `automatic_soi` appears to be an older spelling of this preference;
        # its value is used as the default when `automatic_soa` is unset.
        auto_soa = self.project.prefs.get('automatic_soi', None)
        return self.project.prefs.get('automatic_soa', auto_soa)

    def _file_changed_for_soa(self, resource, new_resource=None):
        """Run SOA against the scopes of `resource` that just changed."""
        old_contents = self.project.history.\
            contents_before_current_change(resource)
        if old_contents is not None:
            perform_soa_on_changed_scopes(self.project, resource, old_contents)

    def is_python_file(self, resource):
        """Tell whether `resource` is a Python file (by pattern or .py suffix)."""
        if resource.is_folder():
            return False
        if self.python_matcher is None:
            return resource.name.endswith('.py')
        return self.python_matcher.does_match(resource)

    def get_module(self, name, folder=None):
        """Returns a `PyObject` if the module was found."""
        # check if this is a builtin module
        pymod = self._builtin_module(name)
        if pymod is not None:
            return pymod
        module = self.find_module(name, folder)
        if module is None:
            raise ModuleNotFoundError('Module %s not found' % name)
        return self.resource_to_pyobject(module)

    def _builtin_submodules(self, modname):
        """Return {name: pymodule} for extension modules directly under `modname`."""
        result = {}
        for extension in self.extension_modules:
            if extension.startswith(modname + '.'):
                name = extension[len(modname) + 1:]
                # Only direct children; deeper dotted names are skipped.
                if '.' not in name:
                    result[name] = self._builtin_module(extension)
        return result

    def _builtin_module(self, name):
        """Return the builtin/extension module for `name`, or None."""
        return self.extension_cache.get_pymodule(name)

    def get_relative_module(self, name, folder, level):
        """Return the `PyObject` for a relative import; raise if not found."""
        module = self.find_relative_module(name, folder, level)
        if module is None:
            raise ModuleNotFoundError('Module %s not found' % name)
        return self.resource_to_pyobject(module)

    def get_string_module(self, code, resource=None, force_errors=False):
        """Returns a `PyObject` object for the given code

        If `force_errors` is `True`, `exceptions.ModuleSyntaxError` is
        raised if module has syntax errors. This overrides
        ``ignore_syntax_errors`` project config.
        """
        return PyModule(self, code, resource, force_errors=force_errors)

    def get_string_scope(self, code, resource=None):
        """Returns a `Scope` object for the given code"""
        return self.get_string_module(code, resource).get_scope()

    def _invalidate_resource_cache(self, resource, new_resource=None):
        """Notify all cache observers that `resource` changed."""
        for observer in self.cache_observers:
            observer(resource)

    def _find_module_in_folder(self, folder, modname):
        """Return the resource for dotted `modname` under `folder`.

        Returns a folder for a package, a `.py` file for a module, or
        None (implicitly) when nothing matches.
        """
        module = folder
        packages = modname.split('.')
        for pkg in packages[:-1]:
            if module.is_folder() and module.has_child(pkg):
                module = module.get_child(pkg)
            else:
                return None
        if module.is_folder():
            if module.has_child(packages[-1]) and \
               module.get_child(packages[-1]).is_folder():
                return module.get_child(packages[-1])
            elif module.has_child(packages[-1] + '.py') and \
                 not module.get_child(packages[-1] + '.py').is_folder():
                return module.get_child(packages[-1] + '.py')

    def get_python_path_folders(self):
        """Return `python_path` pref folders plus sys.path entries as resources."""
        import rope.base.project
        result = []
        for src in self.project.prefs.get('python_path', []) + sys.path:
            try:
                src_folder = rope.base.project.get_no_project().get_resource(src)
                result.append(src_folder)
            except rope.base.exceptions.ResourceNotFoundError:
                # sys.path entries frequently do not exist as resources; skip.
                pass
        return result

    def find_module(self, modname, folder=None):
        """Returns a resource corresponding to the given module

        returns None if it can not be found
        """
        return self._find_module(modname, folder)

    def find_relative_module(self, modname, folder, level):
        """Resolve a level-`level` relative import of `modname` from `folder`."""
        for i in range(level - 1):
            folder = folder.parent
        if modname == '':
            # `from . import x` style: the package folder itself.
            return folder
        else:
            return self._find_module_in_folder(folder, modname)

    def _find_module(self, modname, folder=None):
        """Return `modname` module resource"""
        # Search order: project source folders, then python path, then the
        # explicitly given folder.
        for src in self.get_source_folders():
            module = self._find_module_in_folder(src, modname)
            if module is not None:
                return module
        for src in self.get_python_path_folders():
            module = self._find_module_in_folder(src, modname)
            if module is not None:
                return module
        if folder is not None:
            module = self._find_module_in_folder(folder, modname)
            if module is not None:
                return module
        return None

    # INFO: It was decided not to cache source folders, since:
    #  - Does not take much time when the root folder contains
    #    packages, that is most of the time
    #  - We need a separate resource observer; `self.observer`
    #    does not get notified about module and folder creations
    def get_source_folders(self):
        """Returns project source folders"""
        if self.project.root is None:
            return []
        result = list(self._custom_source_folders)
        result.extend(self._find_source_folders(self.project.root))
        return result

    def resource_to_pyobject(self, resource, force_errors=False):
        """Return the (cached) `PyModule`/`PyPackage` for `resource`."""
        return self.module_cache.get_pymodule(resource, force_errors)

    def get_python_files(self):
        """Returns all python files available in the project"""
        return [resource for resource in self.project.get_files()
                if self.is_python_file(resource)]

    def _is_package(self, folder):
        """Tell whether `folder` is a Python package (has an `__init__.py` file)."""
        if folder.has_child('__init__.py') and \
           not folder.get_child('__init__.py').is_folder():
            return True
        else:
            return False

    def _find_source_folders(self, folder):
        """Recursively collect folders that directly contain `.py` files.

        A folder containing a package is itself treated as the source root
        for that subtree.
        """
        for resource in folder.get_folders():
            if self._is_package(resource):
                return [folder]
        result = []
        for resource in folder.get_files():
            if resource.name.endswith('.py'):
                result.append(folder)
                break
        for resource in folder.get_folders():
            result.extend(self._find_source_folders(resource))
        return result

    def run_module(self, resource, args=None, stdin=None, stdout=None):
        """Run `resource` module

        Returns a `rope.base.oi.doa.PythonFileRunner` object for
        controlling the process.
        """
        # `perform_doi` is the older preference name; used as the default
        # when `perform_doa` is not set.
        perform_doa = self.project.prefs.get('perform_doi', True)
        perform_doa = self.project.prefs.get('perform_doa', perform_doa)
        receiver = self.object_info.doa_data_received
        if not perform_doa:
            receiver = None
        runner = rope.base.oi.doa.PythonFileRunner(
            self, resource, args, stdin, stdout, receiver)
        # Cached conclusions may be invalidated by what the run discovers.
        runner.add_finishing_observer(self.module_cache.forget_all_data)
        runner.run()
        return runner

    def analyze_module(self, resource, should_analyze=lambda py: True,
                       search_subscopes=lambda py: True, followed_calls=None):
        """Analyze `resource` module for static object inference

        This function forces rope to analyze this module to collect
        information about function calls.  `should_analyze` is a
        function that is called with a `PyDefinedObject` argument.  If
        it returns `True` the element is analyzed.  If it is `None` or
        returns `False` the element is not analyzed.

        `search_subscopes` is like `should_analyze`; The difference is
        that if it returns `False` the sub-scopes are all ignored.
        That is it is assumed that `should_analyze` returns `False`
        for all of its subscopes.

        `followed_calls` override the value of ``soa_followed_calls``
        project config.
        """
        if followed_calls is None:
            followed_calls = self.project.prefs.get('soa_followed_calls', 0)
        pymodule = self.resource_to_pyobject(resource)
        self.module_cache.forget_all_data()
        rope.base.oi.soa.analyze_module(
            self, pymodule, should_analyze, search_subscopes, followed_calls)

    def get_classes(self, task_handle=taskhandle.NullTaskHandle()):
        """Deprecated; always returns an empty list."""
        warnings.warn('`PyCore.get_classes()` is deprecated',
                      DeprecationWarning, stacklevel=2)
        return []

    def __str__(self):
        return str(self.module_cache) + str(self.object_info)

    def modname(self, resource):
        """Return the dotted module name of `resource`.

        Walks up through enclosing packages (folders with `__init__.py`)
        prepending each package name.
        """
        if resource.is_folder():
            module_name = resource.name
            source_folder = resource.parent
        elif resource.name == '__init__.py':
            module_name = resource.parent.name
            source_folder = resource.parent.parent
        else:
            module_name = resource.name[:-3]
            source_folder = resource.parent
        while source_folder != source_folder.parent and \
              source_folder.has_child('__init__.py'):
            module_name = source_folder.name + '.' + module_name
            source_folder = source_folder.parent
        return module_name

    @property
    @utils.cacheit
    def extension_modules(self):
        # Names from the `extension_modules` pref, optionally extended with
        # dynamically-loaded stdlib modules; cached after the first read.
        result = set(self.project.prefs.get('extension_modules', []))
        if self.project.prefs.get('import_dynload_stdmods', False):
            result.update(stdmods.dynload_modules())
        return result
class _ModuleCache(object):
def __init__(self, pycore):
self.pycore = pycore
self.module_map = {}
self.pycore.cache_observers.append(self._invalidate_resource)
self.observer = self.pycore.observer
def _invalidate_resource(self, resource):
if resource in self.module_map:
self.forget_all_data()
self.observer.remove_resource(resource)
del self.module_map[resource]
def get_pymodule(self, resource, force_errors=False):
if resource in self.module_map:
return self.module_map[resource]
if resource.is_folder():
result = PyPackage(self.pycore, resource,
force_errors=force_errors)
else:
result = PyModule(self.pycore, resource=resource,
force_errors=force_errors)
if result.has_errors:
return result
self.module_map[resource] = result
self.observer.add_resource(resource)
return result
def forget_all_data(self):
for pymodule in self.module_map.values():
pymodule._forget_concluded_data()
def __str__(self):
return 'PyCore caches %d PyModules\n' % len(self.module_map)
class _ExtensionCache(object):
def __init__(self, pycore):
self.pycore = pycore
self.extensions = {}
def get_pymodule(self, name):
if name == '__builtin__':
return builtins.builtins
allowed = self.pycore.extension_modules
if name not in self.extensions and name in allowed:
self.extensions[name] = builtins.BuiltinModule(name, self.pycore)
return self.extensions.get(name)
def perform_soa_on_changed_scopes(project, resource, old_contents):
    """Re-run static object analysis only on scopes whose text changed.

    Diffs the resource's current contents against ``old_contents`` and
    restricts the analysis to functions/classes that overlap changed lines.
    """
    pycore = project.pycore
    if resource.exists() and pycore.is_python_file(resource):
        try:
            new_contents = resource.read()
            # detecting changes in new_contents relative to old_contents
            detector = _TextChangeDetector(new_contents, old_contents)
            def search_subscopes(pydefined):
                # Descend into a scope if any line inside it changed.
                scope = pydefined.get_scope()
                return detector.is_changed(scope.get_start(), scope.get_end())
            def should_analyze(pydefined):
                # Analyze a scope and *consume* its changed lines so that
                # enclosing scopes are not re-analyzed for the same edit.
                scope = pydefined.get_scope()
                start = scope.get_start()
                end = scope.get_end()
                return detector.consume_changes(start, end)
            pycore.analyze_module(resource, should_analyze, search_subscopes)
        except exceptions.ModuleSyntaxError:
            # Unparsable file: skip analysis until it is valid again.
            pass
class _TextChangeDetector(object):
def __init__(self, old, new):
self.old = old
self.new = new
self._set_diffs()
def _set_diffs(self):
differ = difflib.Differ()
self.lines = []
lineno = 0
for line in differ.compare(self.old.splitlines(True),
self.new.splitlines(True)):
if line.startswith(' '):
lineno += 1
elif line.startswith('-'):
lineno += 1
self.lines.append(lineno)
def is_changed(self, start, end):
"""Tell whether any of start till end lines have changed
The end points are inclusive and indices start from 1.
"""
left, right = self._get_changed(start, end)
if left < right:
return True
return False
def consume_changes(self, start, end):
"""Clear the changed status of lines from start till end"""
left, right = self._get_changed(start, end)
if left < right:
del self.lines[left:right]
return left < right
def _get_changed(self, start, end):
left = bisect.bisect_left(self.lines, start)
right = bisect.bisect_right(self.lines, end)
return left, right
|
JetChars/vim
|
vim/bundle/python-mode/pymode/libs3/rope/base/pycore.py
|
Python
|
apache-2.0
| 15,520
|
# Python 2 script: load the ASCEND C++ bindings with RTLD_GLOBAL so that
# dynamically loaded solver plugins can resolve the library's symbols.
import sys
import dl
sys.setdlopenflags(dl.RTLD_GLOBAL|dl.RTLD_NOW)
import ascpy
import gtkbrowser
print "python: creating new library object\n";
#---- error callback function -----
def error_reporter(sev, filename, line, msg):
    """ASCEND error callback: write a colorized message to stderr.

    `sev` selects the ANSI styling and label; messages with a filename are
    formatted "file:line: msg". Returns the number of characters written.
    """
    yellow = chr(27) + "[33;2m"
    red = chr(27) + "[31;1m"
    reset = chr(27) + "[0m"
    # sev -> (left escape, right escape, label). sev 4 is the default,
    # quiet case: no label and no trailing newline in the right escape.
    styles = {
        1: ("", "\n", "Note: "),
        2: (yellow, reset + "\n", "Warning: "),
        3: (red, reset + "\n", "Error: "),
        4: (yellow, reset, ""),
        5: (yellow, reset + "\n", "PROGRAM WARNING: "),
        6: (red, reset + "\n", "PROGRAM ERROR: "),
    }
    leftstr, rightstr, typestr = styles.get(sev, ("", "", ""))
    if filename:
        # Note: the label is intentionally omitted in this branch,
        # matching the original behavior.
        outputstr = "%s%s:%d: %s%s\n" % (leftstr, filename, line,
                                         msg.strip(), rightstr)
    else:
        outputstr = "%s%s%s%s" % (leftstr, typestr, msg, rightstr)
    sys.stderr.write(outputstr)
    return len(outputstr)
#---------output model hierarchy---------------
def show(i,level=0):
    # Recursively print an indented tree of ASCEND instance `i` to stderr,
    # one line per instance: "<name> IS_A <type>" plus a value where known.
    sys.stderr.write((" "*level)+i.getName().toString()+" IS_A "+i.getType().getName().toString())
    if i.isCompound():
        if i.isChildless():
            sys.stderr.write(": no children)\n")
        else:
            sys.stderr.write(":\n");
            for c in i.getChildren():
                show(c,level+1)
    elif i.isRelation() or i.isWhen():
        sys.stderr.write("\n")
    elif i.isSet():
        # NOTE(review): `set` shadows the builtin, and if the set is
        # neither int nor string typed it is unbound here (NameError) --
        # confirm those are the only set kinds ascpy produces.
        if i.isSetInt():
            set = i.getSetIntValue()
        elif i.isSetString():
            set = i.getSetStringValue()
        #sys.stderr.write("[%d]:" % set.length())
        sys.stderr.write(" = %s\n" % set);
    elif ( i.isAtom() or i.isFund() ) and not i.isDefined():
        sys.stderr.write(" (undefined)\n")
    elif i.isBool():
        sys.stderr.write(" = "); sys.stderr.write("%s" % i.getBoolValue()); sys.stderr.write("\n")
    elif i.isInt():
        sys.stderr.write(" = "); sys.stderr.write("%d" % i.getIntValue()); sys.stderr.write("\n")
    else:
        if i.getType().isRefinedSolverVar():
            if i.isFixed():
                # Fixed solver variables are highlighted (yellow background).
                sys.stderr.write(" = "+chr(27)+"[1;43;37m"); sys.stderr.write("%f" % i.getRealValue()); sys.stderr.write(chr(27)+"[0m\n")
            else:
                # Free solver variables in bold.
                sys.stderr.write(" = "+chr(27)+"[1m"); sys.stderr.write("%f" % i.getRealValue()); sys.stderr.write(chr(27)+"[0m\n")
        else:
            sys.stderr.write(" = "); sys.stderr.write("%f" % i.getRealValue()); sys.stderr.write("\n")
#-------------------------------
reporter = ascpy.getReporter()
reporter.setPythonErrorCallback(error_reporter)
#reporter.reportError("STUFF")
l = ascpy.Library()
l.listModules()
#t = l.findType("boolean")
l.load("simple_fs.a4c")
mv = l.getModules()
m = mv[0]
tv = l.getModuleTypes(m)
#for m in mv:
# print "Module ", m.getName()
# for t in l.getModuleTypes(m):
# print " - Type ", t.getName()
#l.listModules();
#t = l.findType("boolean_var")
#t = l.findType("boolean")
t = l.findType("test_flowsheet")
# t = l.findType("my_water3")
sim = t.getSimulation("i")
sim.check()
print "Simulation instance kind:", sim.getKindStr()
pv = t.getMethods()
p_specify = 0
print "Listing methods of",t.getName,":"
for p in pv:
print " *", p.getName()
if p.getName()=="specify":
p_specify = p
print "Running '"+p_specify.getName()+"'"
print p_specify
sim.run(p_specify)
sim.build()
sim.check()
print sim.getFixableVariables()
sim.solve()
ch = sim.getModel().getChildren()
print "Children of",sim.getName(),":"
print ch
print "Children of",ch[1].getName(),":"
print ch[1].getChildren()
show(sim.getModel())
b = gtkbrowser.Browser(sim)
gtkbrowser.gtk.main()
print "COMPLETED"
del l
|
georgyberdyshev/ascend
|
pygtk/librarytest.py
|
Python
|
gpl-2.0
| 3,626
|
from django.conf import settings
from django.utils.safestring import SafeString
from django import template
register = template.Library()
def dfp(slot):
    """Render the responsive DFP ad markup for the named slot.

    Looks up ``settings.DFP_SLOTS[slot]`` (raises KeyError for unknown
    slots) and emits one wrapper div plus one div per responsive variant.
    """
    # Fixed: removed an unreachable trailing `return datetime...` line that
    # referenced undefined names (`datetime`, `format_string`) -- dead code
    # left over from another tag.
    data = settings.DFP_SLOTS[slot]
    # NOTE(review): SafeString + str yields a plain str, so the returned
    # value is not marked safe; the template must |safe it -- confirm.
    html = SafeString()
    html += '<div class="dfp-ads" data-slot="%s" data-account="%s" >' % (data['slot'], data['account'])
    for version in data['responsive']:
        html += '<div data-sizes="%s" data-media-query="%s"></div>' % (
            ",".join("%sx%s" % s for s in version['sizes']),
            version['media_query'])
    html += "</div>"
    return html


register.simple_tag(dfp)
|
divadrei/django-responsive-dfp
|
dfp/templatetags/dfp.py
|
Python
|
mit
| 636
|
# Reads n and a line of n space-separated integers; repeatedly removes the
# leftmost run of three or more equal adjacent numbers until none remain,
# then prints how many numbers were removed in total.
import re
n = int(input())
# Double the separators so each " NUM " token -- including both delimiting
# spaces -- can be matched back-to-back by the backreference below.
# Fixed: this replace() was a no-op (' ' -> ' '), so the pattern could
# never match consecutive tokens and the program always printed 0.
s = ' ' + input().replace(' ', '  ') + ' '
p = re.compile(r'(\s\d+\s)\1{2,}')
while True:
    c = len(s)
    s = re.sub(p, '', s, count=1)
    if len(s) == c:
        break
print(n - len(s.split()))
|
altg0x0/info_tasks
|
18/f1.py
|
Python
|
unlicense
| 228
|
from flask import Flask
from flask_restful import Api
from services import OrderService, OrderListService
# Wire up the REST API: the collection endpoint at /rest/orders/ and a
# single-order endpoint at /rest/orders/<id>.
app = Flask(__name__)
api = Api(app)
api.add_resource(OrderListService, '/rest/orders/')
api.add_resource(OrderService, '/rest/orders/<int:id>')
# Run the built-in development server when executed directly.
# (debug=True: not suitable for production.)
if __name__ == '__main__':
    app.run(debug=True)
|
LukasRychtecky/wa2
|
2_RESTful/python/app/server.py
|
Python
|
mit
| 307
|
"""
Utility methods for bokchoy database manipulation.
"""
from __future__ import print_function
import os
import tarfile
import boto
from paver.easy import BuildFailure, sh
from pavelib.prereqs import compute_fingerprint
from pavelib.utils.envs import Env
# Repo-relative folder holding the cached bokchoy DB dump files.
CACHE_FOLDER = 'common/test/db_cache'
# File where the sha1 fingerprint of the migration state is recorded.
FINGERPRINT_FILEPATH = '{}/{}/bok_choy_migrations.sha1'.format(Env.REPO_ROOT, CACHE_FOLDER)
def remove_files_from_folder(files, folder):
    """
    Remove the specified files from the folder.
    Failures (e.g. a file that does not exist) are reported and skipped.
    """
    for name in files:
        target = os.path.join(folder, name)
        try:
            os.remove(target)
        except OSError:
            # Nonfatal: report and keep going.
            print('\tCould not remove {}. Continuing.'.format(target))
        else:
            print('\tRemoved {}'.format(target))
def reset_test_db(db_cache_files, update_cache_files=True):
    """
    Reset the bokchoy test db for a new test run.

    The called script flushes the db (creating it if needed) and loads the
    db cache files if present; with `update_cache_files` it also applies
    migrations and rewrites up-to-date cache files.
    """
    parts = ['{}/scripts/reset-test-db.sh'.format(Env.REPO_ROOT)]
    if update_cache_files:
        parts.append('--rebuild_cache')
    sh(' '.join(parts))
    verify_files_exist(db_cache_files)
def compute_fingerprint_and_write_to_disk(migration_output_files, all_db_files):
    """
    Compute the bok choy migrations fingerprint, record it on disk,
    and return it.
    """
    new_fingerprint = fingerprint_bokchoy_db_files(
        migration_output_files, all_db_files
    )
    write_fingerprint_to_file(new_fingerprint)
    return new_fingerprint
def fingerprint_bokchoy_db_files(migration_output_files, all_db_files):
    """
    Generate a sha1 checksum over the files that configure the bokchoy
    databases (schema, data, and the yaml migration descriptions). The
    checksum represents the databases' current state and can be used to
    decide whether migrations must run after loading schema and data.
    """
    calculate_bokchoy_migrations(migration_output_files)
    print("Verifying that all files needed to compute the fingerprint exist.")
    verify_files_exist(all_db_files)
    file_paths = [os.path.join(CACHE_FOLDER, name) for name in all_db_files]
    print("Computing the fingerprint.")
    fingerprint = compute_fingerprint(file_paths)
    print("The fingerprint for bokchoy db files is: {}".format(fingerprint))
    return fingerprint
def write_fingerprint_to_file(fingerprint):
    """
    Persist the database-files fingerprint for future comparisons.
    The file is checked into the repo along with the cache files.
    """
    with open(FINGERPRINT_FILEPATH, 'w') as out_file:
        out_file.write(fingerprint)
def verify_files_exist(files):
    """
    Verify that every named file exists inside CACHE_FOLDER.

    Raises BuildFailure naming the first missing file; this helps catch
    breakages caused by changes to the bash scripts that produce them.
    """
    missing = [
        name for name in files
        if not os.path.isfile(os.path.join(CACHE_FOLDER, name))
    ]
    if missing:
        raise BuildFailure(
            "Did not find expected file: {}".format(
                os.path.join(CACHE_FOLDER, missing[0])
            )
        )
def calculate_bokchoy_migrations(migration_output_files):
    """
    Run the calculate-migrations script, producing the yml files that tell
    us whether migrations need to run.

    NOTE: the script first clears out the database, then calculates what
    migrations need to be run -- which is all of them.
    """
    script = '{}/scripts/reset-test-db.sh --calculate_migrations'.format(
        Env.REPO_ROOT
    )
    sh(script)
    verify_files_exist(migration_output_files)
def does_fingerprint_on_disk_match(fingerprint):
    """
    Return True when the fingerprint recorded on disk equals the one given.
    """
    return get_bokchoy_db_fingerprint_from_file() == fingerprint
def is_fingerprint_in_bucket(fingerprint, bucket_name):
    """
    Return True when a tarball matching the fingerprint exists in the
    given s3 bucket.
    """
    bucket = boto.connect_s3().get_bucket(bucket_name)
    key = boto.s3.key.Key(bucket=bucket,
                          name='{}.tar.gz'.format(fingerprint))
    return key.exists()
def get_bokchoy_db_fingerprint_from_file():
    """
    Return the fingerprint recorded on disk, or None when the file
    is missing/unreadable.
    """
    try:
        with open(FINGERPRINT_FILEPATH, 'r') as fingerprint_file:
            return fingerprint_file.read().strip()
    except IOError:
        return None
def get_file_from_s3(bucket_name, zipfile_name, path):
    """
    Download `zipfile_name` from the s3 bucket into `path`.
    Raises BuildFailure when the key does not exist.
    """
    print ("Retrieving {} from bucket {}.".format(zipfile_name, bucket_name))
    bucket = boto.connect_s3().get_bucket(bucket_name)
    key = boto.s3.key.Key(bucket=bucket, name=zipfile_name)
    if not key.exists():
        raise BuildFailure(
            "Did not find expected file {} in the S3 bucket {}".format(
                zipfile_name, bucket_name
            )
        )
    key.get_contents_to_filename(os.path.join(path, zipfile_name))
def extract_files_from_zip(files, zipfile_path, to_path):
    """
    Extract the named members from a tarball into `to_path`, then verify
    they all landed on disk.
    """
    with tarfile.open(name=zipfile_path, mode='r') as archive:
        for member in files:
            archive.extract(member, path=to_path)
    verify_files_exist(files)
def refresh_bokchoy_db_cache_from_s3(fingerprint, bucket_name, bokchoy_db_files):
    """
    Replace the on-disk cache files with the copy stored on s3 for this
    fingerprint, if one exists. When no copy exists, continue silently.
    """
    if not is_fingerprint_in_bucket(fingerprint, bucket_name):
        # Nothing on s3 for this fingerprint; keep what is on disk.
        return
    zipfile_name = '{}.tar.gz'.format(fingerprint)
    get_file_from_s3(bucket_name, zipfile_name, CACHE_FOLDER)
    zipfile_path = os.path.join(CACHE_FOLDER, zipfile_name)
    print ("Extracting db cache files.")
    extract_files_from_zip(bokchoy_db_files, zipfile_path, CACHE_FOLDER)
    os.remove(zipfile_path)
def create_tarfile_from_db_cache(fingerprint, files, path):
    """
    Create a tar.gz named after the fingerprint, containing the named
    files (looked up inside `path`). Returns (zipfile_name, zipfile_path).
    """
    zipfile_name = '{}.tar.gz'.format(fingerprint)
    zipfile_path = os.path.join(path, zipfile_name)
    with tarfile.open(name=zipfile_path, mode='w:gz') as archive:
        for member in files:
            archive.add(os.path.join(path, member), arcname=member)
    return zipfile_name, zipfile_path
def upload_to_s3(file_name, file_path, bucket_name):
    """
    Upload the file to the s3 bucket; existing keys are never overwritten
    (replace=False), in which case a notice is printed instead.
    """
    print ("Uploading {} to s3 bucket {}".format(file_name, bucket_name))
    bucket = boto.connect_s3().get_bucket(bucket_name)
    key = boto.s3.key.Key(bucket=bucket, name=file_name)
    bytes_written = key.set_contents_from_filename(file_path, replace=False)
    if bytes_written:
        print ("Wrote {} bytes to {}.".format(bytes_written, key.name))
    else:
        print ("File {} already existed in bucket {}.".format(key.name, bucket_name))
def upload_db_cache_to_s3(fingerprint, bokchoy_db_files, bucket_name):
    """
    Tar up the current bokchoy DB cache files and push them to s3.
    """
    tar_name, tar_path = create_tarfile_from_db_cache(
        fingerprint, bokchoy_db_files, CACHE_FOLDER
    )
    upload_to_s3(tar_name, tar_path, bucket_name)
|
hastexo/edx-platform
|
pavelib/utils/db_utils.py
|
Python
|
agpl-3.0
| 7,787
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common library methods used by both coordinator and task machines."""
import argparse
import logging
import os
import socket
import xmlrpclib
# Accepted values for the --verbosity flag.
LOGGING_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR']
# IP of the machine this module is imported on (resolved once at import).
MY_IP = socket.gethostbyname(socket.gethostname())
SERVER_ADDRESS = ''
SERVER_PORT = 31710
DEFAULT_TIMEOUT_SECS = 20 * 60  # 20 minutes. NOTE(review): comment used to say "30 minutes" but the value is 1200 s -- confirm which was intended.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
SWARMING_DIR = os.path.join(THIS_DIR, '..', '..', 'tools', 'swarming_client')
def InitLogging():
  """Configure the logging module from the --verbosity command-line flag.

  Raises:
    argparse.ArgumentError if the --verbosity arg is incorrect.
  """
  parser = argparse.ArgumentParser()
  verbosity_action = parser.add_argument('--verbosity', default='INFO')
  known_args, _ = parser.parse_known_args()
  level = known_args.verbosity
  if level not in LOGGING_LEVELS:
    raise argparse.ArgumentError(
        verbosity_action, 'Only levels %s supported' % str(LOGGING_LEVELS))
  logging.basicConfig(
      format='%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s',
      datefmt='%H:%M:%S', level=level)
|
SaschaMester/delicium
|
testing/legion/common_lib.py
|
Python
|
bsd-3-clause
| 1,225
|
from django.utils.translation import ugettext_lazy as _, ugettext
from .common import *
class NewswallDateNavigationExtension(NavigationExtension):
    """
    FeinCMS navigation extension yielding a year/month breakdown, e.g.:

    2012
        April
        March
        February
        January
    2011
    2010
    """
    name = _('Newswall date')
    def children(self, page, **kwargs):
        base_url = page.get_absolute_url()
        for year, months in date_tree():
            # One pseudo-page per year...
            yield PagePretender(
                title=u'%s' % year,
                url='%s%s/' % (base_url, year),
                tree_id=page.tree_id,  # pretty funny tree hack
                lft=0,
                rght=len(months) + 1,
                level=page.level + 1,
                slug='%s' % year,
            )
            # ...followed by one per month, nested one level deeper.
            for month in months:
                month_name = all_months[month - 1].strftime('%B')
                yield PagePretender(
                    title=u'%s' % ugettext(month_name),
                    url='%s%04d/%02d/' % (base_url, year, month),
                    tree_id=page.tree_id,  # pretty funny tree hack
                    lft=0,
                    rght=0,
                    level=page.level + 2,
                    slug='%04d/%02d' % (year, month),
                )
|
michaelkuty/django-newswall
|
newswall/navigation_extensions/treeinfo.py
|
Python
|
bsd-3-clause
| 1,250
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from tableauserverclient import WorkbookItem
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.tableau.hooks.tableau import TableauHook
class TableauRefreshWorkbookOperator(BaseOperator):
    """
    Refreshes a Tableau Workbook/Extract

    .. seealso:: https://tableau.github.io/server-client-python/docs/api-ref#workbooks

    :param workbook_name: The name of the workbook to refresh.
    :type workbook_name: str
    :param site_id: The id of the site the workbook belongs to.
    :type site_id: Optional[str]
    :param blocking: When True (the default) the operator waits for the
        extract refresh to finish before returning.
    :type blocking: bool
    :param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
        containing the credentials to authenticate to the Tableau Server.
    :type tableau_conn_id: str
    """

    def __init__(
        self,
        *,
        workbook_name: str,
        site_id: Optional[str] = None,
        blocking: bool = True,
        tableau_conn_id: str = 'tableau_default',
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.workbook_name = workbook_name
        self.site_id = site_id
        self.blocking = blocking
        self.tableau_conn_id = tableau_conn_id

    def execute(self, context: dict) -> str:
        """
        Trigger the Tableau extract refresh and push the job id to xcom.

        :param context: The task context during execution.
        :type context: dict
        :return: the id of the job that executes the extract refresh
        :rtype: str
        """
        with TableauHook(self.site_id, self.tableau_conn_id) as tableau_hook:
            target = self._get_workbook_by_name(tableau_hook)
            job_id = self._refresh_workbook(tableau_hook, target.id)
            if self.blocking:
                # Imported here to avoid a circular import at module load.
                from airflow.providers.tableau.sensors.tableau_job_status import TableauJobStatusSensor

                # Reuse the sensor inline to poll until the job succeeds.
                TableauJobStatusSensor(
                    job_id=job_id,
                    site_id=self.site_id,
                    tableau_conn_id=self.tableau_conn_id,
                    task_id='wait_until_succeeded',
                    dag=None,
                ).execute(context={})
                self.log.info('Workbook %s has been successfully refreshed.', self.workbook_name)
            return job_id

    def _get_workbook_by_name(self, tableau_hook: TableauHook) -> WorkbookItem:
        # Find the first workbook whose name matches; fail loudly otherwise.
        matches = (
            wb
            for wb in tableau_hook.get_all(resource_name='workbooks')
            if wb.name == self.workbook_name
        )
        workbook = next(matches, None)
        if workbook is None:
            raise AirflowException(f'Workbook {self.workbook_name} not found!')
        self.log.info('Found matching workbook with id %s', workbook.id)
        return workbook

    def _refresh_workbook(self, tableau_hook: TableauHook, workbook_id: str) -> str:
        job = tableau_hook.server.workbooks.refresh(workbook_id)
        self.log.info('Refreshing Workbook %s...', self.workbook_name)
        return job.id
|
nathanielvarona/airflow
|
airflow/providers/tableau/operators/tableau_refresh_workbook.py
|
Python
|
apache-2.0
| 3,881
|
#
# Copyright 2017 University of Southern California
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
#
from distutils.core import setup
# Minimal distutils packaging metadata for the youtubeUploader script.
# NOTE(review): "requires" names a "youtubecli" distribution -- confirm the
# actual dependency name; distutils records but does not install it.
setup(
    name="youtubeUploader",
    description="Script for uploading a video to YouTube",
    version="0.1-prerelease",
    scripts=[
        "youtubeUploader.py",
    ],
    requires=["youtubecli"],
    maintainer_email="support@misd.isi.edu",
    license='(new) BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ])
|
informatics-isi-edu/microscopy
|
youtube/youtubeUploader/setup.py
|
Python
|
apache-2.0
| 777
|
"""Test the dxf matplotlib backend.
Copyright (C) 2014 David M Kent
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import shutil
import tempfile
import unittest
import matplotlib
from matplotlib import pyplot as plt
from numpy.random import random
from mpldxf import backend_dxf
# Register the 'dxf' file format with matplotlib so that
# plt.savefig('*.dxf') is routed through this backend's canvas.
matplotlib.backend_bases.register_backend('dxf',
                                          backend_dxf.FigureCanvas)
class DxfBackendTestCase(unittest.TestCase):
    """Tests for the dxf backend."""
    def setUp(self):
        # Fresh scratch directory per test for the generated .dxf files.
        self.test_dir = tempfile.mkdtemp()
    def tearDown(self):
        plt.clf()
        if os.path.isdir(self.test_dir):
            shutil.rmtree(self.test_dir)
    def _save_and_check(self, basename):
        # Save the current figure as DXF and assert the file was written.
        outfile = os.path.join(self.test_dir, basename)
        plt.savefig(outfile)
        self.assertTrue(os.path.isfile(outfile))
    def test_plot(self):
        """Test a simple line-plot command."""
        plt.plot(range(5), [4, 3, 2, 1, 0])
        self._save_and_check('test_plot.dxf')
    def test_boxplot(self):
        """Test a box-plot."""
        plt.boxplot(random((4, 30)))
        self._save_and_check('test_boxplot.dxf')
    def test_contour(self):
        """Test some contours."""
        plt.contour(random((30, 30)))
        self._save_and_check('test_contour.dxf')
    def test_contourf(self):
        """Test some filled contours."""
        plt.contourf(random((30, 30)))
        self._save_and_check('test_contourf.dxf')
|
dmkent/mpldxf
|
mpldxf/test_backend_ezdxf.py
|
Python
|
mit
| 2,665
|
# -*- coding: utf-8 -*-
"""
Custom serializers suitable to translated models.
"""
from __future__ import absolute_import, unicode_literals
from rest_framework import serializers
# Similar to DRF itself, expose all fields in the same manner.
from parler_rest.fields import TranslatedFieldsField, TranslatedField, TranslatedAbsoluteUrlField # noqa
class TranslatableModelSerializer(serializers.ModelSerializer):
    """
    Serializer that saves :class:`TranslatedFieldsField` automatically.
    """
    def save(self, **kwargs):
        """
        Extract the translations and save them after the main object save.

        All translations are saved whether the object is being created or
        updated; override this method for more complex needs.
        """
        translated_data = self._pop_translated_data()
        instance = super(TranslatableModelSerializer, self).save(**kwargs)
        self.save_translations(instance, translated_data)
        return instance
    def _pop_translated_data(self):
        """
        Split translated-field data out of ``validated_data``.
        """
        popped = (
            (meta.rel_name, self.validated_data.pop(meta.rel_name, {}))
            for meta in self.Meta.model._parler_meta
        )
        return {rel_name: data for rel_name, data in popped if data}
    def save_translations(self, instance, translated_data):
        """
        Write translated field values onto per-language translation rows.
        """
        for meta in self.Meta.model._parler_meta:
            per_language = translated_data.get(meta.rel_name, {})
            for lang_code, field_values in per_language.items():
                translation = instance._get_translated_model(
                    lang_code, auto_create=True, meta=meta)
                for name, value in field_values.items():
                    setattr(translation, name, value)
                translation.save()
|
AdrianLC/django-parler-rest
|
parler_rest/serializers.py
|
Python
|
apache-2.0
| 2,014
|
# linux kernel version plugin by ine (2020)
from util import hook
from utilities import request
import re
@hook.command(autohelp=False)
def kernel(inp, reply=None):
    """Report the latest Linux kernel versions from kernel.org.

    Parses https://www.kernel.org/finger_banner; "longterm" releases are
    listed separately as old versions.
    """
    data = request.get("https://www.kernel.org/finger_banner")
    versions = []
    old_versions = []
    for line in data.split('\n'):
        # Fixed: the character class began "[[a-z..." -- the stray '[' made
        # a literal '[' matchable inside the release name.
        info = re.match(r'^The latest ([a-z0-9 \-\.]+) version of the Linux kernel is:\s*(.*)$', line)
        if info is None:
            continue
        name = info.group(1)
        version = info.group(2)
        if 'longterm' in name:
            old_versions.append(version)
        else:
            versions.append(name + ': ' + version)
    output = 'Linux kernel versions: ' + '; '.join(versions)
    if old_versions:
        output += '. Old longterm versions: ' + ', '.join(old_versions)
    return output
|
FrozenPigs/Taigabot
|
plugins/kernel.py
|
Python
|
gpl-3.0
| 865
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-28 17:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10.2): link Level/LevelType to TestObject
    via nullable protected FKs, and add TestObject.title/description."""
    dependencies = [
        ('testManage', '0003_auto_20161025_1503'),
    ]
    operations = [
        migrations.AddField(
            model_name='level',
            name='testObject',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='testManage.TestObject'),
        ),
        migrations.AddField(
            model_name='leveltype',
            name='testObject',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='testManage.TestObject'),
        ),
        migrations.AddField(
            model_name='testobject',
            name='description',
            field=models.CharField(default='', max_length=4000),
        ),
        migrations.AddField(
            model_name='testobject',
            name='title',
            field=models.CharField(default='', max_length=200),
        ),
    ]
|
qinggeng/ceShiGuanLiXiTong
|
site/ceShiGuanLiSite/apps/testManage/migrations/0004_auto_20161028_1702.py
|
Python
|
mit
| 1,127
|
import sublime, sublime_plugin
from Default.exec import ExecCommand
# Related reading:
# http://stackoverflow.com/questions/41768673/let-sublime-choose-among-two-similar-build-systems
# This is very similar to shebanger.py. This uses the same method, only here the
# idea is that the build system holds the possible list of interpreters and the
# first line of the file is formatted to allow the build to select.
#
# This example is for selecting between a 32-bit or 64-bit version of Python,
# but it could easily be modified to select between different versions of
# python, operate for different languages, etc.
#
# In order to use this, you would need a build file that looks something like
# this. Here the important parts are the "target", the two different
# interpreters to use and that you should inject '\\$python' anywhere you want
# the Python interpreter to be inserted.
#
# {
# "target": "python_build",
# "cancel": { "kill": true },
#
# "shell_cmd": "\\$python -u \"$file\"",
# "file_regex": "^[ ]*File \"(...*?)\", line ([0-9]*)",
# "selector": "source.python",
#
# "python32": "python",
# "python64": "c:/python27-64/python",
#
# "env": {"PYTHONIOENCODING": "utf-8"},
#
# "variants":
# [
# {
# "name": "Syntax Check",
# "shell_cmd": "\\$python -m py_compile \"${file}\"",
# }
# ]
# }
class PythonBuildCommand(ExecCommand):
    """
    A take on shebanger.py: the build system lists two Python interpreters
    (``python32``/``python64``) and the first line of the current file
    selects which one replaces the ``$python`` placeholder in the command.
    """
    def detect_version(self, filename, python32, python64):
        # A first line of the form "# ... 64 ..." selects the 64-bit
        # interpreter; anything else falls back to the 32-bit one.
        with open(filename, 'r') as handle:
            first_line = handle.readline()
        wants_64 = first_line.startswith("#") and "64" in first_line
        return python64 if wants_64 else python32
    def run(self, **kwargs):
        current_file = self.window.active_view().file_name() or ''
        interpreter = self.detect_version(
            current_file,
            kwargs.pop("python32", "python"),
            kwargs.pop("python64", "python"))
        expanded = sublime.expand_variables(kwargs, {'python': interpreter})
        super().run(**expanded)
|
STealthy-and-haSTy/SublimeScraps
|
build_enhancements/python_build.py
|
Python
|
mit
| 2,325
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet keypool and interaction with wallet encryption/locking."""
from test_framework.test_framework import VergeTestFramework
from test_framework.util import *
class KeyPoolTest(VergeTestFramework):
    """Exercise keypool behavior around wallet encryption and locking."""
    def set_test_params(self):
        self.num_nodes = 1
    def run_test(self):
        # The RPC calls below are strictly order-dependent: each drain /
        # refill step assumes the keypool size left by the previous one.
        nodes = self.nodes
        addr_before_encrypting = nodes[0].getnewaddress()
        addr_before_encrypting_data = nodes[0].getaddressinfo(addr_before_encrypting)
        wallet_info_old = nodes[0].getwalletinfo()
        assert_equal(wallet_info_old['hdseedid'], wallet_info_old['hdmasterkeyid'])
        assert(addr_before_encrypting_data['hdseedid'] == wallet_info_old['hdseedid'])
        # Encrypt wallet and wait to terminate
        nodes[0].node_encrypt_wallet('test')
        # Restart node 0
        self.start_node(0)
        # Keep creating keys
        addr = nodes[0].getnewaddress()
        addr_data = nodes[0].getaddressinfo(addr)
        wallet_info = nodes[0].getwalletinfo()
        assert_equal(wallet_info['hdseedid'], wallet_info['hdmasterkeyid'])
        # Encryption rotates the HD seed, so new addresses use the new seed.
        assert(addr_before_encrypting_data['hdseedid'] != wallet_info['hdseedid'])
        assert(addr_data['hdseedid'] == wallet_info['hdseedid'])
        assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
        # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
        nodes[0].walletpassphrase('test', 12000)
        nodes[0].keypoolrefill(6)
        nodes[0].walletlock()
        wi = nodes[0].getwalletinfo()
        assert_equal(wi['keypoolsize_hd_internal'], 6)
        assert_equal(wi['keypoolsize'], 6)
        # drain the internal keys
        nodes[0].getrawchangeaddress()
        nodes[0].getrawchangeaddress()
        nodes[0].getrawchangeaddress()
        nodes[0].getrawchangeaddress()
        nodes[0].getrawchangeaddress()
        nodes[0].getrawchangeaddress()
        addr = set()
        # the next one should fail
        assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
        # drain the external keys; the set proves all six are distinct
        addr.add(nodes[0].getnewaddress())
        addr.add(nodes[0].getnewaddress())
        addr.add(nodes[0].getnewaddress())
        addr.add(nodes[0].getnewaddress())
        addr.add(nodes[0].getnewaddress())
        addr.add(nodes[0].getnewaddress())
        assert(len(addr) == 6)
        # the next one should fail
        assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
        # refill keypool with three new addresses
        nodes[0].walletpassphrase('test', 1)
        nodes[0].keypoolrefill(3)
        # test walletpassphrase timeout
        time.sleep(1.1)
        assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
        # drain them by mining
        nodes[0].generate(1)
        nodes[0].generate(1)
        nodes[0].generate(1)
        assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1)
        nodes[0].walletpassphrase('test', 100)
        nodes[0].keypoolrefill(100)
        wi = nodes[0].getwalletinfo()
        assert_equal(wi['keypoolsize_hd_internal'], 100)
        assert_equal(wi['keypoolsize'], 100)
# Standard functional-test entry point.
if __name__ == '__main__':
    KeyPoolTest().main()
|
vergecurrency/VERGE
|
test/functional/wallet_keypool.py
|
Python
|
mit
| 3,535
|
#!/usr/bin/env python2
import math,copy
"""PARAMETERS"""
shoulder_offset=37.5
hub_offset=50
arm_length=200
screw_spacing=300
screw_angles=[150,270,30]
start_positions=[253.2,252.9,253.2] #G92 X253.75 Y253.75 Z253.75
delta_radius=screw_spacing/3.0*math.sqrt(3)
screw_positions=[(delta_radius*math.cos(math.pi*screw_angles[i]/180.0),delta_radius*math.sin(math.pi*screw_angles[i]/180.0)) for i in range(3)]
coord={"X":0,"Y":0,"Z":0, "E":0, "F":1200}
f=file(raw_input("Input File: "))
def transform_raw(x,y,z):
    """Map a Cartesian point to the three absolute screw coordinates.

    Returns a list of three machine-axis values, one per screw.
    NOTE(review): on a math domain error (point out of reach) this prints
    the offending point and implicitly returns None -- callers then fail
    with a TypeError when unpacking; confirm this is acceptable.
    """
    # Angular contribution of each screw (wrapped into [-0.5, 0.5) turns,
    # scaled by 25.4 -- presumably mm per screw revolution; TODO confirm.
    thetas=[(((+.5-math.atan2(y-screw_positions[i][1],x-screw_positions[i][0])/2/math.pi+screw_angles[i]/360.0)+.5)%1-.5)*25.4 for i in range(3)]
    # Horizontal distance from the point to each screw axis.
    ds=[math.sqrt((x-screw_positions[i][0])**2+(y-screw_positions[i][1])**2) for i in range(3)]
    try:
        # z + screw angle correction + vertical arm projection.
        return [z+thetas[i]+math.sqrt(arm_length**2-(ds[i]-hub_offset-shoulder_offset)**2) for i in range(3)]
    except:
        print x,y,z
def transform(x,y,z):
    """Return machine coordinates relative to the machine position of the origin."""
    origin = transform_raw(0, 0, 0)
    target = transform_raw(x, y, z)
    return tuple(t - o for t, o in zip(target, origin))
#print "G1","X"+str(x-X),"Y"+str(y-Y),"Z"+str(z-Z)
print transform(0,0,0)
def getABC(position1):
    """Convert one Cartesian move into machine (screw) coordinates.

    Non-move commands (no "X" key) pass through untouched.  The feedrate is
    rescaled so the Cartesian speed is preserved along the transformed path.
    """
    if "X" not in position1:
        return position1
    result = copy.deepcopy(position1)
    prev = (coord["X"], coord["Y"], coord["Z"])
    target = (result["X"], result["Y"], result["Z"])
    feed = result["F"]
    a1, b1, c1 = transform(*prev)
    a2, b2, c2 = transform(*target)
    # Length of the segment in machine space vs. Cartesian space.
    virtual_d = math.sqrt((a1 - a2) ** 2 + (b1 - b2) ** 2 + (c1 - c2) ** 2)
    d = math.sqrt(sum((t - p) ** 2 for t, p in zip(target, prev)))
    if d != 0:
        feed = feed * virtual_d / d
    result['X'], result['Y'], result['Z'], result['F'] = a2, b2, c2, feed
    return result
def distance(start, end):
    """Euclidean distance between two position dicts.

    Returns 0 when either endpoint is missing one of the X/Y/Z keys or
    holds a non-numeric value (non-move commands in the program stream).
    """
    try:
        x1, y1, z1 = start['X'], start['Y'], start['Z']
        x2, y2, z2 = end['X'], end['Y'], end['Z']
        return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2)
    # Narrowed from a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; only the expected lookup/arithmetic failures
    # should fall back to 0.
    except (KeyError, TypeError):
        return 0
def interpolate(start, end, i, n):
    """Return point i of n along the segment from `start` to `end`.

    Every key present in `end` is linearly interpolated against the same
    key in `start`, except "F" (feedrate), which is copied from `end`.
    Keys absent from `start` are also copied from `end` unchanged.
    """
    # The original unpacked X/Y/Z/E into unused locals, which raised a
    # spurious KeyError for partial dicts, and tested `c in end` inside
    # `for c in end` (always true); both removed.
    middle = {}
    for key in end:
        if key in start and key != "F":
            middle[key] = (i * end[key] + (n - i) * start[key]) / n
        else:
            middle[key] = end[key]
    return middle
def segmentize(start,end,maxLength):
    """Split the move from `start` to `end` into pieces no longer than maxLength.

    Short moves are returned as a single-element list containing `end`.
    """
    total = distance(start, end)
    if total <= maxLength:
        return [end]
    pieces = int(math.ceil(total / maxLength))
    return [interpolate(start, end, step, pieces) for step in range(1, pieces + 1)]
prefixes="MGXYZESF"
commands="MG"
f2=file(raw_input("Output File: "),"w")
f2.write("G92 X"+str(start_positions[0])+" Y"+str(start_positions[1])+" Z"+str(start_positions[2])+" E0\n")
program=[]
move_count=0
for line in f:
line=line.strip()
chunks=line.split(";")[0].split(" ")
stuff={}
for chunk in chunks:
if len(chunk)>1:
stuff[chunk[0]]=chunk[1:]
try:
stuff[chunk[0]]=int(stuff[chunk[0]])
except:
try:
stuff[chunk[0]]=float(stuff[chunk[0]])
except:
pass
if "X" in stuff or "Y" in stuff or "Z" in stuff:
move_count+=1
for c in coord:
if c not in stuff:
stuff[c]=coord[c]
if move_count<=3 and len(stuff)>0:
program+=[stuff]
elif len(stuff)>0:
segments=segmentize(coord,stuff,1)
program+=segments
for c in coord:
if c in stuff:
coord[c]=stuff[c]
for line in program:
abcline=getABC(line)
for letter in prefixes:
if letter in abcline and letter in commands:
f2.write(letter+str(abcline[letter])+" ")
elif letter in abcline:
f2.write(letter+str(round(abcline[letter],3))+" ")
f2.write("\n")
f2.write("G1 X"+str(start_positions[0])+" Y"+str(start_positions[1])+" Z"+str(start_positions[2])+"\n")
f2.close()
print "done"
|
pjoyce42/ConceptFORGE
|
LISA Simpson/GCODE PREPROCESSOR/simpson segmentize.py
|
Python
|
gpl-3.0
| 3,954
|
# -*- coding: utf-8 -*-
import os
import sys
import setuptools
from flake8.engine import get_parser, get_style_guide
from flake8.util import is_flag, flag_on
# Per-user flake8 config location: ~\.flake8 on Windows, otherwise the
# XDG config directory (defaulting to ~/.config/flake8).
if sys.platform.startswith('win'):
    DEFAULT_CONFIG = os.path.expanduser(r'~\.flake8')
else:
    DEFAULT_CONFIG = os.path.join(
        os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'),
        'flake8'
    )
# Extra error codes merged into the `ignore` set by check_file/check_code.
EXTRA_IGNORE = []
def main():
    """Parse options and run checks on Python source.

    Exits the process via SystemExit; the exit status is truthy when any
    error was reported (and --exit-zero was not given).
    """
    style_guide = get_style_guide(parse_argv=True, config_file=DEFAULT_CONFIG)
    if style_guide.options.install_hook:
        from flake8.hooks import install_hook
        install_hook()
    report = style_guide.check_files()
    raise SystemExit(print_report(report, style_guide) > 0)
def print_report(report, flake8_style):
    """Print the final statistics/benchmark output and compute the exit code.

    Returns 1 when errors were found (unless --exit-zero is set), else 0.
    """
    opts = flake8_style.options
    if opts.statistics:
        report.print_statistics()
    if opts.benchmark:
        report.print_benchmark()
    if not report.total_errors:
        return 0
    if opts.count:
        sys.stderr.write(str(report.total_errors) + '\n')
    return 0 if opts.exit_zero else 1
def check_file(path, ignore=(), complexity=-1):
    """Checks a file using pep8 and pyflakes by default and mccabe optionally.

    :param str path: path to the file to be checked
    :param tuple ignore: (optional), error and warning codes to be ignored
    :param int complexity: (optional), enables the mccabe check for values > 0
    """
    combined_ignore = set(ignore).union(EXTRA_IGNORE)
    style = get_style_guide(
        config_file=DEFAULT_CONFIG,
        ignore=combined_ignore,
        max_complexity=complexity)
    return style.input_file(path)
def check_code(code, ignore=(), complexity=-1):
    """Checks code using pep8 and pyflakes by default and mccabe optionally.

    :param str code: code to be checked
    :param tuple ignore: (optional), error and warning codes to be ignored
    :param int complexity: (optional), enables the mccabe check for values > 0
    """
    combined_ignore = set(ignore).union(EXTRA_IGNORE)
    style = get_style_guide(
        config_file=DEFAULT_CONFIG,
        ignore=combined_ignore,
        max_complexity=complexity)
    return style.input_file(None, lines=code.splitlines(True))
class Flake8Command(setuptools.Command):
    """The :class:`Flake8Command` class is used by setuptools to perform
    checks on registered modules.
    """
    description = "Run flake8 on modules registered in setuptools"
    user_options = []

    def initialize_options(self):
        """Mirror every flake8 command-line option as a setuptools option."""
        self.option_to_cmds = {}
        parser = get_parser()[0]
        for opt in parser.option_list:
            # Strip the leading '--' and map dashes to underscores so the
            # option can live as an attribute on this command object.
            cmd_name = opt._long_opts[0][2:]
            option_name = cmd_name.replace('-', '_')
            self.option_to_cmds[option_name] = cmd_name
            setattr(self, option_name, None)

    def finalize_options(self):
        """Collect options the user actually set into options_dict."""
        self.options_dict = {}
        for (option_name, cmd_name) in self.option_to_cmds.items():
            if option_name in ['help', 'verbose']:
                continue
            value = getattr(self, option_name)
            if value is None:
                continue
            # Boolean flags arrive as strings; normalize to a bool.
            if is_flag(value):
                value = flag_on(value)
            self.options_dict[option_name] = value

    def distribution_files(self):
        """Yield the package directories and modules registered in setup()."""
        if self.distribution.packages:
            package_dirs = self.distribution.package_dir or {}
            for package in self.distribution.packages:
                pkg_dir = package
                if package in package_dirs:
                    pkg_dir = package_dirs[package]
                elif '' in package_dirs:
                    pkg_dir = package_dirs[''] + os.path.sep + pkg_dir
                # NOTE(review): this also rewrites dots in a package_dir
                # prefix, not just in the package name -- confirm intended.
                yield pkg_dir.replace('.', os.path.sep)
        if self.distribution.py_modules:
            for filename in self.distribution.py_modules:
                yield "%s.py" % filename
        # Don't miss the setup.py file itself
        yield "setup.py"

    def run(self):
        """Run flake8 over all distribution files and exit with a status."""
        # Prepare
        paths = list(self.distribution_files())
        flake8_style = get_style_guide(config_file=DEFAULT_CONFIG,
                                       paths=paths,
                                       **self.options_dict)
        # Run the checkers
        report = flake8_style.check_files()
        exit_code = print_report(report, flake8_style)
        raise SystemExit(exit_code > 0)
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/flake8/main.py
|
Python
|
bsd-3-clause
| 4,526
|
from collections import defaultdict
from itertools import chain
class CyclicGraphError(ValueError):
    """
    This exception is raised if the graph is Cyclic (or rather, when the
    sorting algorithm *knows* that the graph is Cyclic by hitting a snag
    in the top-sort).
    """
    pass
class Network(object):
    """
    This object (the `Network` object) handles keeping track of all the
    graph's nodes, and links between the nodes.

    The `Network` object is mostly used to topologically sort the nodes,
    to handle dependency resolution.
    """

    def __init__(self):
        # All known nodes; `edges` maps a parent to the set of children
        # that depend on it.
        self.nodes = set()
        self.edges = defaultdict(set)

    def add_node(self, node):
        """Add a node to the graph (with no edges)."""
        self.nodes.add(node)

    def add_edge(self, fro, to):
        """
        Add a dependency edge.  The parent is required to be sorted
        *before* the child:

            [ FROM ] ------> [ TO ]
        """
        self.add_node(fro)
        self.add_node(to)
        self.edges[fro].add(to)

    def leaf_nodes(self):
        """
        Return an iterable of nodes with no edges pointing at them, i.e.
        all nodes without dependencies.
        """
        # Every node that appears as the target of some edge.
        deps = {item for sublist in self.edges.values() for item in sublist}
        return self.nodes - deps

    def prune_node(self, node, remove_backrefs=False):
        """
        Remove `node` from the network, including any edges that may be
        pointing at it.  Raises ValueError if edges still point at the
        node and `remove_backrefs` is not set.
        """
        if not remove_backrefs:
            # Test membership on the bound `connections` set instead of
            # re-indexing self.edges[fro] (redundant second lookup).
            for fro, connections in self.edges.items():
                if node in connections:
                    raise ValueError("""Attempting to remove a node with
                                     backrefs. You may consider setting
                                     `remove_backrefs` to true.""")
        # OK. Otherwise, let's do our removal.
        self.nodes.remove(node)
        if node in self.edges:
            # Remove all edges *from* this node if we're pruning it.
            self.edges.pop(node)
        for fro, connections in self.edges.items():
            # Remove any links *to* this node (if they exist).
            if node in connections:
                connections.remove(node)

    def sort(self):
        """
        Yield nodes topologically sorted so dependencies come before the
        nodes that require them.  Raises CyclicGraphError on a cycle.
        Consumes the graph (nodes are pruned as they are yielded).
        """
        while self.nodes:
            iterated = False
            for node in self.leaf_nodes():
                iterated = True
                self.prune_node(node)
                yield node
            # No leaf found but nodes remain: every node is on a cycle.
            if not iterated:
                raise CyclicGraphError("Sorting has found a cyclic graph.")

    def dot(self):
        """Return a buffer that represents something dot(1) can render."""
        buff = "digraph graphname {"
        for fro in self.edges:
            for to in self.edges[fro]:
                buff += "%s -> %s;" % (fro, to)
        buff += "}"
        return buff

    def cycles(self):
        """
        Fairly expensive cycle detection algorithm. This method
        will return the shortest unique cycles that were detected,
        as tuples of nodes.
        """
        def walk_node(node, seen):
            # Walk the graph recursively; revisiting `node` closes a cycle.
            if node in seen:
                yield (node,)
                return
            seen.add(node)
            # Fix: use .get() -- indexing the defaultdict here used to
            # insert empty edge sets for leaf nodes as a side effect of
            # merely *detecting* cycles.
            for edge in self.edges.get(node, ()):
                for cycle in walk_node(edge, set(seen)):
                    yield (node,) + cycle
        # First, let's get an iterable of all known cycles.
        cycles = chain.from_iterable(
            (walk_node(node, set()) for node in self.nodes))
        shortest = set()
        # Keep only the shortest unique cycles, ignoring cycles which
        # contain an already-known cycle.
        for cycle in sorted(cycles, key=len):
            for el in shortest:
                if set(el).issubset(set(cycle)):
                    break
            else:
                shortest.add(cycle)
        # And return that unique list.
        return shortest
|
influence-usa/pupa
|
pupa/utils/topsort.py
|
Python
|
bsd-3-clause
| 4,925
|
import io
from fixtures import *
import tarfile
import gzip
def test_backing_up_config_dir():
    """The config-dir backup is a readable compressed tar with all entries."""
    blob = manager.backup.config_dir()
    archive = tarfile.open(fileobj=io.BytesIO(blob), mode='r:*')
    # The configuration directory holds 9 files plus the directory itself.
    assert len(archive.getmembers()) == 9 + 1
|
Jumpscale/openwrt-remote-manager
|
tests/backup_test.py
|
Python
|
mit
| 346
|
"""
OAI INTEGRATION
"""
|
dcosentino/edx-platform
|
lms/djangoapps/oai/__init__.py
|
Python
|
agpl-3.0
| 24
|
"""API to expose the latest Liberation Pledge pledgers."""
import datetime
import functools
import os

import gspread
from flask import Flask, jsonify, request
from oauth2client.client import SignedJwtAssertionCredentials
from werkzeug.contrib.cache import SimpleCache
# Spreadsheet holding the pledge submissions (service must have access).
SHEET_ID = os.environ["LIBERATION_PLEDGE_SHEET_ID"]
# Hard cap on how many pledgers a single request may ask for.
NUM_PLEDGERS_LIMIT = 11
# Field values longer than this are truncated with "..." (see shorten_field).
ENTRY_LENGTH_LIMIT = 20
LOG_LOCATION = "/opt/dxe/logs/latest_pledgers"
CACHE_TIMEOUT = 10800  # 3 hours
# Column order of the source spreadsheet.
HEADERS = [
    "Submitted On",
    "Name",
    "City",
    "Country",
    "Email",
    "Address",
    "Why are you taking this pledge",
    "Checkbox-1",  # "Share to Facebook" checkbox
]
# Subset of HEADERS that is safe to expose publicly via the API.
RETURN_HEADERS = [
    "Submitted On",
    "Name",
    "City",
    "Country",
]
app = Flask(__name__)
# In-process response cache; entries expire after CACHE_TIMEOUT seconds.
cache = SimpleCache()
class cached(object):
    """Decorator that caches a view's response, keyed on the request path.

    The timeout defaults to CACHE_TIMEOUT seconds.
    """

    def __init__(self, timeout=None):
        self.timeout = timeout or CACHE_TIMEOUT

    def __call__(self, f):
        # functools.wraps preserves the wrapped view's __name__/__doc__;
        # without it every cached view is named "decorator", which would
        # make Flask's endpoint names collide if more than one route
        # were decorated.
        @functools.wraps(f)
        def decorator(*args, **kwargs):
            response = cache.get(request.path)
            if response is None:
                response = f(*args, **kwargs)
                cache.set(request.path, response, self.timeout)
            return response
        return decorator
def get_gspread_client():
    """Build an authorized gspread client from service-account env vars."""
    credentials = SignedJwtAssertionCredentials(
        os.environ["GOOGLE_API_CLIENT_EMAIL"],
        os.environ["GOOGLE_API_PRIVATE_KEY"],
        ['https://spreadsheets.google.com/feeds'],
    )
    return gspread.authorize(credentials)
# Module-level client, refreshed on demand by latest_pledgers().
gc = get_gspread_client()
def shorten_field(field):
    """Truncate to ENTRY_LENGTH_LIMIT characters, ending with "..." when cut."""
    if len(field) < ENTRY_LENGTH_LIMIT:
        return field
    return field[:ENTRY_LENGTH_LIMIT - 3] + "..."
@app.route('/pledge/latest_pledgers/<int:num>')
@cached()
def latest_pledgers(num):
    """Returns the last `num` pledgers.

    Responds with {"pledgers": [...]} ordered newest to oldest, or an
    {"error": ...} payload for out-of-range requests.
    """
    if num < 1:
        return jsonify({"error": "number of entries requested must be a positive integer"})
    elif num > NUM_PLEDGERS_LIMIT:
        return jsonify({"error": "number of entries requested too high"})
    global gc
    # Re-authorize the shared client once if the first attempt fails
    # (expired credentials).  NOTE(review): the bare except also hides
    # unrelated errors -- consider narrowing.
    try:
        sheet = gc.open_by_key(SHEET_ID).sheet1
    except:
        gc = get_gspread_client()
        sheet = gc.open_by_key(SHEET_ID).sheet1
    if sheet.row_count - 1 < num:  # There aren't `num` pledgers (-1 for header row)
        latest_values = sheet.get_all_values()[1:]
    else:
        latest_values = sheet.get_all_values()[-num:]
    row_dicts = [dict(zip(HEADERS, row)) for row in latest_values]
    # Keep only the public columns, then truncate long values.
    cleaned_row_dicts = [{k: v for k, v in row.iteritems() if k in RETURN_HEADERS} for row in row_dicts]
    shortened_row_dicts = [{k: shorten_field(v) for k, v in row.iteritems()} for row in cleaned_row_dicts]
    for row_dict in shortened_row_dicts:
        # NOTE(review): "Submitted On" is parsed *after* shorten_field;
        # this works because the timestamp format is under the length
        # limit -- confirm if ENTRY_LENGTH_LIMIT ever shrinks.
        days_ago = (datetime.datetime.now() - datetime.datetime.strptime(row_dict["Submitted On"], "%m/%d/%Y %H:%M:%S")).days
        if days_ago <= 0:
            days_ago_str = "Today"
        elif days_ago == 1:
            days_ago_str = "{} day ago".format(days_ago)
        else:  # days_ago >= 2
            days_ago_str = "{} days ago".format(days_ago)
        row_dict["days_ago"] = days_ago_str
    return jsonify({"pledgers": list(reversed(shortened_row_dicts))})  # ordered newest to oldest
if __name__ == "__main__":
if not app.debug:
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler(LOG_LOCATION, maxBytes=100000, backupCount=100)
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
app.run()
|
directactioneverywhere/server
|
latest_pledgers/latest_pledgers.py
|
Python
|
gpl-3.0
| 3,486
|
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def clear_analytics_tables(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Delete every row from all analytics count tables and the fill state."""
    model_names = (
        'UserCount',
        'StreamCount',
        'RealmCount',
        'InstallationCount',
        'FillState',
    )
    for model_name in model_names:
        model = apps.get_model('analytics', model_name)
        model.objects.all().delete()
class Migration(migrations.Migration):
    """Wipe all analytics rows so they can be regenerated from scratch."""

    dependencies = [
        ('analytics', '0010_clear_messages_sent_values'),
    ]
    operations = [
        # No reverse operation: the data is recomputable, not restorable.
        migrations.RunPython(clear_analytics_tables),
    ]
|
showell/zulip
|
analytics/migrations/0011_clear_analytics_tables.py
|
Python
|
apache-2.0
| 954
|
from django.conf.urls import patterns, url
from polls import views
# URL routes for the polls app.
# NOTE(review): `patterns('')` is the Django 1.x style, removed in
# Django 1.10 -- a plain list of url() entries replaces it on upgrade.
urlpatterns = patterns('',
    # ex: /polls/
    url(r'^$', views.IndexView.as_view(), name='index'),
    # ex: /polls/5/
    url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
    # ex: /polls/5/results/
    url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),
    # ex: /polls/5/vote/
    url(r'^(?P<question_id>\d+)/vote/$', views.vote, name='vote'),
)
|
ooffeerr/free-games
|
polls/urls.py
|
Python
|
apache-2.0
| 465
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger("stmusic")
class UrlRewriteSTMusic(object):
    """STMusic urlrewriter: turns details-page URLs into .torrent downloads."""

    def url_rewritable(self, task, entry):
        """Only STMusic details-page URLs can be rewritten."""
        url = entry['url']
        return url.startswith('http://www.stmusic.org/details.php?id=')

    def url_rewrite(self, task, entry):
        """Rewrite the entry's URL in place to the direct torrent download."""
        rewritten = entry['url'].replace('details.php?id=', 'download.php/')
        rewritten += '/%s.torrent' % (quote(entry['title'], safe=''))
        entry['url'] = rewritten
@event('plugin.register')
def register_plugin():
    """Register the rewriter with FlexGet under the 'urlrewriter' group."""
    plugin.register(UrlRewriteSTMusic, 'stmusic', groups=['urlrewriter'], api_ver=2)
|
qvazzler/Flexget
|
flexget/plugins/urlrewrite/stmusic.py
|
Python
|
mit
| 819
|
# -*- encoding: utf-8 -*-
from pathlib import Path
from os import PathLike
from typing import (
Any,
AnyStr,
ByteString,
Dict,
Generator,
Iterable,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
SupportsInt,
Text,
Tuple,
Type,
TypeVar,
Union,
NewType,
)
PathInfo = NewType("PathInfo", Union[Path, PathLike, Text, ByteString])
class BrowserData(NamedTuple):
    """Static description of one browser installation and its databases."""
    os: Text  # operating system name
    browser: Text  # browser name
    path: PathInfo  # root path of the browser's profile data
    profiles: Optional[Iterable[Text]]  # profile names, or None
    file_tables: Mapping[Text, Iterable[Text]]  # db file -> its tables
    table_fields: Mapping[Text, Iterable[Text]]  # table -> its fields
|
kchawla-pi/united-states-of-browsers
|
united_states_of_browsers/db_merge/imported_annotations.py
|
Python
|
gpl-3.0
| 617
|
def to_cpp(version_info):
    """Format *version_info* as the contents of a C++ version header."""
    formatter = _CppFormatter()
    return formatter.format(version_info)
def to_python(version_info):
    """Format *version_info* as the contents of a Python version module."""
    formatter = _PythonFormatter()
    return formatter.format(version_info)
class _Formatter(object):
    """Template-method base for the language-specific version formatters.

    Subclasses supply main_formatter, is_stable_formatter,
    tag_interpretation_formatter and version_components_formatter.
    """

    def __init__(self):
        pass

    def format(self, version_info):
        """Render version_info plus any stability/tag extras."""
        extras = (self._format_is_stable(version_info)
                  + self._format_tag_interpretation(version_info))
        return self.main_formatter(version_info, extras)

    def _format_is_stable(self, version_info):
        # Without a git tag the build cannot be a stable release.
        if not version_info.git_tag_exists:
            return self.is_stable_formatter(False)
        tag_interpretation = version_info.interpret_tag_name()
        if tag_interpretation is None:
            return ""
        return self.is_stable_formatter(tag_interpretation.is_stable)

    def _format_tag_interpretation(self, version_info):
        tag_interpretation = version_info.interpret_tag_name()
        if tag_interpretation is None:
            return ""
        components = self.version_components_formatter(
            tag_interpretation.version_components)
        return self.tag_interpretation_formatter(tag_interpretation, components)
# ----------------------------------------
# C++ Formatter
# ----------------------------------------
class _CppFormatter(_Formatter):
    """Renders version info as a self-contained C++ header file."""

    def main_formatter(self, version_info, other_variables):
        """Emit the full header; `other_variables` is extra constexpr text."""
        # The template body is left-aligned: its contents are written
        # verbatim into the generated header file.
        return """
// ---------------------------------------------------
// This file is autogenerated by git-version.
// DO NOT MODIFY!
// ---------------------------------------------------
#pragma once
#ifndef MESSMER_GITVERSION_VERSION_H
#define MESSMER_GITVERSION_VERSION_H
namespace version {
constexpr const char *VERSION_STRING = "%s";
constexpr const char *GIT_TAG_NAME = "%s";
constexpr const unsigned int GIT_COMMITS_SINCE_TAG = %d;
constexpr const char *GIT_COMMIT_ID = "%s";
constexpr bool MODIFIED_SINCE_COMMIT = %s;
constexpr bool IS_DEV_VERSION = %s;
%s
}
#endif
""" % (version_info.version_string, version_info.git_tag_name, version_info.git_commits_since_tag,
       version_info.git_commit_id, str(version_info.modified_since_commit).lower(), str(version_info.is_dev).lower(),
       other_variables)

    def is_stable_formatter(self, is_stable):
        """Render the IS_STABLE_VERSION constant (C++ lowercase booleans)."""
        return """
constexpr bool IS_STABLE_VERSION = %s;
""" % str(is_stable).lower()

    def tag_interpretation_formatter(self, tag_interpretation, version_components):
        """Render the version components array and tag string."""
        return """
constexpr const char *VERSION_COMPONENTS[] = %s;
constexpr const char *VERSION_TAG = "%s";
""" % (version_components, tag_interpretation.version_tag)

    def version_components_formatter(self, version_components):
        """Render components as a C++ brace-initialized string array."""
        return "{\"" + "\", \"".join(version_components) + "\"}"
# ----------------------------------------
# Python Formatter
# ----------------------------------------
class _PythonFormatter(_Formatter):
    """Renders version info as a generated Python module."""

    def main_formatter(self, version_info, other_variables):
        """Emit the module source; `other_variables` is extra assignments."""
        # The template body is left-aligned: its contents become the
        # generated module verbatim.
        return """
# ---------------------------------------------------
# This file is autogenerated by git-version.
# DO NOT MODIFY!
# ---------------------------------------------------
VERSION_STRING = "%s"
GIT_TAG_NAME = "%s"
GIT_COMMITS_SINCE_TAG = %d
GIT_COMMIT_ID = "%s"
MODIFIED_SINCE_COMMIT = %s
IS_DEV_VERSION = %s
%s
""" % (version_info.version_string, version_info.git_tag_name, version_info.git_commits_since_tag,
       version_info.git_commit_id, version_info.modified_since_commit, version_info.is_dev, other_variables)

    def is_stable_formatter(self, is_stable):
        """Render the IS_STABLE_VERSION assignment (Python bool repr)."""
        return """
IS_STABLE_VERSION = %s
""" % is_stable

    def tag_interpretation_formatter(self, tag_interpretation, version_components):
        """Render the version components list and tag string."""
        return """
VERSION_COMPONENTS = %s
VERSION_TAG = "%s"
""" % (version_components, tag_interpretation.version_tag)

    def version_components_formatter(self, version_components):
        """Render components as a Python list-of-strings literal."""
        return "[\"" + "\", \"".join(version_components) + "\"]"
|
smessmer/gitversion
|
src/gitversionbuilder/versioninfooutputter.py
|
Python
|
gpl-3.0
| 3,980
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import serializers
from .models import CompareRequest
class CompareRequestSerializer(serializers.HyperlinkedModelSerializer):
    """Read serializer exposing the stored fields of a CompareRequest."""
    class Meta:
        model = CompareRequest
        fields = ('id', 'repo', 'pull_request_num', 'base_ref', 'base_sha', 'base_test_run', 'head_ref', 'head_sha',
                  'head_test_run', 'silent')
class CreateCompareRequestSerializer(serializers.HyperlinkedModelSerializer):
    """Write serializer: only the fields a client may set at creation."""
    class Meta:
        model = CompareRequest
        fields = ('repo', 'pull_request_num', 'silent')
|
Wikia/sparrow
|
apps/compare_requests/serializers.py
|
Python
|
mit
| 606
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/upload/upload.py."""
# Module authors, one entry per contributor.
__author__ = [
    'johncox@google.com (John Cox)',
]
from common import crypto
from common import users
from common import utils as common_utils
from controllers import sites
from controllers import utils
from models import models
from models import courses
from models import student_work
from modules.upload import upload
from tests.functional import actions
from google.appengine.ext import db
class TextFileUploadHandlerTestCase(actions.TestBase):
    """Tests for TextFileUploadHandler."""

    def setUp(self):
        super(TextFileUploadHandlerTestCase, self).setUp()
        self.contents = 'contents'
        self.email = 'user@example.com'
        self.headers = {'referer': 'http://localhost/path?query=value#fragment'}
        self.unit_id = '1'
        # Log in once to capture a stable user_id, then log out so each
        # test controls login state itself.
        actions.login(self.email)
        user = users.get_current_user()
        actions.logout()
        self.user_id = user.user_id()
        self.student = models.Student(
            is_enrolled=True, key_name=self.email, user_id=self.user_id)
        self.student.put()
        # Allow protected access for tests. pylint: disable=protected-access
        self.xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)

    def configure_environ_for_current_user(self):
        """Re-login as the test user so the handler sees them as current."""
        actions.login(self.email)

    def get_submission(self, student_key, unit_id):
        """Fetch a Submission entity directly from the datastore."""
        return db.get(student_work.Submission.get_key(unit_id, student_key))

    def test_bad_xsrf_token_returns_400(self):
        # Allow protected access for tests. pylint: disable=protected-access
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX,
            {'form_xsrf_token': 'bad'}, self.headers, expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_creates_new_submission(self):
        self.configure_environ_for_current_user()
        # Allow protected access for tests. pylint: disable=protected-access
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            'contents': self.contents,
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        # NOTE(review): get_submission(student_key, unit_id) receives
        # self.user_id in the unit_id position -- confirm intended.
        self.assertIsNone(self.get_submission(self.student.key(), self.user_id))
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers)
        self.assertEqual(200, response.status_int)
        submissions = student_work.Submission.all().fetch(2)
        self.assertEqual(1, len(submissions))
        # Contents are stored JSON-encoded, hence the surrounding quotes.
        self.assertEqual(u'"%s"' % self.contents, submissions[0].contents)

    def test_empty_contents_returns_400(self):
        self.configure_environ_for_current_user()
        # Allow protected access for tests. pylint: disable=protected-access
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            'contents': '',
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers,
            expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_missing_contents_returns_400(self):
        self.configure_environ_for_current_user()
        # Allow protected access for tests. pylint: disable=protected-access
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers,
            expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_missing_student_returns_403(self):
        # No login here: the handler must reject anonymous submitters.
        # Allow protected access for tests. pylint: disable=protected-access
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX,
            {'form_xsrf_token': self.xsrf_token}, self.headers,
            expect_errors=True)
        self.assertEqual(403, response.status_int)

    def test_missing_xsrf_token_returns_400(self):
        # Allow protected access for tests. pylint: disable=protected-access
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, {}, self.headers, expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_updates_existing_submission(self):
        self.configure_environ_for_current_user()
        # Allow protected access for tests. pylint: disable=protected-access
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            'contents': 'old',
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        # NOTE(review): same get_submission argument-order question as in
        # test_creates_new_submission above.
        self.assertIsNone(self.get_submission(self.student.key(), self.user_id))
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers)
        self.assertEqual(200, response.status_int)
        # Second post with new contents must replace, not duplicate.
        params['contents'] = self.contents
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers)
        self.assertEqual(200, response.status_int)
        submissions = student_work.Submission.all().fetch(2)
        self.assertEqual(1, len(submissions))
        self.assertEqual(u'"%s"' % self.contents, submissions[0].contents)

    def test_unsavable_contents_returns_400(self):
        self.configure_environ_for_current_user()
        # Allow protected access for tests. pylint: disable=protected-access
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            # Entity size = contents + other data, so 1MB here will overlfow.
            'contents': 'a' * 1024 * 1024,
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers,
            expect_errors=True)
        self.assertEqual(400, response.status_int)
class TextFileUploadTagTestCase(actions.TestBase):
    """Tests rendering of the <text-file-upload-tag> custom tag."""

    _ADMIN_EMAIL = 'admin@foo.com'
    _COURSE_NAME = 'upload_test'
    _STUDENT_EMAIL = 'student@foo.com'

    def setUp(self):
        super(TextFileUploadTagTestCase, self).setUp()
        self.base = '/' + self._COURSE_NAME
        self.app_context = actions.simple_add_course(
            self._COURSE_NAME, self._ADMIN_EMAIL, 'Upload File Tag Test')
        self.course = courses.Course(None, self.app_context)
        actions.login(self._STUDENT_EMAIL, is_admin=True)
        actions.register(self, 'S. Tudent')

    def tearDown(self):
        # Drop the course added in setUp so tests stay isolated.
        sites.reset_courses()
        super(TextFileUploadTagTestCase, self).tearDown()

    def test_tag_in_assessment(self):
        """The tag renders an enabled file input and submit button."""
        assessment = self.course.add_assessment()
        assessment.html_content = (
            '<text-file-upload-tag '
            '    display_length="100" instanceid="this-tag-id">'
            '</text-file-upload-tag>')
        self.course.save()
        response = self.get('assessment?name=%s' % assessment.unit_id)
        dom = self.parse_html_string(response.body)
        form = dom.find('.//div[@class="user-upload-form"]')
        file_input = form.find('.//input[@type="file"]')
        submit = form.find('.//input[@type="submit"]')
        self.assertIsNotNone(file_input)
        self.assertIsNotNone(submit)
        self.assertEquals('100', file_input.attrib['size'])
        # The tag is not disabled
        self.assertNotIn('disabled', file_input.attrib)
        self.assertNotIn('disabled', submit.attrib)

    def test_tag_before_and_after_submission(self):
        """The warning text changes once the student has submitted."""
        assessment = self.course.add_assessment()
        assessment.html_content = (
            '<text-file-upload-tag '
            '    display_length="100" instanceid="this-tag-id">'
            '</text-file-upload-tag>')
        self.course.save()
        response = self.get('assessment?name=%s' % assessment.unit_id)
        dom = self.parse_html_string(response.body)
        warning = dom.find(
            './/*[@class="user-upload-form-warning"]').text.strip()
        self.assertEquals('Maximum file size is 1MB.', warning)
        # Write a submission directly in the course namespace, then
        # re-render the page.
        with common_utils.Namespace('ns_' + self._COURSE_NAME):
            student, _ = models.Student.get_first_by_email(self._STUDENT_EMAIL)
            student_work.Submission.write(
                assessment.unit_id, student.get_key(), 'contents')
        response = self.get('assessment?name=%s' % assessment.unit_id)
        dom = self.parse_html_string(response.body)
        warning = dom.find(
            './/*[@class="user-upload-form-warning"]').text.strip()
        self.assertEquals(
            'You have already submitted; submit again to replace your previous '
            'entry.', warning)

    def test_tag_in_oeditor_preview_is_visible_but_disabled(self):
        """In the object-editor preview the controls render but are inert."""
        response = self.post('oeditor/preview', {
            'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token(
                'oeditor-preview-handler'),
            'value': (
                '<text-file-upload-tag '
                '    display_length="100" instanceid="this-tag-id">'
                '</text-file-upload-tag>')
        })
        dom = self.parse_html_string(response.body)
        form = dom.find('.//div[@class="user-upload-form"]')
        file_input = form.find('.//input[@type="file"]')
        submit = form.find('.//input[@type="submit"]')
        self.assertIsNotNone(file_input)
        self.assertIsNotNone(submit)
        self.assertEquals('100', file_input.attrib['size'])
        # The tag is disabled
        self.assertEquals('disabled', file_input.attrib['disabled'])
        self.assertEquals('disabled', submit.attrib['disabled'])
|
ram8647/gcb-mobilecsp
|
modules/upload/upload_tests.py
|
Python
|
apache-2.0
| 10,543
|
#!/usr/bin/env python
# [SublimeLinter pep8-max-line-length:150]
# -*- coding: utf-8 -*-
"""
black_rhino is a multi-agent simulator for financial network analysis
Copyright (C) 2016 Co-Pierre Georg (co-pierre.georg@keble.ox.ac.uk)
Pawel Fiedor (pawel@fiedor.eu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from abm_template.src.basetransaction import BaseTransaction
# -------------------------------------------------------------------------
#
# class Transaction
#
# -------------------------------------------------------------------------
class Transaction(BaseTransaction):
    """A single balance-sheet transaction between two agents.

    Thin wrapper around abm_template's BaseTransaction: the setters and
    the bookkeeping methods (this_transaction, add/remove, print, write,
    clear/purge) all delegate to the base class, while the getters read
    the attributes stored on this instance.
    """
    #
    #
    # VARIABLES
    #
    #
    identifier = None  # unique identifier of the transaction, may be useful for iterators
    type_ = ""  # type of transactions, e.g. "deposit"
    asset = ""  # type of asset, used for investment types
    from_ = 0.0  # agent being the originator of the transaction
    to = 0.0  # agent being the recipient of the transaction
    amount = 0.0  # amount of the transaction
    interest = 0.0  # interest rate paid to the originator each time step
    maturity = 0  # time (in steps) to maturity
    # this is used only for loans I, and will be > 0 for defaulting loans. with each update step, it is reduced by 1
    # if timeOfDefault == 0: loan defaults
    time_of_default = -1  # control variable checking for defaulted transactions
    #
    #
    # METHODS
    #
    #
    # -------------------------------------------------------------------------
    # __init__
    # Generate a unique identifier of the transaction
    # This may be useful for looping over various agent's accounts
    # -------------------------------------------------------------------------
    def __init__(self):
        """Reset all fields to their defaults, then let the base class
        assign the unique identifier."""
        self.identifier = None  # unique identifier of the transaction, may be useful for iterators
        self.type_ = ""  # type of transactions, e.g. "deposit"
        self.asset = ""  # type of asset, used for investment types
        self.from_ = 0.0  # agent being the originator of the transaction
        self.to = 0.0  # agent being the recipient of the transaction
        self.amount = 0.0  # amount of the transaction
        self.interest = 0.0  # interest rate paid to the originator each time step
        self.maturity = 0  # time (in steps) to maturity
        # this is used only for loans I, and will be > 0 for defaulting loans. with each update step, it is reduced by 1
        # if timeOfDefault == 0: loan defaults
        self.time_of_default = -1  # control variable checking for defaulted transactions
        super(Transaction, self).__init__()
    # ------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # __del__()
    # removes the transaction from appropriate accounts and deletes the instance
    # if transaction hasn't been properly added there is no need to change accounts
    # DO NOT USE IN PRODUCTION, this is a failsafe
    # use remove_transaction() to take transaction off the books
    # -------------------------------------------------------------------------
    def __del__(self):
        super(Transaction, self).__del__()
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # functions for setting/changing variables
    # these either return or set specific value to the above variables
    # (setters delegate to the base class so any validation lives there)
    # -------------------------------------------------------------------------
    def get_type_(self):
        return self.type_
    def set_type_(self, type_):
        super(Transaction, self).set_type_(type_)
    def get_asset(self):
        return self.asset
    def set_asset(self, asset):
        super(Transaction, self).set_asset(asset)
    def get_from_(self):
        return self.from_
    def set_from_(self, from_):
        super(Transaction, self).set_from_(from_)
    def get_to(self):
        return self.to
    def set_to(self, to):
        super(Transaction, self).set_to(to)
    def get_amount(self):
        return self.amount
    def set_amount(self, amount):
        super(Transaction, self).set_amount(amount)
    def get_interest(self):
        return self.interest
    def set_interest(self, interest):
        super(Transaction, self).set_interest(interest)
    def get_maturity(self):
        return self.maturity
    def set_maturity(self, maturity):
        super(Transaction, self).set_maturity(maturity)
    def get_time_of_default(self):
        return self.time_of_default
    def set_time_of_default(self, time_of_default):
        super(Transaction, self).set_time_of_default(time_of_default)
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # this_transaction(type_,
    #                  asset,
    #                  from_,
    #                  to,
    #                  amount,
    #                  interest,
    #                  maturity,
    #                  time_of_default)
    # sets the variables of the transaction to the given amounts
    # -------------------------------------------------------------------------
    def this_transaction(self, type_, asset, from_, to, amount, interest, maturity, time_of_default):
        super(Transaction, self).this_transaction(type_, asset, from_, to, amount, interest, maturity, time_of_default)
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # add_transaction
    # adds the transaction to appropriate agents' accounts
    # TODO: we need to make sure we don't do it twice when we iterate over
    # transactions in the accounts of agents (this may be tricky)
    # -------------------------------------------------------------------------
    def add_transaction(self, environment):
        super(Transaction, self).add_transaction(environment)
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # remove_transaction
    # removes the transaction from appropriate agents' accounts
    # -------------------------------------------------------------------------
    def remove_transaction(self):
        super(Transaction, self).remove_transaction()
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # print_transaction()
    # prints the transaction and its properties
    # -------------------------------------------------------------------------
    def print_transaction(self):
        super(Transaction, self).print_transaction()
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # __str__()
    # prints the transaction and its properties
    # -------------------------------------------------------------------------
    def __str__(self):
        return super(Transaction, self).write_transaction()
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # write_transaction()
    # returns a string with the transaction and its properties
    # -------------------------------------------------------------------------
    def write_transaction(self):
        return super(Transaction, self).write_transaction()
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # clear_accounts()
    # deletes all transactions of a given agent
    # this should be used very sparingly, as this does not account
    # for the economics of the process
    # -------------------------------------------------------------------------
    def clear_accounts(self, agent):
        super(Transaction, self).clear_accounts(agent)
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # purge_accounts()
    # removes all transactions of all agents with amount of zero
    # -------------------------------------------------------------------------
    def purge_accounts(self, environment):
        super(Transaction, self).purge_accounts(environment)
    # -------------------------------------------------------------------------
# -------------------------------------------------------------------------
|
cogeorg/BlackRhino
|
examples/solow/src/transaction.py
|
Python
|
gpl-3.0
| 9,256
|
import os
import pandas as pd
#import seaborn as sns

# Merge per-sample read counts, sample metadata, sample-name translation and
# read-mapping counts into one table for downstream analysis.
total_reads = pd.read_csv('../data/sample_info/sample_read_counts.tsv', sep='\t', names = ['fastq filename', 'number of reads'])
# BUG FIX: the original used str.strip('.fastq.gz'), which strips any of the
# characters ".fastqgz" from BOTH ends of the name (e.g. "g123.fastq.gz" would
# lose its leading "g").  Remove only the literal ".fastq.gz" suffix instead.
total_reads['cryptic metagenome name'] = total_reads['fastq filename'].str.replace(r'\.fastq\.gz$', '', regex=True)
sample_info = pd.read_csv('../data/sample_info/sample_info.tsv', sep='\t')
sample_translation = pd.read_csv('../data/sample_info/meta4_sample_names--cryptic_to_sample_number.tsv', sep='\t')
read_mappings = pd.read_csv('./data/num_reads_mapped--can_double_count_multiple_mappings.tsv', sep='\t')
# Successive inner merges on the shared columns of each pair of frames.
reads = pd.merge(sample_info, sample_translation)
reads = pd.merge(reads, total_reads)
reads = pd.merge(reads, read_mappings)
out_path = 'total_num_reads_across_samples_with_sample_info.tsv'
out_dir = './data'
reads.to_csv(os.path.join(out_dir, out_path), sep='\t', index=False)
|
dacb/assembly_and_binning
|
assess_unbinned_reads_across_samples/assess_unbinned_reads_across_samples.py
|
Python
|
mit
| 858
|
from __future__ import print_function, division, absolute_import
import functools
import locale
import weakref
import llvmlite.llvmpy.core as lc
import llvmlite.llvmpy.passes as lp
import llvmlite.binding as ll
import llvmlite.ir as llvmir
from numba import config, utils
from numba.runtime.atomicops import remove_redundant_nrt_refct
_x86arch = frozenset(['x86', 'i386', 'i486', 'i586', 'i686', 'i786',
'i886', 'i986'])
def _is_x86(triple):
arch = triple.split('-')[0]
return arch in _x86arch
def dump(header, body):
    """Print *body* under an 80-column banner made from *header*, followed
    by an 80-character '=' rule."""
    banner = header.center(80, '-')
    print(banner)
    print(body)
    print('=' * 80)
class CodeLibrary(object):
    """
    An interface for bundling LLVM code together and compiling it.
    It is tied to a *codegen* instance (e.g. JITCPUCodegen) that will
    determine how the LLVM code is transformed and linked together.
    """
    # Set once finalize() completes; guards against further mutation.
    _finalized = False
    # Toggled by enable_object_caching(); gates the object-cache hooks below.
    _object_caching_enabled = False
    def __init__(self, codegen, name):
        self._codegen = codegen
        self._name = name
        self._linking_libraries = set()
        self._final_module = ll.parse_assembly(
            str(self._codegen._create_empty_module(self._name)))
        self._final_module.name = self._name
        # Remember this on the module, for the object cache hooks
        self._final_module.__library = weakref.proxy(self)
        self._shared_module = None
    @property
    def codegen(self):
        """
        The codegen object owning this library.
        """
        return self._codegen
    def __repr__(self):
        return "<Library %r at 0x%x>" % (self._name, id(self))
    def _raise_if_finalized(self):
        # Guard used by every mutating operation (adding modules/libraries).
        if self._finalized:
            raise RuntimeError("operation impossible on finalized object %r"
                               % (self,))
    def _ensure_finalized(self):
        # Lazily finalize; used by operations that need compiled output.
        if not self._finalized:
            self.finalize()
    def _optimize_functions(self, ll_module):
        """
        Internal: run function-level optimizations inside *ll_module*.
        """
        # Enforce data layout to enable layout-specific optimizations
        ll_module.data_layout = self._codegen._data_layout
        with self._codegen._function_pass_manager(ll_module) as fpm:
            # Run function-level optimizations to reduce memory usage and improve
            # module-level optimization.
            for func in ll_module.functions:
                fpm.initialize()
                fpm.run(func)
                fpm.finalize()
    def _optimize_final_module(self):
        """
        Internal: optimize this library's final module.
        """
        self._codegen._mpm.run(self._final_module)
    def _get_module_for_linking(self):
        """
        Internal: get a LLVM module suitable for linking multiple times
        into another library. Exported functions are made "linkonce_odr"
        to allow for multiple definitions, inlining, and removal of
        unused exports.
        See discussion in https://github.com/numba/numba/pull/890
        """
        self._ensure_finalized()
        if self._shared_module is not None:
            return self._shared_module
        mod = self._final_module
        to_fix = []
        nfuncs = 0
        for fn in mod.functions:
            nfuncs += 1
            if not fn.is_declaration and fn.linkage == ll.Linkage.external:
                to_fix.append(fn.name)
        if nfuncs == 0:
            # This is an issue which can occur if loading a module
            # from an object file and trying to link with it, so detect it
            # here to make debugging easier.
            raise RuntimeError("library unfit for linking: "
                               "no available functions in %s"
                               % (self,))
        if to_fix:
            # Clone so the final module keeps its original linkages.
            mod = mod.clone()
            for name in to_fix:
                # NOTE: this will mark the symbol WEAK if serialized
                # to an ELF file
                mod.get_function(name).linkage = 'linkonce_odr'
        self._shared_module = mod
        return mod
    def create_ir_module(self, name):
        """
        Create a LLVM IR module for use by this library.
        """
        self._raise_if_finalized()
        ir_module = self._codegen._create_empty_module(name)
        return ir_module
    def add_linking_library(self, library):
        """
        Add a library for linking into this library, without losing
        the original library.
        """
        library._ensure_finalized()
        self._linking_libraries.add(library)
    def add_ir_module(self, ir_module):
        """
        Add a LLVM IR module's contents to this library.
        """
        self._raise_if_finalized()
        assert isinstance(ir_module, llvmir.Module)
        ll_module = ll.parse_assembly(str(ir_module))
        ll_module.name = ir_module.name
        ll_module.verify()
        self.add_llvm_module(ll_module)
    def add_llvm_module(self, ll_module):
        """
        Optimize *ll_module* at function level and link it into this
        library's final module.
        """
        self._optimize_functions(ll_module)
        # TODO: we shouldn't need to recreate the LLVM module object
        ll_module = remove_redundant_nrt_refct(ll_module)
        self._final_module.link_in(ll_module)
    def finalize(self):
        """
        Finalize the library. After this call, nothing can be added anymore.
        Finalization involves various stages of code optimization and
        linking.
        """
        # Report any LLVM-related problems to the user
        self._codegen._check_llvm_bugs()
        self._raise_if_finalized()
        if config.DUMP_FUNC_OPT:
            dump("FUNCTION OPTIMIZED DUMP %s" % self._name, self.get_llvm_str())
        # Link libraries for shared code
        for library in self._linking_libraries:
            self._final_module.link_in(
                library._get_module_for_linking(), preserve=True)
        for library in self._codegen._libraries:
            self._final_module.link_in(
                library._get_module_for_linking(), preserve=True)
        # Optimize the module after all dependences are linked in above,
        # to allow for inlining.
        self._optimize_final_module()
        self._final_module.verify()
        self._finalize_final_module()
    def _finalize_final_module(self):
        """
        Make the underlying LLVM module ready to use.
        """
        # It seems add_module() must be done only here and not before
        # linking in other modules, otherwise get_pointer_to_function()
        # could fail.
        cleanup = self._codegen._add_module(self._final_module)
        if cleanup:
            utils.finalize(self, cleanup)
        self._finalize_specific()
        self._finalized = True
        if config.DUMP_OPTIMIZED:
            dump("OPTIMIZED DUMP %s" % self._name, self.get_llvm_str())
        if config.DUMP_ASSEMBLY:
            # CUDA backend cannot return assembly this early, so don't
            # attempt to dump assembly if nothing is produced.
            asm = self.get_asm_str()
            if asm:
                dump("ASSEMBLY %s" % self._name, self.get_asm_str())
    def get_defined_functions(self):
        """
        Get all functions defined in the library. The library must have
        been finalized.
        """
        mod = self._final_module
        for fn in mod.functions:
            if not fn.is_declaration:
                yield fn
    def get_function(self, name):
        # Look a function up by name in the final (linked) module.
        return self._final_module.get_function(name)
    def get_llvm_str(self):
        """
        Get the human-readable form of the LLVM module.
        """
        return str(self._final_module)
    def get_asm_str(self):
        """
        Get the human-readable assembly.
        """
        return str(self._codegen._tm.emit_assembly(self._final_module))
    #
    # Object cache hooks and serialization
    #
    def enable_object_caching(self):
        # Opt in to caching of compiled object code; the MCJIT engine then
        # drives the _object_*_hook classmethods below.
        self._object_caching_enabled = True
        self._compiled_object = None
        self._compiled = False
    def _get_compiled_object(self):
        # Return the cached object code, failing loudly when caching is
        # disabled or no compilation has happened yet.
        if not self._object_caching_enabled:
            raise ValueError("object caching not enabled in %s" % (self,))
        if self._compiled_object is None:
            raise RuntimeError("no compiled object yet for %s" % (self,))
        return self._compiled_object
    def _set_compiled_object(self, value):
        # Install externally-provided object code (e.g. read from a cache).
        if not self._object_caching_enabled:
            raise ValueError("object caching not enabled in %s" % (self,))
        if self._compiled:
            raise ValueError("library already compiled: %s" % (self,))
        self._compiled_object = value
    @classmethod
    def _dump_elf(cls, buf):
        """
        Dump the symbol table of an ELF file.
        Needs pyelftools (https://github.com/eliben/pyelftools)
        """
        from elftools.elf.elffile import ELFFile
        from elftools.elf import descriptions
        from io import BytesIO
        f = ELFFile(BytesIO(buf))
        print("ELF file:")
        for sec in f.iter_sections():
            if sec['sh_type'] == 'SHT_SYMTAB':
                symbols = sorted(sec.iter_symbols(), key=lambda sym: sym.name)
                print(" symbols:")
                for sym in symbols:
                    if not sym.name:
                        continue
                    print(" - %r: size=%d, value=0x%x, type=%s, bind=%s"
                          % (sym.name.decode(),
                             sym['st_size'],
                             sym['st_value'],
                             descriptions.describe_symbol_type(sym['st_info']['type']),
                             descriptions.describe_symbol_bind(sym['st_info']['bind']),
                             ))
        print()
    @classmethod
    def _object_compiled_hook(cls, ll_module, buf):
        """
        `ll_module` was compiled into object code `buf`.
        """
        # The library is retrieved via the weakref proxy stashed on the
        # module in __init__; modules without one are ignored.
        try:
            self = ll_module.__library
        except AttributeError:
            return
        if self._object_caching_enabled:
            self._compiled = True
            self._compiled_object = buf
    @classmethod
    def _object_getbuffer_hook(cls, ll_module):
        """
        Return a cached object code for `ll_module`.
        """
        try:
            self = ll_module.__library
        except AttributeError:
            return
        if self._object_caching_enabled and self._compiled_object:
            buf = self._compiled_object
            # One-shot: hand the buffer to the engine and drop our copy.
            self._compiled_object = None
            return buf
    def serialize_using_bitcode(self):
        """
        Serialize this library using its bitcode as the cached representation.
        """
        self._ensure_finalized()
        return (self._name, 'bitcode', self._final_module.as_bitcode())
    def serialize_using_object_code(self):
        """
        Serialize this library using its object code as the cached
        representation. We also include its bitcode for further inlining
        with other libraries.
        """
        self._ensure_finalized()
        data = (self._get_compiled_object(),
                self._get_module_for_linking().as_bitcode())
        return (self._name, 'object', data)
    @classmethod
    def _unserialize(cls, codegen, state):
        """
        Recreate a library from a *state* tuple produced by one of the
        serialize_using_* methods above.
        """
        name, kind, data = state
        self = codegen.create_library(name)
        assert isinstance(self, cls)
        if kind == 'bitcode':
            # No need to re-run optimizations, just make the module ready
            self._final_module = ll.parse_bitcode(data)
            self._finalize_final_module()
            return self
        elif kind == 'object':
            object_code, shared_bitcode = data
            self.enable_object_caching()
            self._set_compiled_object(object_code)
            self._shared_module = ll.parse_bitcode(shared_bitcode)
            self._finalize_final_module()
            return self
        else:
            raise ValueError("unsupported serialization kind %r" % (kind,))
class AOTCodeLibrary(CodeLibrary):
    """CodeLibrary flavour used for Ahead-Of-Time compilation."""
    def emit_native_object(self):
        """
        Return this library as a native object (a bytestring) -- for example
        ELF under Linux.
        This function implicitly calls .finalize().
        """
        self._ensure_finalized()
        return self._codegen._tm.emit_object(self._final_module)
    def emit_bitcode(self):
        """
        Return this library as LLVM bitcode (a bytestring).
        This function implicitly calls .finalize().
        """
        self._ensure_finalized()
        return self._final_module.as_bitcode()
    def _finalize_specific(self):
        # Nothing extra to do for AOT: output is emitted on demand and no
        # execution engine needs notifying.
        pass
class JITCodeLibrary(CodeLibrary):
    """CodeLibrary flavour used for Just-In-Time compilation."""
    def get_pointer_to_function(self, name):
        """
        Generate native code for function named *name* and return a pointer
        to the start of the function (as an integer).
        This function implicitly calls .finalize().
        """
        self._ensure_finalized()
        return self._codegen._engine.get_function_address(name)
    def _finalize_specific(self):
        # Tell the MCJIT engine the module set is complete so it can emit
        # machine code.
        self._codegen._engine.finalize_object()
class BaseCPUCodegen(object):
    """
    Base class for CPU codegen objects.  Owns the LLVM target machine,
    MCJIT engine and pass managers; concrete subclasses (AOT / JIT)
    customize target-machine options, features and module registration.
    """
    _llvm_initialized = False
    def __init__(self, module_name):
        initialize_llvm()
        self._libraries = set()
        self._data_layout = None
        self._llvm_module = ll.parse_assembly(
            str(self._create_empty_module(module_name)))
        self._llvm_module.name = "global_codegen_module"
        self._init(self._llvm_module)
    def _init(self, llvm_module):
        # Build the target machine, MCJIT engine and module pass manager
        # from the (empty) top-level module.
        assert list(llvm_module.global_variables) == [], "Module isn't empty"
        target = ll.Target.from_triple(ll.get_process_triple())
        tm_options = dict(opt=config.OPT)
        self._tm_features = self._customize_tm_features()
        self._customize_tm_options(tm_options)
        tm = target.create_target_machine(**tm_options)
        engine = ll.create_mcjit_compiler(llvm_module, tm)
        self._tm = tm
        self._engine = engine
        self._target_data = engine.target_data
        self._data_layout = str(self._target_data)
        self._mpm = self._module_pass_manager()
        # Route compiled-object notifications through the library class's
        # cache hooks (see CodeLibrary).
        self._engine.set_object_cache(self._library_class._object_compiled_hook,
                                      self._library_class._object_getbuffer_hook)
    def _create_empty_module(self, name):
        # Fresh IR module stamped with the process triple and, once known,
        # the target data layout.
        ir_module = lc.Module(name)
        ir_module.triple = ll.get_process_triple()
        if self._data_layout:
            ir_module.data_layout = self._data_layout
        return ir_module
    @property
    def target_data(self):
        """
        The LLVM "target data" object for this codegen instance.
        """
        return self._target_data
    def add_linking_library(self, library):
        """
        Add a library for linking into all libraries created by this
        codegen object, without losing the original library.
        """
        library._ensure_finalized()
        self._libraries.add(library)
    def create_library(self, name):
        """
        Create a :class:`CodeLibrary` object for use with this codegen
        instance.
        """
        return self._library_class(self, name)
    def unserialize_library(self, serialized):
        # Rebuild a library from the state produced by its serialize_* API.
        return self._library_class._unserialize(self, serialized)
    def _module_pass_manager(self):
        # Module-level pass manager, used to optimize finalized libraries.
        pm = ll.create_module_pass_manager()
        dl = ll.create_target_data(self._data_layout)
        dl.add_pass(pm)
        self._tm.add_analysis_passes(pm)
        with self._pass_manager_builder() as pmb:
            pmb.populate(pm)
        return pm
    def _function_pass_manager(self, llvm_module):
        # Function-level pass manager bound to *llvm_module*.
        pm = ll.create_function_pass_manager(llvm_module)
        self._target_data.add_pass(pm)
        self._tm.add_analysis_passes(pm)
        with self._pass_manager_builder() as pmb:
            pmb.populate(pm)
        return pm
    def _pass_manager_builder(self):
        """
        Create a PassManagerBuilder.
        Note: a PassManagerBuilder seems good only for one use, so you
        should call this method each time you want to populate a module
        or function pass manager. Otherwise some optimizations will be
        missed...
        """
        pmb = lp.create_pass_manager_builder(
            opt=config.OPT, loop_vectorize=config.LOOP_VECTORIZE)
        return pmb
    def _check_llvm_bugs(self):
        """
        Guard against some well-known LLVM bug(s).
        """
        # Check the locale bug at https://github.com/numba/numba/issues/1569
        # Note we can't cache the result as locale settings can change
        # accross a process's lifetime. Also, for this same reason,
        # the check here is a mere heuristic (there may be a race condition
        # between now and actually compiling IR).
        ir = """
        define double @func()
        {
           ret double 1.23e+01
        }
        """
        mod = ll.parse_assembly(ir)
        ir_out = str(mod)
        if "12.3" in ir_out or "1.23" in ir_out:
            # Everything ok
            return
        if "1.0" in ir_out:
            loc = locale.getlocale()
            raise RuntimeError(
                "LLVM will produce incorrect floating-point code "
                "in the current locale %s.\nPlease read "
                "http://numba.pydata.org/numba-doc/dev/user/faq.html#llvm-locale-bug "
                "for more information."
                % (loc,))
        raise AssertionError("Unexpected IR:\n%s\n" % (ir_out,))
    def magic_tuple(self):
        """
        Return a tuple unambiguously describing the codegen behaviour.
        """
        return (self._llvm_module.triple, ll.get_host_cpu_name(),
                self._tm_features)
class AOTCPUCodegen(BaseCPUCodegen):
    """
    A codegen implementation suitable for Ahead-Of-Time compilation
    (e.g. generation of object files).
    """
    _library_class = AOTCodeLibrary
    def __init__(self, module_name, cpu_name=None):
        # An empty CPU name selects the generic model for the architecture.
        self._cpu_name = cpu_name if cpu_name else ''
        BaseCPUCodegen.__init__(self, module_name)
    def _customize_tm_options(self, options):
        requested_cpu = self._cpu_name
        if requested_cpu == 'host':
            requested_cpu = ll.get_host_cpu_name()
        options.update(cpu=requested_cpu,
                       reloc='pic',
                       codemodel='default',
                       features=self._tm_features)
    def _customize_tm_features(self):
        # ISA features follow from the CPU model chosen in
        # _customize_tm_options(), so no explicit feature string is needed.
        return ''
    def _add_module(self, module):
        # AOT compilation never executes code, so there is no engine to
        # register modules with.
        pass
class JITCPUCodegen(BaseCPUCodegen):
    """
    A codegen implementation suitable for Just-In-Time compilation.
    """
    _library_class = JITCodeLibrary
    def _customize_tm_options(self, options):
        # The generated code never leaves this machine, so it is safe to
        # specialize for the host CPU.  The explicit feature string below
        # overrides the defaults implied by the CPU model.
        options.update({'cpu': ll.get_host_cpu_name(),
                        'reloc': 'default',
                        'codemodel': 'jitdefault',
                        'features': self._tm_features,
                        # Enable JIT debug
                        'jitdebug': True})
    def _customize_tm_features(self):
        # Ask LLVM for the host feature map, masking out AVX when disabled.
        feature_map = ll.get_host_cpu_features()
        if not config.ENABLE_AVX:
            avx_like = [name for name in feature_map
                        if name.startswith('avx')]
            for name in avx_like:
                feature_map[name] = False
        return feature_map.flatten()
    def _add_module(self, module):
        self._engine.add_module(module)
        # Bind the engine method early so the returned cleanup callable
        # does not keep a reference to self alive.
        return functools.partial(self._engine.remove_module, module)
# Module-level flag making LLVM initialization a one-time operation.
_llvm_initialized = False
def initialize_llvm():
    """Initialize the LLVM bindings for native code generation (idempotent)."""
    global _llvm_initialized
    if _llvm_initialized:
        return
    ll.initialize()
    ll.initialize_native_target()
    ll.initialize_native_asmprinter()
    _llvm_initialized = True
|
stefanseefeld/numba
|
numba/targets/codegen.py
|
Python
|
bsd-2-clause
| 20,232
|
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``scattermapbox.selected.marker`` compound property.

    Generated-style boilerplate: all behaviour comes from CompoundValidator;
    this subclass only supplies the property name, parent path, data class
    and the documentation string for the nested attributes.
    """
    def __init__(
        self, plotly_name="marker", parent_name="scattermapbox.selected", **kwargs
    ):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the marker color of selected points.
            opacity
                Sets the marker opacity of selected points.
            size
                Sets the marker size of selected points.
""",
            ),
            **kwargs
        )
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scattermapbox/selected/_marker.py
|
Python
|
mit
| 769
|
import nltk
import re
import pickle
import json
import os.path as path
def LoadData(datalist):
    """Build a token database from a list of JSON QA data files.

    Each file is expected to carry one JSON document on its first line: a
    list of {'question': ..., 'answer': ...} pairs.  Returns a flat list of
    lowercased tokens drawn from all files.
    """
    database = []
    for datafile in datalist:
        # Use a context manager so the handle is closed even if json
        # parsing below raises (the original leaked on error paths).
        with open(datafile) as f:
            line = f.readline()
        # NOTE(review): only the first line of each file is parsed --
        # behaviour preserved from the original; confirm the data files
        # really are single-line JSON.
        raw_data = json.loads(line.strip())
        database = PushData(raw_data, database)
    return database
def PushData(data, database):
    """Return *database* extended with lowercased question/answer tokens.

    The input list is not mutated (a new list is returned, matching the
    original copy-and-concatenate contract).  Tokens are accumulated in a
    local list first: the original rebuilt the whole database once per QA
    pair (`database = database + [...]`), which is quadratic.
    """
    tokens = []
    for pair in data:
        word = nltk.word_tokenize(pair['question']) + nltk.word_tokenize(pair['answer'])
        tokens.extend(item.lower() for item in word)
    return database + tokens
# Build the dictionary from every corpus file listed in the manifest and
# pickle the resulting list of unique tokens.
with open('cnn_qa_human_response_name_high_app.list') as list_file:
    # Context manager: the original `open()` inside a comprehension leaked
    # the file handle.
    datalist = [line.strip() for line in list_file]
database = LoadData(datalist)
database = list(set(database))
# 'wb', not 'w': pickle writes bytes, and dumping to a text-mode file
# raises TypeError on Python 3.
with open('dictionary_value.pkl', 'wb') as f:
    pickle.dump(database, f)
|
leahrnh/ticktock_text_api
|
make_dictionary.py
|
Python
|
gpl-2.0
| 740
|
# vim:set fileencoding=utf-8
import unittest
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
from mock import Mock
import blivet
from blivet.errors import BTRFSValueError
from blivet.errors import DeviceError
from blivet.devices import BTRFSSnapShotDevice
from blivet.devices import BTRFSSubVolumeDevice
from blivet.devices import BTRFSVolumeDevice
from blivet.devices import DiskDevice
from blivet.devices import MDBiosRaidArrayDevice
from blivet.devices import MDContainerDevice
from blivet.devices import MDRaidArrayDevice
from blivet.devices import OpticalDevice
from blivet.devices import StorageDevice
from blivet.devices import ParentList
from blivet.devicelibs import btrfs
from blivet.devicelibs import mdraid
from blivet.size import Size
from blivet.formats import getFormat
BTRFS_MIN_MEMBER_SIZE = getFormat("btrfs").minSize
def xform(func):
    """ Wrap *func* so it can be driven by (device, attribute-name) pairs.

        The returned callable looks the named attribute up on the device
        and forwards the value to *func*, passing the attribute name as
        the message argument.

        :param func: The function to be transformed.
        :type func: (object * str) -> None
        :returns: a function that gets the attribute and passes it to func
        :rtype: (object * str) -> None
    """
    def _checker(device, attr_name):
        return func(getattr(device, attr_name), attr_name)
    return _checker
class DeviceStateTestCase(unittest.TestCase):
    """A class which implements a simple method of checking the state
    of a device object.
    """
    def __init__(self, methodName='runTest'):
        # Default per-attribute checkers; subclasses extend this dict and
        # individual tests may override entries via stateCheck kwargs.
        self._state_functions = {
           "currentSize" : xform(lambda x, m: self.assertEqual(x, Size(0), m)),
           "direct" : xform(self.assertTrue),
           "exists" : xform(self.assertFalse),
           "format" : xform(self.assertIsNotNone),
           "formatArgs" : xform(lambda x, m: self.assertEqual(x, [], m)),
           "isDisk" : xform(self.assertFalse),
           "isleaf" : xform(self.assertTrue),
           "major" : xform(lambda x, m: self.assertEqual(x, 0, m)),
           "maxSize" : xform(lambda x, m: self.assertEqual(x, Size(0), m)),
           "mediaPresent" : xform(self.assertTrue),
           "minor" : xform(lambda x, m: self.assertEqual(x, 0, m)),
           # BUG FIX: the original combined the two assertions with `and`.
           # Since assertEqual returns None (falsy), the assertIsInstance
           # check was never evaluated.  Run both assertions instead.
           "parents" : xform(lambda x, m: (self.assertEqual(len(x), 0, m),
                                           self.assertIsInstance(x, ParentList, m))),
           "partitionable" : xform(self.assertFalse),
           "path" : xform(lambda x, m: self.assertRegex(x, "^/dev", m)),
           "raw_device" : xform(self.assertIsNotNone),
           "resizable" : xform(self.assertFalse),
           "size" : xform(lambda x, m: self.assertEqual(x, Size(0), m)),
           "status" : xform(self.assertFalse),
           "sysfsPath" : xform(lambda x, m: self.assertEqual(x, "", m)),
           "targetSize" : xform(lambda x, m: self.assertEqual(x, Size(0), m)),
           "type" : xform(lambda x, m: self.assertEqual(x, "mdarray", m)),
           "uuid" : xform(self.assertIsNone)
        }
        super(DeviceStateTestCase, self).__init__(methodName=methodName)
    def stateCheck(self, device, **kwargs):
        """Checks the current state of a device by means of its
        fields or properties.
        Every kwarg should be a key which is a field or property
        of a Device and a value which is a function of
        two parameters and should call the appropriate assert* functions.
        These values override those in the state_functions dict.
        If the value is None, then the test starts the debugger instead.
        """
        self.longMessage = True
        for k,v in self._state_functions.items():
            if k in kwargs:
                test_func = kwargs[k]
                if test_func is None:
                    # A None override drops into the debugger so the
                    # attribute can be inspected interactively.
                    import pdb
                    pdb.set_trace()
                    getattr(device, k)
                else:
                    test_func(device, k)
            else:
                v(device, k)
class MDRaidArrayDeviceTestCase(DeviceStateTestCase):
"""Note that these tests postdate the code that they test.
Therefore, they capture the behavior of the code as it is now,
not necessarily its intended or correct behavior. See the initial
commit message for this file for further details.
"""
def __init__(self, methodName='runTest'):
super(MDRaidArrayDeviceTestCase, self).__init__(methodName=methodName)
state_functions = {
"createBitmap" : xform(lambda d, a: self.assertFalse),
"description" : xform(self.assertIsNotNone),
"formatClass" : xform(self.assertIsNotNone),
"level" : xform(self.assertIsNone),
"mdadmFormatUUID" : xform(self.assertIsNone),
"memberDevices" : xform(lambda x, m: self.assertEqual(x, 0, m)),
"members" : xform(lambda x, m: self.assertEqual(len(x), 0, m) and
self.assertIsInstance(x, list, m)),
"metadataVersion" : xform(lambda x, m: self.assertEqual(x, "default", m)),
"spares" : xform(lambda x, m: self.assertEqual(x, 0, m)),
"totalDevices" : xform(lambda x, m: self.assertEqual(x, 0, m))
}
self._state_functions.update(state_functions)
def setUp(self):
self.md_chunk_size = mdraid.MD_CHUNK_SIZE
mdraid.MD_CHUNK_SIZE = Size("1 MiB")
self.get_super_block_size = MDRaidArrayDevice.getSuperBlockSize
MDRaidArrayDevice.getSuperBlockSize = lambda a, s: Size(0)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember"))
]
self.dev1 = MDContainerDevice("dev1", level="container", parents=parents, totalDevices=1, memberDevices=1)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember"), size=Size("1 GiB")),
DiskDevice("name2", fmt=getFormat("mdmember"), size=Size("1 GiB"))
]
self.dev2 = MDRaidArrayDevice("dev2", level="raid0", parents=parents,
totalDevices=2, memberDevices=2)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember"))
]
self.dev3 = MDRaidArrayDevice("dev3", level="raid1", parents=parents)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember")),
DiskDevice("name3", fmt=getFormat("mdmember"))
]
self.dev4 = MDRaidArrayDevice("dev4", level="raid4", parents=parents)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember")),
DiskDevice("name3", fmt=getFormat("mdmember"))
]
self.dev5 = MDRaidArrayDevice("dev5", level="raid5", parents=parents)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember")),
DiskDevice("name3", fmt=getFormat("mdmember")),
DiskDevice("name4", fmt=getFormat("mdmember"))
]
self.dev6 = MDRaidArrayDevice("dev6", level="raid6", parents=parents)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember")),
DiskDevice("name3", fmt=getFormat("mdmember")),
DiskDevice("name4", fmt=getFormat("mdmember"))
]
self.dev7 = MDRaidArrayDevice("dev7", level="raid10", parents=parents)
self.dev8 = MDRaidArrayDevice("dev8", level=1, exists=True)
parents_1 = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember"))
]
dev_1 = MDContainerDevice(
"parent",
level="container",
parents=parents_1,
totalDevices=2,
memberDevices=2
)
self.dev9 = MDBiosRaidArrayDevice(
"dev9",
level="raid0",
memberDevices=1,
parents=[dev_1],
totalDevices=1,
exists=True
)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember"))
]
self.dev10 = MDRaidArrayDevice(
"dev10",
level="raid0",
parents=parents,
size=Size("32 MiB"))
parents_1 = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember"))
]
dev_1 = MDContainerDevice(
"parent",
level="container",
parents=parents,
totalDevices=2,
memberDevices=2
)
self.dev11 = MDBiosRaidArrayDevice(
"dev11",
level=1,
exists=True,
parents=[dev_1],
size=Size("32 MiB"))
self.dev13 = MDRaidArrayDevice(
"dev13",
level=0,
memberDevices=2,
parents=[
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")})],
size=Size("32 MiB"),
totalDevices=2)
self.dev14 = MDRaidArrayDevice(
"dev14",
level=4,
memberDevices=3,
parents=[
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")})],
totalDevices=3)
self.dev15 = MDRaidArrayDevice(
"dev15",
level=5,
memberDevices=3,
parents=[
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")})],
totalDevices=3)
self.dev16 = MDRaidArrayDevice(
"dev16",
level=6,
memberDevices=4,
parents=[
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")})],
totalDevices=4)
self.dev17 = MDRaidArrayDevice(
"dev17",
level=10,
memberDevices=4,
parents=[
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")})],
totalDevices=4)
self.dev18 = MDRaidArrayDevice(
"dev18",
level=10,
memberDevices=4,
parents=[
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("4 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")}),
Mock(**{"size": Size("2 MiB"),
"format": getFormat("mdmember")})],
totalDevices=5)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember"))
]
self.dev19 = MDRaidArrayDevice(
"dev19",
level="raid1",
parents=parents,
uuid='3386ff85-f501-2621-4a43-5f061eb47236'
)
parents = [
DiskDevice("name1", fmt=getFormat("mdmember")),
DiskDevice("name2", fmt=getFormat("mdmember"))
]
self.dev20 = MDRaidArrayDevice(
"dev20",
level="raid1",
parents=parents,
uuid='Just-pretending'
)
def tearDown(self):
    """Undo the monkey-patching performed in setUp.

    setUp saved the original ``mdraid.MD_CHUNK_SIZE`` module constant and
    the original ``MDRaidArrayDevice.getSuperBlockSize`` method before
    replacing them; restore both so later tests see pristine values.
    """
    MDRaidArrayDevice.getSuperBlockSize = self.get_super_block_size
    mdraid.MD_CHUNK_SIZE = self.md_chunk_size
def testMDRaidArrayDeviceInit(self):
    """Tests the state of a MDRaidArrayDevice after initialization.
    For some combinations of arguments the initializer will throw
    an exception.

    Each stateCheck call verifies one of the arrays built in setUp
    against per-attribute expectations; attributes not listed are
    checked against the class-wide defaults.
    """
    ##
    ## level tests
    ##
    # dev1: a RAID "container" level array -- reports no media and the
    # special "mdcontainer" device type.
    self.stateCheck(self.dev1,
                    level=xform(lambda x, m: self.assertEqual(x.name, "container", m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 1, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 1, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 1, m)),
                    mediaPresent=xform(self.assertFalse),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 1, m)),
                    type=xform(lambda x, m: self.assertEqual(x, "mdcontainer", m)))
    # dev2: RAID0 with two members -- RAID0 has no write-intent bitmap.
    self.stateCheck(self.dev2,
                    createBitmap=xform(self.assertFalse),
                    level=xform(lambda x, m: self.assertEqual(x.number, 0, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 2, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    size=xform(lambda x, m: self.assertEqual(x, Size("2 GiB"), m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 2, m)))
    # dev3-dev7: one array per remaining standard level (1, 4, 5, 6, 10),
    # each with the minimum member count for that level.
    self.stateCheck(self.dev3,
                    level=xform(lambda x, m: self.assertEqual(x.number, 1, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 2, m)))
    self.stateCheck(self.dev4,
                    level=xform(lambda x, m: self.assertEqual(x.number, 4, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 3, m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 3, m)))
    self.stateCheck(self.dev5,
                    level=xform(lambda x, m: self.assertEqual(x.number, 5, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 3, m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 3, m)))
    self.stateCheck(self.dev6,
                    level=xform(lambda x, m: self.assertEqual(x.number, 6, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 4, m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 4, m)))
    self.stateCheck(self.dev7,
                    level=xform(lambda x, m: self.assertEqual(x.number, 10, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 4, m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 4, m)))
    ##
    ## existing device tests
    ##
    # dev8: pre-existing RAID1 array -- metadataVersion is unknown (None)
    # for an array we did not create ourselves.
    self.stateCheck(self.dev8,
                    exists=xform(self.assertTrue),
                    level=xform(lambda x, m: self.assertEqual(x.number, 1, m)),
                    metadataVersion=xform(self.assertIsNone))
    ##
    ## mdbiosraidarray tests
    ##
    # dev9: BIOS RAID (firmware) array -- behaves like a disk and is
    # partitionable, with the "mdbiosraidarray" type.
    self.stateCheck(self.dev9,
                    createBitmap=xform(self.assertFalse),
                    isDisk=xform(self.assertTrue),
                    exists=xform(self.assertTrue),
                    level=xform(lambda x, m: self.assertEqual(x.number, 0, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 2, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    metadataVersion=xform(lambda x, m: self.assertEqual(x, None, m)),
                    parents=xform(lambda x, m: self.assertNotEqual(x, [], m)),
                    partitionable=xform(self.assertTrue),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 2, m)),
                    type = xform(lambda x, m: self.assertEqual(x, "mdbiosraidarray", m)))
    ##
    ## size tests
    ##
    # dev10: non-existent array with an explicit requested size, which is
    # reflected in targetSize only.
    self.stateCheck(self.dev10,
                    createBitmap=xform(self.assertFalse),
                    level=xform(lambda x, m: self.assertEqual(x.number, 0, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    targetSize=xform(lambda x, m: self.assertEqual(x, Size("32 MiB"), m)))
    # dev11: existing BIOS RAID array -- current, max, requested and
    # reported size all agree for an existing device.
    self.stateCheck(self.dev11,
                    isDisk=xform(self.assertTrue),
                    exists=xform(lambda x, m: self.assertEqual(x, True, m)),
                    level=xform(lambda x, m: self.assertEqual(x.number, 1, m)),
                    currentSize=xform(lambda x, m: self.assertEqual(x, Size("32 MiB"), m)),
                    maxSize=xform(lambda x, m: self.assertEqual(x, Size("32 MiB"), m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 2, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    metadataVersion=xform(lambda x, m: self.assertEqual(x, None, m)),
                    parents=xform(lambda x, m: self.assertNotEqual(x, [], m)),
                    partitionable=xform(self.assertTrue),
                    size=xform(lambda x, m: self.assertEqual(x, Size("32 MiB"), m)),
                    targetSize=xform(lambda x, m: self.assertEqual(x, Size("32 MiB"), m)),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 2, m)),
                    type=xform(lambda x, m: self.assertEqual(x, "mdbiosraidarray", m)))
    # dev13: the computed array size differs from the requested target
    # size, since size is derived from the member sizes.
    self.stateCheck(self.dev13,
                    createBitmap=xform(self.assertFalse),
                    level=xform(lambda x, m: self.assertEqual(x.number, 0, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 2, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    parents=xform(lambda x, m: self.assertNotEqual(x, [], m)),
                    size=xform(lambda x, m: self.assertEqual(x, Size("4 MiB"), m)),
                    targetSize=xform(lambda x, m: self.assertEqual(x, Size("32 MiB"), m)),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 2, m)))
    # dev14-dev17: size computation for levels 4, 5, 6 and 10 with mixed
    # 4 MiB / 2 MiB members; all redundant levels get a bitmap.
    self.stateCheck(self.dev14,
                    createBitmap=xform(self.assertTrue),
                    level=xform(lambda x, m: self.assertEqual(x.number, 4, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 3, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 3, m)),
                    parents=xform(lambda x, m: self.assertNotEqual(x, [], m)),
                    size=xform(lambda x, m: self.assertEqual(x, Size("4 MiB"), m)),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 3, m)))
    self.stateCheck(self.dev15,
                    createBitmap=xform(self.assertTrue),
                    level=xform(lambda x, m: self.assertEqual(x.number, 5, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 3, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 3, m)),
                    parents=xform(lambda x, m: self.assertNotEqual(x, [], m)),
                    size=xform(lambda x, m: self.assertEqual(x, Size("4 MiB"), m)),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 3, m)))
    self.stateCheck(self.dev16,
                    createBitmap=xform(self.assertTrue),
                    level=xform(lambda x, m: self.assertEqual(x.number, 6, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 4, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 4, m)),
                    parents=xform(lambda x, m: self.assertNotEqual(x, [], m)),
                    size=xform(lambda x, m: self.assertEqual(x, Size("4 MiB"), m)),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 4, m)))
    self.stateCheck(self.dev17,
                    createBitmap=xform(self.assertTrue),
                    level=xform(lambda x, m: self.assertEqual(x.number, 10, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 4, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 4, m)),
                    parents=xform(lambda x, m: self.assertNotEqual(x, [], m)),
                    size=xform(lambda x, m: self.assertEqual(x, Size("4 MiB"), m)),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 4, m)))
    # dev18: totalDevices (5) exceeds memberDevices (4), so the extra
    # device counts as a spare.
    self.stateCheck(self.dev18,
                    createBitmap=xform(self.assertTrue),
                    level=xform(lambda x, m: self.assertEqual(x.number, 10, m)),
                    memberDevices=xform(lambda x, m: self.assertEqual(x, 4, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 4, m)),
                    parents=xform(lambda x, m: self.assertNotEqual(x, [], m)),
                    size=xform(lambda x, m: self.assertEqual(x, Size("4 MiB"), m)),
                    spares=xform(lambda x, m: self.assertEqual(x, 1, m)),
                    totalDevices=xform(lambda x, m: self.assertEqual(x, 5, m)))
    # dev19: a well-formed md UUID is converted to mdadm's format.
    self.stateCheck(self.dev19,
                    level=xform(lambda x, m: self.assertEqual(x.number, 1, m)),
                    mdadmFormatUUID=xform(lambda x, m: self.assertEqual(x, blockdev.md.get_md_uuid(self.dev19.uuid), m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    uuid=xform(lambda x, m: self.assertEqual(x, self.dev19.uuid, m)))
    # dev20: an ill-formed UUID ('Just-pretending') is stored verbatim;
    # note no mdadmFormatUUID expectation here.
    self.stateCheck(self.dev20,
                    level=xform(lambda x, m: self.assertEqual(x.number, 1, m)),
                    members=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    parents=xform(lambda x, m: self.assertEqual(len(x), 2, m)),
                    uuid=xform(lambda x, m: self.assertEqual(x, self.dev20.uuid, m)))
    # Invalid constructor combinations: no level at all ...
    with self.assertRaisesRegex(DeviceError, "invalid"):
        MDRaidArrayDevice("dev")
    # ... a level name that does not exist ...
    with self.assertRaisesRegex(DeviceError, "invalid"):
        MDRaidArrayDevice("dev", level="raid2")
    # ... parents supplied but still no level ...
    with self.assertRaisesRegex(DeviceError, "invalid"):
        MDRaidArrayDevice(
           "dev",
           parents=[StorageDevice("parent", fmt=getFormat("mdmember"))])
    # ... RAID0 with only a single member ...
    with self.assertRaisesRegex(DeviceError, "at least 2 members"):
        MDRaidArrayDevice(
           "dev",
           level="raid0",
           parents=[StorageDevice("parent", fmt=getFormat("mdmember"))])
    # ... a junk level string ...
    with self.assertRaisesRegex(DeviceError, "invalid"):
        MDRaidArrayDevice("dev", level="junk")
    # ... and memberDevices claimed without actual parent devices.
    with self.assertRaisesRegex(DeviceError, "at least 2 members"):
        MDRaidArrayDevice("dev", level=0, memberDevices=2)
def testMDRaidArrayDeviceMethods(self):
    """Test for method calls on initialized MDRaidDevices.

    Assigning an unrecognized RAID level to an existing array must be
    rejected with a DeviceError.
    """
    for bad_level in ("junk", None):
        with self.assertRaisesRegex(DeviceError, "invalid"):
            self.dev7.level = bad_level
class BTRFSDeviceTestCase(DeviceStateTestCase):
    """Note that these tests postdate the code that they test.
    Therefore, they capture the behavior of the code as it is now,
    not necessarily its intended or correct behavior. See the initial
    commit message for this file for further details.
    """

    def __init__(self, methodName='runTest'):
        super(BTRFSDeviceTestCase, self).__init__(methodName=methodName)
        # Default per-attribute expectations shared by every BTRFS device
        # under test; individual stateCheck calls override these.
        state_functions = {
           "dataLevel" : lambda d, a: self.assertFalse(hasattr(d,a)),
           "fstabSpec" : xform(self.assertIsNotNone),
           "mediaPresent" : xform(self.assertTrue),
           "metaDataLevel" : lambda d, a: self.assertFalse(hasattr(d, a)),
           "type" : xform(lambda x, m: self.assertEqual(x, "btrfs", m)),
           "vol_id" : xform(lambda x, m: self.assertEqual(x, btrfs.MAIN_VOLUME_ID, m))}
        self._state_functions.update(state_functions)

    def setUp(self):
        # dev1: a minimal-size btrfs volume; dev2: a subvolume of dev1;
        # dev3: a larger (500 MiB) volume for the size checks.
        self.dev1 = BTRFSVolumeDevice("dev1",
           parents=[StorageDevice("deva",
              fmt=blivet.formats.getFormat("btrfs"),
              size=BTRFS_MIN_MEMBER_SIZE)])

        self.dev2 = BTRFSSubVolumeDevice("dev2",
           parents=[self.dev1],
           fmt=blivet.formats.getFormat("btrfs"))

        dev = StorageDevice("deva",
           fmt=blivet.formats.getFormat("btrfs"),
           size=Size("500 MiB"))
        self.dev3 = BTRFSVolumeDevice("dev3",
           parents=[dev])

    def testBTRFSDeviceInit(self):
        """Tests the state of a BTRFSDevice after initialization.
        For some combinations of arguments the initializer will throw
        an exception.
        """
        # dev1: volume size is pinned to its single minimal member.
        self.stateCheck(self.dev1,
                        currentSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        dataLevel=xform(self.assertIsNone),
                        isleaf=xform(self.assertFalse),
                        maxSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        metaDataLevel=xform(self.assertIsNone),
                        parents=xform(lambda x, m: self.assertEqual(len(x), 1, m)),
                        size=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        type=xform(lambda x, m: self.assertEqual(x, "btrfs volume", m)))
        # dev2: subvolumes inherit the volume's size and have no vol_id yet.
        self.stateCheck(self.dev2,
                        targetSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        currentSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        maxSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        parents=xform(lambda x, m: self.assertEqual(len(x), 1, m)),
                        size=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        type=xform(lambda x, m: self.assertEqual(x, "btrfs subvolume", m)),
                        vol_id=xform(self.assertIsNone))
        # dev3: volume size follows its larger 500 MiB member.
        self.stateCheck(self.dev3,
                        currentSize=xform(lambda x, m: self.assertEqual(x, Size("500 MiB"), m)),
                        dataLevel=xform(self.assertIsNone),
                        maxSize=xform(lambda x, m: self.assertEqual(x, Size("500 MiB"), m)),
                        metaDataLevel=xform(self.assertIsNone),
                        parents=xform(lambda x, m: self.assertEqual(len(x), 1, m)),
                        size=xform(lambda x, m: self.assertEqual(x, Size("500 MiB"), m)),
                        type=xform(lambda x, m: self.assertEqual(x, "btrfs volume", m)))

        # A volume needs at least one parent ...
        with self.assertRaisesRegex(ValueError, "BTRFSDevice.*must have at least one parent"):
            BTRFSVolumeDevice("dev")

        # ... and that parent must be formatted as btrfs.
        with self.assertRaisesRegex(ValueError, "format"):
            BTRFSVolumeDevice("dev", parents=[StorageDevice("deva", size=BTRFS_MIN_MEMBER_SIZE)])

        # A subvolume's parent must be a btrfs volume, not a plain device.
        with self.assertRaisesRegex(DeviceError, "btrfs subvolume.*must be a btrfs volume"):
            fmt = blivet.formats.getFormat("btrfs")
            device = StorageDevice("deva", fmt=fmt, size=BTRFS_MIN_MEMBER_SIZE)
            BTRFSSubVolumeDevice("dev1", parents=[device])

        # raid1 data requires at least two members; one optical disc fails.
        deva = OpticalDevice("deva", fmt=blivet.formats.getFormat("btrfs", exists=True),
                             exists=True)
        with self.assertRaisesRegex(BTRFSValueError, "at least"):
            BTRFSVolumeDevice("dev1", dataLevel="raid1", parents=[deva])

        # "dup" is a valid metadata level for a single member ...
        deva = StorageDevice("deva", fmt=blivet.formats.getFormat("btrfs"), size=BTRFS_MIN_MEMBER_SIZE)
        self.assertIsNotNone(BTRFSVolumeDevice("dev1", metaDataLevel="dup", parents=[deva]))

        # ... but not a valid data level.
        deva = StorageDevice("deva", fmt=blivet.formats.getFormat("btrfs"), size=BTRFS_MIN_MEMBER_SIZE)
        with self.assertRaisesRegex(BTRFSValueError, "invalid"):
            BTRFSVolumeDevice("dev1", dataLevel="dup", parents=[deva])

        # Leaf/direct relationships: the volume has a child (the subvolume),
        # the member is only reachable through the volume.
        self.assertEqual(self.dev1.isleaf, False)
        self.assertEqual(self.dev1.direct, True)
        self.assertEqual(self.dev2.isleaf, True)
        self.assertEqual(self.dev2.direct, True)

        member = self.dev1.parents[0]
        self.assertEqual(member.isleaf, False)
        self.assertEqual(member.direct, False)

    def testBTRFSDeviceMethods(self):
        """Test for method calls on initialized BTRFS Devices."""
        # volumes do not have ancestor volumes
        with self.assertRaises(AttributeError):
            self.dev1.volume # pylint: disable=no-member,pointless-statement

        # subvolumes do not have default subvolumes
        with self.assertRaises(AttributeError):
            self.dev2.defaultSubVolume # pylint: disable=no-member,pointless-statement

        self.assertIsNotNone(self.dev2.volume)

        # size
        with self.assertRaisesRegex(RuntimeError, "cannot directly set size of btrfs volume"):
            self.dev1.size = Size("500 MiB")

    def testBTRFSSnapShotDeviceInit(self):
        # Snapshot construction constraints: a non-existent snapshot needs a
        # source, the source must exist, must be a btrfs subvolume/volume,
        # and must live in the same volume as the snapshot.
        parents = [StorageDevice("p1", fmt=blivet.formats.getFormat("btrfs"), size=BTRFS_MIN_MEMBER_SIZE)]
        vol = BTRFSVolumeDevice("test", parents=parents)
        with self.assertRaisesRegex(ValueError, "non-existent btrfs snapshots must have a source"):
            BTRFSSnapShotDevice("snap1", parents=[vol])

        with self.assertRaisesRegex(ValueError, "btrfs snapshot source must already exist"):
            BTRFSSnapShotDevice("snap1", parents=[vol], source=vol)

        with self.assertRaisesRegex(ValueError, "btrfs snapshot source must be a btrfs subvolume"):
            BTRFSSnapShotDevice("snap1", parents=[vol], source=parents[0])

        parents2 = [StorageDevice("p1", fmt=blivet.formats.getFormat("btrfs"), size=BTRFS_MIN_MEMBER_SIZE)]
        vol2 = BTRFSVolumeDevice("test2", parents=parents2, exists=True)
        with self.assertRaisesRegex(ValueError, ".*snapshot and source must be in the same volume"):
            BTRFSSnapShotDevice("snap1", parents=[vol], source=vol2)

        # With an existing source in the same volume the snapshot succeeds.
        vol.exists = True
        snap = BTRFSSnapShotDevice("snap1",
           fmt=blivet.formats.getFormat("btrfs"),
           parents=[vol],
           source=vol)
        self.stateCheck(snap,
                        currentSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        targetSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        maxSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        parents=xform(lambda x, m: self.assertEqual(len(x), 1, m)),
                        size=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        type=xform(lambda x, m: self.assertEqual(x, "btrfs snapshot", m)),
                        vol_id=xform(self.assertIsNone))
        # Creating the snapshot must not change the volume's own state
        # beyond it now existing and having a child.
        self.stateCheck(vol,
                        currentSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        dataLevel=xform(self.assertIsNone),
                        exists=xform(self.assertTrue),
                        isleaf=xform(self.assertFalse),
                        maxSize=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        metaDataLevel=xform(self.assertIsNone),
                        parents=xform(lambda x, m: self.assertEqual(len(x), 1, m)),
                        size=xform(lambda x, m: self.assertEqual(x, BTRFS_MIN_MEMBER_SIZE, m)),
                        type=xform(lambda x, m: self.assertEqual(x, "btrfs volume", m)))

        # The snapshot depends on its source volume, not vice versa.
        self.assertEqual(snap.isleaf, True)
        self.assertEqual(snap.direct, True)
        self.assertEqual(vol.isleaf, False)
        self.assertEqual(vol.direct, True)

        self.assertEqual(snap.dependsOn(vol), True)
        self.assertEqual(vol.dependsOn(snap), False)
|
kellinm/blivet
|
tests/devices_test/device_properties_test.py
|
Python
|
gpl-2.0
| 31,599
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.