repo_name | ref | path | copies | content
|---|---|---|---|---|
taoenator/robber.py | refs/heads/master | robber/matchers/__init__.py | 1 |
__all__ = [
'equal', 'identical', 'boolean', 'instanceof',
'regexp', 'respond_to', 'truthy', 'length',
'types', 'numbers', 'contain'
]
# from robber.matchers.base import *
from robber.matchers.equal import *
from robber.matchers.identical import *
from robber.matchers.boolean import *
from robber.matchers.instanceof import *
from robber.matchers.regexp import *
from robber.matchers.respond_to import *
from robber.matchers.truthy import *
from robber.matchers.length import *
from robber.matchers.types import *
from robber.matchers.numbers import *
from robber.matchers.contain import *
|
popazerty/EG-2 | refs/heads/master | lib/python/Components/Task.py | 55 |
# A Job consists of many "Tasks".
# A task is the run of an external tool, with proper methods for failure handling
from Tools.CList import CList
class Job(object):
NOT_STARTED, IN_PROGRESS, FINISHED, FAILED = range(4)
def __init__(self, name):
self.tasks = [ ]
self.resident_tasks = [ ]
self.workspace = "/tmp"
self.current_task = 0
self.callback = None
self.name = name
self.finished = False
self.end = 100
self.__progress = 0
self.weightScale = 1
self.afterEvent = None
self.state_changed = CList()
self.status = self.NOT_STARTED
self.onSuccess = None
# description is a dict
def fromDescription(self, description):
pass
def createDescription(self):
return None
def getProgress(self):
if self.current_task == len(self.tasks):
return self.end
t = self.tasks[self.current_task]
jobprogress = t.weighting * t.progress / float(t.end) + sum([task.weighting for task in self.tasks[:self.current_task]])
return int(jobprogress*self.weightScale)
progress = property(getProgress)
def getStatustext(self):
return { self.NOT_STARTED: _("Waiting"), self.IN_PROGRESS: _("In progress"), self.FINISHED: _("Finished"), self.FAILED: _("Failed") }[self.status]
def task_progress_changed_CB(self):
self.state_changed()
def addTask(self, task):
task.job = self
task.task_progress_changed = self.task_progress_changed_CB
self.tasks.append(task)
def start(self, callback):
assert self.callback is None
self.callback = callback
self.restart()
def restart(self):
self.status = self.IN_PROGRESS
self.state_changed()
self.runNext()
sumTaskWeightings = sum([t.weighting for t in self.tasks]) or 1
self.weightScale = self.end / float(sumTaskWeightings)
def runNext(self):
if self.current_task == len(self.tasks):
if len(self.resident_tasks) == 0:
self.status = self.FINISHED
self.state_changed()
self.callback(self, None, [])
self.callback = None
else:
print "still waiting for %d resident task(s) %s to finish" % (len(self.resident_tasks), str(self.resident_tasks))
else:
self.tasks[self.current_task].run(self.taskCallback)
self.state_changed()
def taskCallback(self, task, res, stay_resident = False):
cb_idx = self.tasks.index(task)
if stay_resident:
if cb_idx not in self.resident_tasks:
self.resident_tasks.append(cb_idx)
print "task going resident:", task
else:
print "task keeps staying resident:", task
return
if len(res):
print ">>> Error:", res
self.status = self.FAILED
self.state_changed()
self.callback(self, task, res)
if cb_idx != self.current_task:
if cb_idx in self.resident_tasks:
print "resident task finished:", task
self.resident_tasks.remove(cb_idx)
if not res:
self.state_changed()
self.current_task += 1
self.runNext()
def retry(self):
assert self.status == self.FAILED
self.restart()
def abort(self):
if self.current_task < len(self.tasks):
self.tasks[self.current_task].abort()
for i in self.resident_tasks:
self.tasks[i].abort()
def cancel(self):
self.abort()
def __str__(self):
return "Components.Task.Job name=%s #tasks=%s" % (self.name, len(self.tasks))
class Task(object):
def __init__(self, job, name):
self.name = name
self.immediate_preconditions = [ ]
self.global_preconditions = [ ]
self.postconditions = [ ]
self.returncode = None
self.initial_input = None
self.job = None
self.end = 100
self.weighting = 100
self.__progress = 0
self.cmd = None
self.cwd = "/tmp"
self.args = [ ]
self.cmdline = None
self.task_progress_changed = None
self.output_line = ""
job.addTask(self)
self.container = None
def setCommandline(self, cmd, args):
self.cmd = cmd
self.args = args
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
self.postconditions.append(ReturncodePostcondition())
def setCmdline(self, cmdline):
self.cmdline = cmdline
def checkPreconditions(self, immediate = False):
not_met = [ ]
if immediate:
preconditions = self.immediate_preconditions
else:
preconditions = self.global_preconditions
for precondition in preconditions:
if not precondition.check(self):
not_met.append(precondition)
return not_met
def _run(self):
if (self.cmd is None) and (self.cmdline is None):
self.finish()
return
from enigma import eConsoleAppContainer
self.container = eConsoleAppContainer()
self.container.appClosed.append(self.processFinished)
self.container.stdoutAvail.append(self.processStdout)
self.container.stderrAvail.append(self.processStderr)
if self.cwd is not None:
self.container.setCWD(self.cwd)
if not self.cmd and self.cmdline:
print "execute:", self.container.execute(self.cmdline), self.cmdline
else:
assert self.cmd is not None
assert len(self.args) >= 1
print "execute:", self.container.execute(self.cmd, *self.args), ' '.join(self.args)
if self.initial_input:
self.writeInput(self.initial_input)
def run(self, callback):
failed_preconditions = self.checkPreconditions(True) + self.checkPreconditions(False)
if failed_preconditions:
print "[Task] preconditions failed"
callback(self, failed_preconditions)
return
self.callback = callback
try:
self.prepare()
self._run()
except Exception, ex:
print "[Task] exception:", ex
self.postconditions = [FailedPostcondition(ex)]
self.finish()
def prepare(self):
pass
def cleanup(self, failed):
pass
def processStdout(self, data):
self.processOutput(data)
def processStderr(self, data):
self.processOutput(data)
def processOutput(self, data):
self.output_line += data
while True:
i = self.output_line.find('\n')
if i == -1:
break
self.processOutputLine(self.output_line[:i+1])
self.output_line = self.output_line[i+1:]
def processOutputLine(self, line):
print "[Task %s]" % self.name, line[:-1]
def processFinished(self, returncode):
self.returncode = returncode
self.finish()
def abort(self):
if self.container:
self.container.kill()
self.finish(aborted = True)
def finish(self, aborted = False):
self.afterRun()
not_met = [ ]
if aborted:
not_met.append(AbortedPostcondition())
else:
for postcondition in self.postconditions:
if not postcondition.check(self):
not_met.append(postcondition)
self.cleanup(not_met)
self.callback(self, not_met)
def afterRun(self):
pass
def writeInput(self, input):
self.container.write(input)
def getProgress(self):
return self.__progress
def setProgress(self, progress):
if progress > self.end:
progress = self.end
if progress < 0:
progress = 0
self.__progress = progress
if self.task_progress_changed:
self.task_progress_changed()
progress = property(getProgress, setProgress)
def __str__(self):
return "Components.Task.Task name=%s" % self.name
class LoggingTask(Task):
def __init__(self, job, name):
Task.__init__(self, job, name)
self.log = []
def processOutput(self, data):
print "[%s]" % self.name, data,
self.log.append(data)
class PythonTask(Task):
def _run(self):
from twisted.internet import threads
from enigma import eTimer
self.aborted = False
self.pos = 0
threads.deferToThread(self.work).addBoth(self.onComplete)
self.timer = eTimer()
self.timer.callback.append(self.onTimer)
self.timer.start(5)
def work(self):
raise NotImplementedError("work")
def abort(self):
self.aborted = True
if self.callback is None:
self.finish(aborted = True)
def onTimer(self):
self.setProgress(self.pos)
def onComplete(self, result):
self.postconditions.append(FailedPostcondition(result))
self.timer.stop()
del self.timer
self.finish()
class ConditionTask(Task):
"""
Reactor-driven pthread_condition.
Wait for something to happen. Call trigger when something occurs that
is likely to make check() return true. Raise exception in check() to
signal error.
Default is to call trigger() once per second, override prepare/cleanup
to do something else (like waiting for hotplug)...
"""
def __init__(self, job, name, timeoutCount=None):
Task.__init__(self, job, name)
self.timeoutCount = timeoutCount
def _run(self):
self.triggerCount = 0
def prepare(self):
from enigma import eTimer
self.timer = eTimer()
self.timer.callback.append(self.trigger)
self.timer.start(1000)
def cleanup(self, failed):
if hasattr(self, 'timer'):
self.timer.stop()
del self.timer
def check(self):
# override to return True only when condition triggers
return True
def trigger(self):
self.triggerCount += 1
try:
if (self.timeoutCount is not None) and (self.triggerCount > self.timeoutCount):
raise Exception, "Timeout elapsed, sorry"
res = self.check()
except Exception, e:
self.postconditions.append(FailedPostcondition(e))
res = True
if res:
self.finish()
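# An illustrative sketch (not part of the original module): a ConditionTask
# subclass following the docstring above, polling once per second until a
# hypothetical path appears, failing after timeoutCount polls.
#class PathExistsConditionTask(ConditionTask):
# def __init__(self, job, name, path, timeoutCount=10):
# ConditionTask.__init__(self, job, name, timeoutCount=timeoutCount)
# self.path = path
# def check(self):
# import os
# return os.access(self.path, os.F_OK)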
# The JobManager will execute multiple jobs, one after another.
# Later, it will also support suspending jobs (and continuing them after reboot etc.)
# It also supports a notification when some error occurred, and possibly a retry.
class JobManager:
def __init__(self):
self.active_jobs = [ ]
self.failed_jobs = [ ]
self.job_classes = [ ]
self.in_background = False
self.visible = False
self.active_job = None
# Set onSuccess to popupTaskView to get a visible notification.
# onFail defaults to notifyFailed which tells the user that it went south.
def AddJob(self, job, onSuccess=None, onFail=None):
job.onSuccess = onSuccess
if onFail is None:
job.onFail = self.notifyFailed
else:
job.onFail = onFail
self.active_jobs.append(job)
self.kick()
def kick(self):
if self.active_job is None:
if self.active_jobs:
self.active_job = self.active_jobs.pop(0)
self.active_job.start(self.jobDone)
def notifyFailed(self, job, task, problems):
from Tools import Notifications
from Screens.MessageBox import MessageBox
if problems[0].RECOVERABLE:
Notifications.AddNotificationWithCallback(self.errorCB, MessageBox, _("Error: %s\nRetry?") % (problems[0].getErrorMessage(task)))
return True
else:
Notifications.AddNotification(MessageBox, job.name + "\n" + _("Error") + ': %s' % (problems[0].getErrorMessage(task)), type = MessageBox.TYPE_ERROR )
return False
def jobDone(self, job, task, problems):
print "job", job, "completed with", problems, "in", task
if problems:
if not job.onFail(job, task, problems):
self.errorCB(False)
else:
self.active_job = None
if job.onSuccess:
job.onSuccess(job)
self.kick()
# Set job.onSuccess to this function if you want to pop up the jobview when the job is done.
def popupTaskView(self, job):
if not self.visible:
from Tools import Notifications
from Screens.TaskView import JobView
self.visible = True
Notifications.AddNotification(JobView, job)
def errorCB(self, answer):
if answer:
print "retrying job"
self.active_job.retry()
else:
print "not retrying job."
self.failed_jobs.append(self.active_job)
self.active_job = None
self.kick()
def getPendingJobs(self):
jobs = [ ]
if self.active_job:
jobs.append(self.active_job)
jobs += self.active_jobs
return jobs
# some examples:
#class PartitionExistsPostcondition:
# def __init__(self, device):
# self.device = device
#
# def check(self, task):
# import os
# return os.access(self.device + "part1", os.F_OK)
#
#class CreatePartitionTask(Task):
# def __init__(self, device):
# Task.__init__(self, "Creating partition")
# self.device = device
# self.setTool("/sbin/sfdisk")
# self.args += ["-f", self.device + "disc"]
# self.initial_input = "0,\n;\n;\n;\ny\n"
# self.postconditions.append(PartitionExistsPostcondition(self.device))
#
#class CreateFilesystemTask(Task):
# def __init__(self, device, partition = 1, largefile = True):
# Task.__init__(self, "Creating filesystem")
# self.setTool("/sbin/mkfs.ext")
# if largefile:
# self.args += ["-T", "largefile"]
# self.args.append("-m0")
# self.args.append(device + "part%d" % partition)
#
#class FilesystemMountTask(Task):
# def __init__(self, device, partition = 1, filesystem = "ext3"):
# Task.__init__(self, "Mounting filesystem")
# self.setTool("/bin/mount")
# if filesystem is not None:
# self.args += ["-t", filesystem]
# self.args.append(device + "part%d" % partition)
class Condition:
def __init__(self):
pass
RECOVERABLE = False
def getErrorMessage(self, task):
return _("An unknown error occurred!") + " (%s @ task %s)" % (self.__class__.__name__, task.__class__.__name__)
class WorkspaceExistsPrecondition(Condition):
def __init__(self):
pass
def check(self, task):
import os
return os.access(task.job.workspace, os.W_OK)
class DiskspacePrecondition(Condition):
def __init__(self, diskspace_required):
self.diskspace_required = diskspace_required
self.diskspace_available = 0
def check(self, task):
import os
try:
s = os.statvfs(task.job.workspace)
self.diskspace_available = s.f_bsize * s.f_bavail
return self.diskspace_available >= self.diskspace_required
except OSError:
return False
def getErrorMessage(self, task):
return _("Not enough disk space. Please free up some disk space and try again. (%d MB required, %d MB available)") % (self.diskspace_required / 1024 / 1024, self.diskspace_available / 1024 / 1024)
class ToolExistsPrecondition(Condition):
def __init__(self):
pass
def check(self, task):
import os
if task.cmd[0]=='/':
self.realpath = task.cmd
print "[Task.py][ToolExistsPrecondition] WARNING: usage of absolute paths for tasks should be avoided!"
return os.access(self.realpath, os.X_OK)
else:
self.realpath = task.cmd
path = os.environ.get('PATH', '').split(os.pathsep)
path.append(task.cwd + '/')
absolutes = filter(lambda file: os.access(file, os.X_OK), map(lambda directory, file = task.cmd: os.path.join(directory, file), path))
if absolutes:
self.realpath = absolutes[0]
return True
return False
def getErrorMessage(self, task):
return _("A required tool (%s) was not found.") % self.realpath
class AbortedPostcondition(Condition):
def __init__(self):
pass
def getErrorMessage(self, task):
return "Cancelled upon user request"
class ReturncodePostcondition(Condition):
def __init__(self):
pass
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
if hasattr(task, 'log') and task.log:
log = ''.join(task.log).strip()
log = log.split('\n')[-3:]
log = '\n'.join(log)
return log
else:
return _("Error code") + ": %s" % task.returncode
class FailedPostcondition(Condition):
def __init__(self, exception):
self.exception = exception
def getErrorMessage(self, task):
if isinstance(self.exception, int):
if hasattr(task, 'log'):
log = ''.join(task.log).strip()
log = log.split('\n')[-4:]
log = '\n'.join(log)
return log
else:
return _("Error code") + " %s" % self.exception
return str(self.exception)
def check(self, task):
return (self.exception is None) or (self.exception == 0)
#class HDDInitJob(Job):
# def __init__(self, device):
# Job.__init__(self, _("Initialize Harddisk"))
# self.device = device
# self.fromDescription(self.createDescription())
#
# def fromDescription(self, description):
# self.device = description["device"]
# self.addTask(CreatePartitionTask(self.device))
# self.addTask(CreateFilesystemTask(self.device))
# self.addTask(FilesystemMountTask(self.device))
#
# def createDescription(self):
# return {"device": self.device}
job_manager = JobManager()
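# Illustrative usage sketch (not from the original file): build a Job with a
# single tool Task and hand it to the global job_manager. The tool and
# arguments are hypothetical.
#def runExampleJob():
# job = Job(_("Example job"))
# task = Task(job, "list tmp")
# task.setTool("ls")
# task.args += ["/tmp"]
# job_manager.AddJob(job)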
|
mhbu50/frappe | refs/heads/develop | frappe/email/inbox.py | 8 |
import frappe
import json
def get_email_accounts(user=None):
if not user:
user = frappe.session.user
email_accounts = []
accounts = frappe.get_all("User Email", filters={ "parent": user },
fields=["email_account", "email_id", "enable_outgoing"],
distinct=True, order_by="idx")
if not accounts:
return {
"email_accounts": [],
"all_accounts": ""
}
all_accounts = ",".join([ account.get("email_account") for account in accounts ])
if len(accounts) > 1:
email_accounts.append({
"email_account": all_accounts,
"email_id": "All Accounts"
})
email_accounts.extend(accounts)
email_accounts.extend([
{
"email_account": "Sent",
"email_id": "Sent Mail"
},
{
"email_account": "Spam",
"email_id": "Spam"
},
{
"email_account": "Trash",
"email_id": "Trash"
}
])
return {
"email_accounts": email_accounts,
"all_accounts": all_accounts
}
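# Illustrative return shape for two configured accounts (account names and
# addresses are hypothetical):
# {
# "email_accounts": [
# {"email_account": "Support,Sales", "email_id": "All Accounts"},
# {"email_account": "Support", "email_id": "support@example.com", "enable_outgoing": 1},
# {"email_account": "Sales", "email_id": "sales@example.com", "enable_outgoing": 0},
# {"email_account": "Sent", "email_id": "Sent Mail"},
# {"email_account": "Spam", "email_id": "Spam"},
# {"email_account": "Trash", "email_id": "Trash"}
# ],
# "all_accounts": "Support,Sales"
# }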
@frappe.whitelist()
def create_email_flag_queue(names, action):
""" create email flag queue to mark email either as read or unread """
def mark_as_seen_unseen(name, action):
doc = frappe.get_doc("Communication", name)
if action == "Read":
doc.add_seen()
else:
_seen = json.loads(doc._seen or '[]')
_seen = [user for user in _seen if frappe.session.user != user]
doc.db_set('_seen', json.dumps(_seen), update_modified=False)
if not all([names, action]):
return
for name in json.loads(names or '[]'):
uid, seen_status, email_account = frappe.db.get_value("Communication", name,
["ifnull(uid, -1)", "ifnull(seen, 0)", "email_account"])
# cannot mark an email as SEEN or UNSEEN without a uid
if not uid or uid == -1:
continue
seen = 1 if action == "Read" else 0
# check if states are correct
if (action == 'Read' and seen_status == 0) or (action == 'Unread' and seen_status == 1):
create_new = True
email_flag_queue = frappe.db.sql("""select name, action from `tabEmail Flag Queue`
where communication = %(name)s and is_completed=0""", {"name":name}, as_dict=True)
for queue in email_flag_queue:
if queue.action != action:
frappe.delete_doc("Email Flag Queue", queue.name, ignore_permissions=True)
elif queue.action == action:
# Read or Unread request for email is already available
create_new = False
if create_new:
flag_queue = frappe.get_doc({
"uid": uid,
"action": action,
"communication": name,
"doctype": "Email Flag Queue",
"email_account": email_account
})
flag_queue.save(ignore_permissions=True)
frappe.db.set_value("Communication", name, "seen", seen,
update_modified=False)
mark_as_seen_unseen(name, action)
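# Illustrative client-side call (Communication names are hypothetical):
# create_email_flag_queue(json.dumps(["COMM-00001", "COMM-00002"]), "Read")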
@frappe.whitelist()
def mark_as_trash(communication):
"""set email status to trash"""
frappe.db.set_value("Communication", communication, "email_status", "Trash")
@frappe.whitelist()
def mark_as_spam(communication, sender):
""" set email status to spam """
email_rule = frappe.db.get_value("Email Rule", { "email_id": sender })
if not email_rule:
frappe.get_doc({
"doctype": "Email Rule",
"email_id": sender,
"is_spam": 1
}).insert(ignore_permissions=True)
frappe.db.set_value("Communication", communication, "email_status", "Spam")
def link_communication_to_document(doc, reference_doctype, reference_name, ignore_communication_links):
if not ignore_communication_links:
doc.reference_doctype = reference_doctype
doc.reference_name = reference_name
doc.status = "Linked"
doc.save(ignore_permissions=True)
@frappe.whitelist()
def make_issue_from_communication(communication, ignore_communication_links=False):
""" raise a issue from email """
doc = frappe.get_doc("Communication", communication)
issue = frappe.get_doc({
"doctype": "Issue",
"subject": doc.subject,
"communication_medium": doc.communication_medium,
"raised_by": doc.sender or "",
"raised_by_phone": doc.phone_no or ""
}).insert(ignore_permissions=True)
link_communication_to_document(doc, "Issue", issue.name, ignore_communication_links)
return issue.name
@frappe.whitelist()
def make_lead_from_communication(communication, ignore_communication_links=False):
""" raise a issue from email """
doc = frappe.get_doc("Communication", communication)
frappe.errprint(doc.sender_full_name)
lead_name = frappe.db.get_value("Lead", {"email_id": doc.sender,"mobile_no": doc.phone_no})
if not lead_name:
lead = frappe.get_doc({
"doctype": "Lead",
"lead_name": doc.sender_full_name,
"email_id": doc.sender,
"mobile_no": doc.phone_no
})
lead.flags.ignore_mandatory = True
lead.flags.ignore_permissions = True
lead.insert()
lead_name = lead.name
link_communication_to_document(doc, "Lead", lead_name, ignore_communication_links)
return lead_name
@frappe.whitelist()
def make_opportunity_from_communication(communication, ignore_communication_links=False):
doc = frappe.get_doc("Communication", communication)
lead = doc.reference_name if doc.reference_doctype == "Lead" else None
if not lead:
lead = make_lead_from_communication(communication, ignore_communication_links=True)
enquiry_from = "Lead"
opportunity = frappe.get_doc({
"doctype": "Opportunity",
"enquiry_from": enquiry_from,
"lead": lead
}).insert(ignore_permissions=True)
link_communication_to_document(doc, "Opportunity", opportunity.name, ignore_communication_links)
return opportunity.name
|
mollstam/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/lxml-3.4.4/ez_setup.py | 202 |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
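# Illustrative setup.py usage, per the module docstring above (the package
# metadata is hypothetical):
# from ez_setup import use_setuptools
# use_setuptools()
# from setuptools import setup
# setup(name="example-package", version="0.1")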
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
illfelder/libcloud | refs/heads/trunk | libcloud/common/dimensiondata.py | 11 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dimension Data Common Components
"""
from base64 import b64encode
from time import sleep
# TODO: use distutils.version once Travis CI fixes the pylint issue with version
# from distutils.version import LooseVersion
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey, XmlResponse, RawResponse
from libcloud.compute.base import Node
from libcloud.utils.py3 import basestring
from libcloud.utils.xml import findtext
from libcloud.compute.types import LibcloudError, InvalidCredsError
# Roadmap / TODO:
#
# 1.0 - Copied from OpSource API, named provider details.
# Set up a few variables to represent all of the DimensionData cloud namespaces
NAMESPACE_BASE = "http://oec.api.opsource.net/schemas"
ORGANIZATION_NS = NAMESPACE_BASE + "/organization"
SERVER_NS = NAMESPACE_BASE + "/server"
NETWORK_NS = NAMESPACE_BASE + "/network"
DIRECTORY_NS = NAMESPACE_BASE + "/directory"
GENERAL_NS = NAMESPACE_BASE + "/general"
BACKUP_NS = NAMESPACE_BASE + "/backup"
# API 2.0 Namespaces and URNs
TYPES_URN = "urn:didata.com:api:cloud:types"
# API end-points
API_ENDPOINTS = {
'dd-na': {
'name': 'North America (NA)',
'host': 'api-na.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-eu': {
'name': 'Europe (EU)',
'host': 'api-eu.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-au': {
'name': 'Australia (AU)',
'host': 'api-au.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-au-gov': {
'name': 'Australia Canberra ACT (AU)',
'host': 'api-canberra.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-af': {
'name': 'Africa (AF)',
'host': 'api-mea.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-ap': {
'name': 'Asia Pacific (AP)',
'host': 'api-ap.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-latam': {
'name': 'South America (LATAM)',
'host': 'api-latam.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-canada': {
'name': 'Canada (CA)',
'host': 'api-canada.dimensiondata.com',
'vendor': 'DimensionData'
},
'is-na': {
'name': 'North America (NA)',
'host': 'usapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-eu': {
'name': 'Europe (EU)',
'host': 'euapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-au': {
'name': 'Australia (AU)',
'host': 'auapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-af': {
'name': 'Africa (AF)',
'host': 'meaapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-ap': {
'name': 'Asia Pacific (AP)',
'host': 'apapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-latam': {
'name': 'South America (LATAM)',
'host': 'latamapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-canada': {
'name': 'Canada (CA)',
'host': 'canadaapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'ntta-na': {
'name': 'North America (NA)',
'host': 'cloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-eu': {
'name': 'Europe (EU)',
'host': 'eucloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-au': {
'name': 'Australia (AU)',
'host': 'aucloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-af': {
'name': 'Africa (AF)',
'host': 'sacloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-ap': {
'name': 'Asia Pacific (AP)',
'host': 'hkcloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'cisco-na': {
'name': 'North America (NA)',
'host': 'iaas-api-na.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-eu': {
'name': 'Europe (EU)',
'host': 'iaas-api-eu.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-au': {
'name': 'Australia (AU)',
'host': 'iaas-api-au.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-af': {
'name': 'Africa (AF)',
'host': 'iaas-api-mea.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-ap': {
'name': 'Asia Pacific (AP)',
'host': 'iaas-api-ap.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-latam': {
'name': 'South America (LATAM)',
'host': 'iaas-api-sa.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-canada': {
'name': 'Canada (CA)',
'host': 'iaas-api-ca.cisco-ccs.com',
'vendor': 'Cisco'
},
'med1-il': {
'name': 'Israel (IL)',
'host': 'api.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-na': {
'name': 'North America (NA)',
'host': 'api-na.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-eu': {
'name': 'Europe (EU)',
'host': 'api-eu.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-au': {
'name': 'Australia (AU)',
'host': 'api-au.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-af': {
'name': 'Africa (AF)',
'host': 'api-af.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-ap': {
'name': 'Asia Pacific (AP)',
'host': 'api-ap.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-latam': {
'name': 'South America (LATAM)',
'host': 'api-sa.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-canada': {
'name': 'Canada (CA)',
'host': 'api-ca.cloud.med-1.com',
'vendor': 'Med-1'
},
'indosat-id': {
'name': 'Indonesia (ID)',
'host': 'iaas-api.indosat.com',
'vendor': 'Indosat'
},
'indosat-na': {
'name': 'North America (NA)',
'host': 'iaas-usapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-eu': {
'name': 'Europe (EU)',
'host': 'iaas-euapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-au': {
'name': 'Australia (AU)',
'host': 'iaas-auapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-af': {
'name': 'Africa (AF)',
'host': 'iaas-afapi.indosat.com',
'vendor': 'Indosat'
},
'bsnl-in': {
'name': 'India (IN)',
'host': 'api.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-na': {
'name': 'North America (NA)',
'host': 'usapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-eu': {
'name': 'Europe (EU)',
'host': 'euapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-au': {
'name': 'Australia (AU)',
'host': 'auapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-af': {
'name': 'Africa (AF)',
'host': 'afapi.bsnlcloud.com',
'vendor': 'BSNL'
}
}
# Default API end-point for the base connection class.
DEFAULT_REGION = 'dd-na'
BAD_CODE_XML_ELEMENTS = (
('responseCode', SERVER_NS),
('responseCode', TYPES_URN),
('result', GENERAL_NS)
)
BAD_MESSAGE_XML_ELEMENTS = (
('message', SERVER_NS),
('message', TYPES_URN),
('resultDetail', GENERAL_NS)
)
def dd_object_to_id(obj, obj_type, id_value='id'):
"""
Takes in a DD object or string and prints out it's id
This is a helper method, as many of our functions can take either an object
or a string, and we need an easy way of converting them
:param obj: The object to get the id for
:type obj: ``object``
:param func: The function to call, e.g. ex_get_vlan. Note: This
function needs to return an object which has ``status``
attribute.
:type func: ``function``
:rtype: ``str``
"""
if isinstance(obj, obj_type):
return getattr(obj, id_value)
elif isinstance(obj, (basestring)):
return obj
else:
raise TypeError(
"Invalid type %s looking for basestring or %s"
% (type(obj).__name__, obj_type.__name__)
)
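# Illustrative usage (values are hypothetical); both calls return an id string:
# dd_object_to_id(my_vlan, DimensionDataVlan) # -> my_vlan.id
# dd_object_to_id('7b2e1f6c', DimensionDataVlan) # -> '7b2e1f6c' unchanged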
# TODO: use distutils.version once Travis CI fixes the pylint issue with version
# This is a temporary workaround.
def LooseVersion(version):
return float(version)
class NetworkDomainServicePlan(object):
ESSENTIALS = "ESSENTIALS"
ADVANCED = "ADVANCED"
class DimensionDataRawResponse(RawResponse):
pass
class DimensionDataResponse(XmlResponse):
def parse_error(self):
if self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError(self.body)
elif self.status == httplib.FORBIDDEN:
raise InvalidCredsError(self.body)
body = self.parse_body()
if self.status == httplib.BAD_REQUEST:
for response_code in BAD_CODE_XML_ELEMENTS:
code = findtext(body, response_code[0], response_code[1])
if code is not None:
break
for message in BAD_MESSAGE_XML_ELEMENTS:
message = findtext(body, message[0], message[1])
if message is not None:
break
raise DimensionDataAPIException(code=code,
msg=message,
driver=self.connection.driver)
if self.status != httplib.OK:
raise DimensionDataAPIException(code=self.status,
msg=body,
driver=self.connection.driver)
return self.body
class DimensionDataAPIException(LibcloudError):
def __init__(self, code, msg, driver):
self.code = code
self.msg = msg
self.driver = driver
def __str__(self):
return "%s: %s" % (self.code, self.msg)
def __repr__(self):
return ("<DimensionDataAPIException: code='%s', msg='%s'>" %
(self.code, self.msg))
class DimensionDataConnection(ConnectionUserAndKey):
"""
Connection class for the DimensionData driver
"""
api_path_version_1 = '/oec'
api_path_version_2 = '/caas'
api_version_1 = 0.9
# Earliest version supported
oldest_api_version = '2.2'
# Latest version supported
latest_api_version = '2.4'
# Default api version
active_api_version = '2.4'
_orgId = None
responseCls = DimensionDataResponse
rawResponseCls = DimensionDataRawResponse
allow_insecure = False
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, proxy_url=None,
api_version=None, **conn_kwargs):
super(DimensionDataConnection, self).__init__(
user_id=user_id,
key=key,
secure=secure,
host=host, port=port,
url=url, timeout=timeout,
proxy_url=proxy_url)
if conn_kwargs['region']:
self.host = conn_kwargs['region']['host']
if api_version:
if LooseVersion(api_version) < LooseVersion(
self.oldest_api_version):
msg = 'API Version specified is too old. No longer ' \
'supported. Please upgrade to the latest version {}' \
.format(self.active_api_version)
raise DimensionDataAPIException(code=None,
msg=msg,
driver=self.driver)
elif LooseVersion(api_version) > LooseVersion(
self.latest_api_version):
msg = 'Unsupported API Version. The version specified is ' \
'not released yet. Please use the latest supported ' \
'version {}' \
.format(self.active_api_version)
raise DimensionDataAPIException(code=None,
msg=msg,
driver=self.driver)
else:
# Overwrite default version using the version user specified
self.active_api_version = api_version
def add_default_headers(self, headers):
headers['Authorization'] = \
('Basic %s' % b64encode(b('%s:%s' % (self.user_id,
self.key))).decode('utf-8'))
headers['Content-Type'] = 'application/xml'
return headers
def request_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s/%s" % (self.api_path_version_1,
self.api_version_1, action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def request_api_2(self, path, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s/%s/%s" % (self.api_path_version_2,
self.active_api_version, path, action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def raw_request_with_orgId_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_1(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers, raw=True)
def request_with_orgId_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_1(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def request_with_orgId_api_2(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_2(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def paginated_request_with_orgId_api_2(self, action, params=None, data='',
headers=None, method='GET',
page_size=250):
"""
A paginated request to the MCP2.0 API
This essentially calls out to request_with_orgId_api_2 for each page
and yields the response to make a generator
This generator can be looped through to grab all the pages.
:param action: The resource to access (i.e. 'network/vlan')
:type action: ``str``
:param params: Parameters to give to the action
:type params: ``dict`` or ``None``
:param data: The data payload to be added to the request
:type data: ``str``
:param headers: Additional header to be added to the request
:type headers: ``str`` or ``dict`` or ``None``
:param method: HTTP Method for the request (i.e. 'GET', 'POST')
:type method: ``str``
:param page_size: The size of each page to be returned
Note: Max page size in MCP2.0 is currently 250
:type page_size: ``int``
"""
if params is None:
params = {}
params['pageSize'] = page_size
resp = self.request_with_orgId_api_2(action, params,
data, headers,
method).object
yield resp
if len(resp) <= 0:
return
pcount = resp.get('pageCount') # pylint: disable=no-member
psize = resp.get('pageSize') # pylint: disable=no-member
pnumber = resp.get('pageNumber') # pylint: disable=no-member
while int(pcount) >= int(psize):
params['pageNumber'] = int(pnumber) + 1
resp = self.request_with_orgId_api_2(action, params,
data, headers,
method).object
pcount = resp.get('pageCount') # pylint: disable=no-member
psize = resp.get('pageSize') # pylint: disable=no-member
pnumber = resp.get('pageNumber') # pylint: disable=no-member
yield resp
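# Illustrative iteration over all pages (the connection object and action
# are hypothetical); each yielded item is one parsed XML response:
# for page in conn.paginated_request_with_orgId_api_2('network/vlan'):
# handle(page)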
def get_resource_path_api_1(self):
"""
This method returns a resource path which is necessary for referencing
resources that require a full path instead of just an ID, such as
networks, and customer snapshots.
"""
return ("%s/%s/%s" % (self.api_path_version_1, self.api_version_1,
self._get_orgId()))
def get_resource_path_api_2(self):
"""
This method returns a resource path which is necessary for referencing
resources that require a full path instead of just an ID, such as
networks, and customer snapshots.
"""
return ("%s/%s/%s" % (self.api_path_version_2, self.active_api_version,
self._get_orgId()))
def wait_for_state(self, state, func, poll_interval=2, timeout=60, *args,
**kwargs):
"""
Wait for the function which returns an instance with field status/state
to match.
Keep polling func until one of the desired states is matched
:param state: Either the desired state (`str`) or a `list` of states
:type state: ``str`` or ``list``
:param func: The function to call, e.g. ex_get_vlan. Note: This
function needs to return an object which has ``status``
attribute.
:type func: ``function``
:param poll_interval: The number of seconds to wait between checks
:type poll_interval: `int`
:param timeout: The total number of seconds to wait to reach a state
:type timeout: `int`
:param args: The arguments for func
:type args: Positional arguments
:param kwargs: The arguments for func
:type kwargs: Keyword arguments
:return: Result from the calling function.
"""
cnt = 0
result = None
object_state = None
while cnt < timeout / poll_interval:
result = func(*args, **kwargs)
if isinstance(result, Node):
object_state = result.state
else:
object_state = result.status
if object_state == state or object_state in state:
return result
sleep(poll_interval)
cnt += 1
msg = 'Status check for object %s timed out' % (result)
raise DimensionDataAPIException(code=object_state,
msg=msg,
driver=self.driver)
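# Illustrative usage (driver and ids are hypothetical): poll ex_get_vlan
# every 2 seconds, up to 60 seconds, until the VLAN reports NORMAL:
# vlan = conn.wait_for_state('NORMAL', driver.ex_get_vlan, 2, 60, vlan_id)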
def _get_orgId(self):
"""
Send the /myaccount API request to DimensionData cloud and parse the
'orgId' from the XML response object. We need the orgId to use most
of the other API functions
"""
if self._orgId is None:
body = self.request_api_1('myaccount').object
self._orgId = findtext(body, 'orgId', DIRECTORY_NS)
return self._orgId
def get_account_details(self):
"""
Get the details of this account
:rtype: :class:`DimensionDataAccountDetails`
"""
body = self.request_api_1('myaccount').object
return DimensionDataAccountDetails(
user_name=findtext(body, 'userName', DIRECTORY_NS),
full_name=findtext(body, 'fullName', DIRECTORY_NS),
first_name=findtext(body, 'firstName', DIRECTORY_NS),
last_name=findtext(body, 'lastName', DIRECTORY_NS),
email=findtext(body, 'emailAddress', DIRECTORY_NS))
class DimensionDataAccountDetails(object):
"""
Dimension Data account class details
"""
def __init__(self, user_name, full_name, first_name, last_name, email):
self.user_name = user_name
self.full_name = full_name
self.first_name = first_name
self.last_name = last_name
self.email = email
class DimensionDataStatus(object):
"""
DimensionData API pending operation status class
action, request_time, user_name, number_of_steps, update_time,
step.name, step.number, step.percent_complete, failure_reason,
"""
def __init__(self, action=None, request_time=None, user_name=None,
number_of_steps=None, update_time=None, step_name=None,
step_number=None, step_percent_complete=None,
failure_reason=None):
self.action = action
self.request_time = request_time
self.user_name = user_name
self.number_of_steps = number_of_steps
self.update_time = update_time
self.step_name = step_name
self.step_number = step_number
self.step_percent_complete = step_percent_complete
self.failure_reason = failure_reason
def __repr__(self):
return (('<DimensionDataStatus: action=%s, request_time=%s, '
'user_name=%s, number_of_steps=%s, update_time=%s, '
'step_name=%s, step_number=%s, '
'step_percent_complete=%s, failure_reason=%s>')
% (self.action, self.request_time, self.user_name,
self.number_of_steps, self.update_time, self.step_name,
self.step_number, self.step_percent_complete,
self.failure_reason))
class DimensionDataNetwork(object):
"""
DimensionData network with location.
"""
def __init__(self, id, name, description, location, private_net,
multicast, status):
self.id = str(id)
self.name = name
self.description = description
self.location = location
self.private_net = private_net
self.multicast = multicast
self.status = status
def __repr__(self):
return (('<DimensionDataNetwork: id=%s, name=%s, description=%s, '
'location=%s, private_net=%s, multicast=%s>')
% (self.id, self.name, self.description, self.location,
self.private_net, self.multicast))
class DimensionDataNetworkDomain(object):
"""
DimensionData network domain with location.
"""
def __init__(self, id, name, description, location, status, plan):
self.id = str(id)
self.name = name
self.description = description
self.location = location
self.status = status
self.plan = plan
def __repr__(self):
return (('<DimensionDataNetworkDomain: id=%s, name=%s, '
'description=%s, location=%s, status=%s, plan=%s>')
% (self.id, self.name, self.description, self.location,
self.status, self.plan))
class DimensionDataPublicIpBlock(object):
"""
DimensionData Public IP Block with location.
"""
def __init__(self, id, base_ip, size, location, network_domain,
status):
self.id = str(id)
self.base_ip = base_ip
self.size = size
self.location = location
self.network_domain = network_domain
self.status = status
def __repr__(self):
return (('<DimensionDataNetworkDomain: id=%s, base_ip=%s, '
'size=%s, location=%s, status=%s>')
% (self.id, self.base_ip, self.size, self.location,
self.status))
class DimensionDataServerCpuSpecification(object):
"""
A class that represents the specification of the CPU(s) for a
node
"""
def __init__(self, cpu_count, cores_per_socket, performance):
"""
Instantiate a new :class:`DimensionDataServerCpuSpecification`
:param cpu_count: The number of CPUs
:type cpu_count: ``int``
:param cores_per_socket: The number of cores per socket, the
recommendation is 1
:type cores_per_socket: ``int``
:param performance: The performance type, e.g. HIGHPERFORMANCE
:type performance: ``str``
"""
self.cpu_count = cpu_count
self.cores_per_socket = cores_per_socket
self.performance = performance
def __repr__(self):
return (('<DimensionDataServerCpuSpecification: '
'cpu_count=%s, cores_per_socket=%s, '
'performance=%s>')
% (self.cpu_count, self.cores_per_socket, self.performance))
class DimensionDataServerDisk(object):
"""
A class that represents the disk on a server
"""
def __init__(self, id=None, scsi_id=None, size_gb=None, speed=None,
state=None):
"""
Instantiate a new :class:`DimensionDataServerDisk`
:param id: The id of the disk
:type id: ``str``
:param scsi_id: Representation for scsi
:type scsi_id: ``int``
:param size_gb: Size of the disk
:type size_gb: ``int``
:param speed: Speed of the disk (i.e. STANDARD)
:type speed: ``str``
:param state: State of the disk (i.e. PENDING)
:type state: ``str``
"""
self.id = id
self.scsi_id = scsi_id
self.size_gb = size_gb
self.speed = speed
self.state = state
def __repr__(self):
return (('<DimensionDataServerDisk: '
'id=%s, size_gb=%s>')
% (self.id, self.size_gb))
class DimensionDataServerVMWareTools(object):
"""
A class that represents the VMWareTools for a node
"""
def __init__(self, status, version_status, api_version):
"""
Instantiate a new :class:`DimensionDataServerVMWareTools` object
:param status: The status of VMWare Tools
:type status: ``str``
:param version_status: The status for the version of VMWare Tools
(i.e NEEDS_UPGRADE)
:type version_status: ``str``
:param api_version: The API version of VMWare Tools
:type api_version: ``str``
"""
self.status = status
self.version_status = version_status
self.api_version = api_version
def __repr__(self):
return (('<DimensionDataServerVMWareTools '
'status=%s, version_status=%s, '
'api_version=%s>')
% (self.status, self.version_status, self.api_version))
class DimensionDataFirewallRule(object):
"""
DimensionData Firewall Rule for a network domain
"""
def __init__(self, id, name, action, location, network_domain,
status, ip_version, protocol, source, destination,
enabled):
self.id = str(id)
self.name = name
self.action = action
self.location = location
self.network_domain = network_domain
self.status = status
self.ip_version = ip_version
self.protocol = protocol
self.source = source
self.destination = destination
self.enabled = enabled
def __repr__(self):
return (('<DimensionDataFirewallRule: id=%s, name=%s, '
'action=%s, location=%s, network_domain=%s, '
'status=%s, ip_version=%s, protocol=%s, source=%s, '
'destination=%s, enabled=%s>')
% (self.id, self.name, self.action, self.location,
self.network_domain, self.status, self.ip_version,
self.protocol, self.source, self.destination,
self.enabled))
class DimensionDataFirewallAddress(object):
"""
The source or destination model in a firewall rule
"""
def __init__(self, any_ip, ip_address, ip_prefix_size,
port_begin, port_end, address_list_id,
port_list_id):
self.any_ip = any_ip
self.ip_address = ip_address
self.ip_prefix_size = ip_prefix_size
self.port_begin = port_begin
self.port_end = port_end
self.address_list_id = address_list_id
self.port_list_id = port_list_id
def __repr__(self):
return (
'<DimensionDataFirewallAddress: any_ip=%s, ip_address=%s, '
'ip_prefix_size=%s, port_begin=%s, port_end=%s, '
'address_list_id=%s, port_list_id=%s>'
% (self.any_ip, self.ip_address, self.ip_prefix_size,
self.port_begin, self.port_end, self.address_list_id,
self.port_list_id))
class DimensionDataNatRule(object):
"""
An IP NAT rule in a network domain
"""
def __init__(self, id, network_domain, internal_ip, external_ip, status):
self.id = id
self.network_domain = network_domain
self.internal_ip = internal_ip
self.external_ip = external_ip
self.status = status
def __repr__(self):
return (('<DimensionDataNatRule: id=%s, status=%s>')
% (self.id, self.status))
class DimensionDataAntiAffinityRule(object):
"""
Anti-Affinity rule for DimensionData
An Anti-Affinity rule ensures that servers in the rule will
not reside on the same VMware ESX host.
"""
def __init__(self, id, node_list):
"""
Instantiate a new :class:`DimensionDataAntiAffinityRule`
:param id: The ID of the Anti-Affinity rule
:type id: ``str``
:param node_list: List of node ids that belong in this rule
:type node_list: ``list`` of ``str``
"""
self.id = id
self.node_list = node_list
def __repr__(self):
return (('<DimensionDataAntiAffinityRule: id=%s>')
% (self.id))
class DimensionDataVlan(object):
"""
DimensionData VLAN.
"""
def __init__(self, id, name, description, location, network_domain,
status, private_ipv4_range_address, private_ipv4_range_size,
ipv6_range_address, ipv6_range_size, ipv4_gateway,
ipv6_gateway):
"""
Initialize an instance of ``DimensionDataVlan``
:param id: The ID of the VLAN
:type id: ``str``
:param name: The name of the VLAN
:type name: ``str``
:param description: Plain text description of the VLAN
:type description: ``str``
:param location: The location (data center) of the VLAN
:type location: ``NodeLocation``
:param network_domain: The Network Domain that owns this VLAN
:type network_domain: :class:`DimensionDataNetworkDomain`
:param status: The status of the VLAN
:type status: :class:`DimensionDataStatus`
:param private_ipv4_range_address: The host address of the VLAN
IP space
:type private_ipv4_range_address: ``str``
:param private_ipv4_range_size: The size (e.g. '24') of the VLAN
as a CIDR range size
:type private_ipv4_range_size: ``int``
:param ipv6_range_address: The host address of the VLAN
IP space
:type ipv6_range_address: ``str``
:param ipv6_range_size: The size (e.g. '32') of the VLAN
as a CIDR range size
:type ipv6_range_size: ``int``
:param ipv4_gateway: The IPv4 default gateway address
:type ipv4_gateway: ``str``
:param ipv6_gateway: The IPv6 default gateway address
:type ipv6_gateway: ``str``
"""
self.id = str(id)
self.name = name
self.location = location
self.description = description
self.network_domain = network_domain
self.status = status
self.private_ipv4_range_address = private_ipv4_range_address
self.private_ipv4_range_size = private_ipv4_range_size
self.ipv6_range_address = ipv6_range_address
self.ipv6_range_size = ipv6_range_size
self.ipv4_gateway = ipv4_gateway
self.ipv6_gateway = ipv6_gateway
def __repr__(self):
return (('<DimensionDataVlan: id=%s, name=%s, '
'description=%s, location=%s, status=%s>')
% (self.id, self.name, self.description,
self.location, self.status))
class DimensionDataPool(object):
"""
DimensionData VIP Pool.
"""
def __init__(self, id, name, description, status, load_balance_method,
health_monitor_id, service_down_action, slow_ramp_time):
"""
Initialize an instance of ``DimensionDataPool``
:param id: The ID of the pool
:type id: ``str``
:param name: The name of the pool
:type name: ``str``
:param description: Plain text description of the pool
:type description: ``str``
:param status: The status of the pool
:type status: :class:`DimensionDataStatus`
:param load_balance_method: The load balancer method
:type load_balance_method: ``str``
:param health_monitor_id: The ID of the health monitor
:type health_monitor_id: ``str``
:param service_down_action: Action to take when pool is down
:type service_down_action: ``str``
:param slow_ramp_time: The ramp-up time for service recovery
:type slow_ramp_time: ``int``
"""
self.id = str(id)
self.name = name
self.description = description
self.status = status
self.load_balance_method = load_balance_method
self.health_monitor_id = health_monitor_id
self.service_down_action = service_down_action
self.slow_ramp_time = slow_ramp_time
def __repr__(self):
return (('<DimensionDataPool: id=%s, name=%s, '
'description=%s, status=%s>')
% (self.id, self.name, self.description,
self.status))
class DimensionDataPoolMember(object):
"""
DimensionData VIP Pool Member.
"""
def __init__(self, id, name, status, ip, port, node_id):
"""
Initialize an instance of ``DimensionDataPoolMember``
:param id: The ID of the pool member
:type id: ``str``
:param name: The name of the pool member
:type name: ``str``
:param status: The status of the pool
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the pool member
:type ip: ``str``
:param port: The port of the pool member
:type port: ``int``
:param node_id: The ID of the associated node
:type node_id: ``str``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
self.port = port
self.node_id = node_id
def __repr__(self):
return (('<DimensionDataPoolMember: id=%s, name=%s, '
'ip=%s, status=%s, port=%s, node_id=%s>')
% (self.id, self.name,
self.ip, self.status, self.port,
self.node_id))
class DimensionDataVIPNode(object):
def __init__(self, id, name, status, ip, connection_limit='10000',
connection_rate_limit='10000'):
"""
Initialize an instance of :class:`DimensionDataVIPNode`
:param id: The ID of the node
:type id: ``str``
:param name: The name of the node
:type name: ``str``
:param status: The status of the node
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the node
:type ip: ``str``
:param connection_limit: The total connection limit for the node
:type connection_limit: ``int``
:param connection_rate_limit: The rate limit for the node
:type connection_rate_limit: ``int``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
self.connection_limit = connection_limit
self.connection_rate_limit = connection_rate_limit
def __repr__(self):
return (('<DimensionDataVIPNode: id=%s, name=%s, '
'status=%s, ip=%s>')
% (self.id, self.name,
self.status, self.ip))
class DimensionDataVirtualListener(object):
"""
DimensionData Virtual Listener.
"""
def __init__(self, id, name, status, ip):
"""
Initialize an instance of :class:`DimensionDataVirtualListener`
:param id: The ID of the listener
:type id: ``str``
:param name: The name of the listener
:type name: ``str``
:param status: The status of the listener
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the listener
:type ip: ``str``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
def __repr__(self):
return (('<DimensionDataVirtualListener: id=%s, name=%s, '
'status=%s, ip=%s>')
% (self.id, self.name,
self.status, self.ip))
class DimensionDataDefaultHealthMonitor(object):
"""
A default health monitor for a VIP (node, pool or listener)
"""
def __init__(self, id, name, node_compatible, pool_compatible):
"""
Initialize an instance of :class:`DimensionDataDefaultHealthMonitor`
:param id: The ID of the monitor
:type id: ``str``
:param name: The name of the monitor
:type name: ``str``
:param node_compatible: Is a monitor capable of monitoring nodes
:type node_compatible: ``bool``
:param pool_compatible: Is a monitor capable of monitoring pools
:type pool_compatible: ``bool``
"""
self.id = id
self.name = name
self.node_compatible = node_compatible
self.pool_compatible = pool_compatible
def __repr__(self):
return (('<DimensionDataDefaultHealthMonitor: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataPersistenceProfile(object):
"""
Each Persistence Profile declares the combination of Virtual Listener
type and protocol with which it is
compatible and whether or not it is compatible as a
Fallback Persistence Profile.
"""
def __init__(self, id, name, compatible_listeners, fallback_compatible):
"""
Initialize an instance of :class:`DimensionDataPersistenceProfile`
:param id: The ID of the profile
:type id: ``str``
:param name: The name of the profile
:type name: ``str``
:param compatible_listeners: List of compatible Virtual Listener types
:type compatible_listeners: ``list`` of
:class:`DimensionDataVirtualListenerCompatibility`
:param fallback_compatible: Is capable as a fallback profile
:type fallback_compatible: ``bool``
"""
self.id = id
self.name = name
self.compatible_listeners = compatible_listeners
self.fallback_compatible = fallback_compatible
def __repr__(self):
return (('<DimensionDataPersistenceProfile: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataDefaultiRule(object):
"""
A default iRule for a network domain, can be applied to a listener
"""
def __init__(self, id, name, compatible_listeners):
"""
Initialize an instance of :class:`DimensionDataDefaultiRule`
:param id: The ID of the iRule
:type id: ``str``
:param name: The name of the iRule
:type name: ``str``
:param compatible_listeners: List of compatible Virtual Listener types
:type compatible_listeners: ``list`` of
:class:`DimensionDataVirtualListenerCompatibility`
"""
self.id = id
self.name = name
self.compatible_listeners = compatible_listeners
def __repr__(self):
return (('<DimensionDataDefaultiRule: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataVirtualListenerCompatibility(object):
"""
A compatibility preference for a persistence profile or iRule
specifies which virtual listener types this profile or iRule can be
applied to.
"""
def __init__(self, type, protocol):
self.type = type
self.protocol = protocol
def __repr__(self):
return (('<DimensionDataVirtualListenerCompatibility: '
'type=%s, protocol=%s>')
% (self.type, self.protocol))
class DimensionDataBackupDetails(object):
"""
Dimension Data Backup Details represents information about
    a target's backup configuration
"""
def __init__(self, asset_id, service_plan, status, clients=None):
"""
Initialize an instance of :class:`DimensionDataBackupDetails`
:param asset_id: Asset identification for backups
:type asset_id: ``str``
        :param service_plan: The service plan for backups, e.g. (Essentials)
:type service_plan: ``str``
        :param status: The overall status of this backup target,
            e.g. (unregistered)
:type status: ``str``
:param clients: Backup clients attached to this target
:type clients: ``list`` of :class:`DimensionDataBackupClient`
"""
self.asset_id = asset_id
self.service_plan = service_plan
self.status = status
self.clients = clients
def __repr__(self):
return (('<DimensionDataBackupDetails: id=%s>')
% (self.asset_id))
class DimensionDataBackupClient(object):
"""
An object that represents a backup client
"""
def __init__(self, id, type, status,
schedule_policy, storage_policy, download_url,
alert=None, running_job=None):
"""
Initialize an instance of :class:`DimensionDataBackupClient`
:param id: Unique ID for the client
:type id: ``str``
:param type: The type of client that this client is
:type type: :class:`DimensionDataBackupClientType`
        :param status: The status of this particular backup client,
            e.g. (Unregistered)
:type status: ``str``
:param schedule_policy: The schedule policy for this client
NOTE: Dimension Data only sends back the name
of the schedule policy, no further details
:type schedule_policy: ``str``
:param storage_policy: The storage policy for this client
NOTE: Dimension Data only sends back the name
of the storage policy, no further details
:type storage_policy: ``str``
:param download_url: The download url for this client
:type download_url: ``str``
:param alert: The alert configured for this backup client (optional)
:type alert: :class:`DimensionDataBackupClientAlert`
        :param running_job: The running job for the client (optional)
        :type running_job: :class:`DimensionDataBackupClientRunningJob`
"""
self.id = id
self.type = type
self.status = status
self.schedule_policy = schedule_policy
self.storage_policy = storage_policy
self.download_url = download_url
self.alert = alert
self.running_job = running_job
def __repr__(self):
return (('<DimensionDataBackupClient: id=%s>')
% (self.id))
class DimensionDataBackupClientAlert(object):
"""
An alert for a backup client
"""
    def __init__(self, trigger, notify_list=None):
"""
Initialize an instance of :class:`DimensionDataBackupClientAlert`
        :param trigger: Trigger type for the client, e.g. ON_FAILURE
:type trigger: ``str``
:param notify_list: List of email addresses that are notified
when the alert is fired
:type notify_list: ``list`` of ``str``
"""
        self.trigger = trigger
        self.notify_list = notify_list if notify_list is not None else []
def __repr__(self):
return (('<DimensionDataBackupClientAlert: trigger=%s>')
% (self.trigger))
class DimensionDataBackupClientRunningJob(object):
"""
A running job for a given backup client
"""
def __init__(self, id, status, percentage=0):
"""
Initialize an instance of :class:`DimensionDataBackupClientRunningJob`
        :param id: The unique ID of the job
:type id: ``str``
        :param status: The status of the job, e.g. Waiting
:type status: ``str``
:param percentage: The percentage completion of the job
:type percentage: ``int``
"""
self.id = id
self.percentage = percentage
self.status = status
def __repr__(self):
return (('<DimensionDataBackupClientRunningJob: id=%s>')
% (self.id))
class DimensionDataBackupClientType(object):
"""
A client type object for backups
"""
def __init__(self, type, is_file_system, description):
"""
Initialize an instance of :class:`DimensionDataBackupClientType`
        :param type: The type of client, e.g. (FA.Linux, MySQL, etc.)
:type type: ``str``
        :param is_file_system: Whether this client type is a file system
            client
:type is_file_system: ``bool``
:param description: Description of the client
:type description: ``str``
"""
self.type = type
self.is_file_system = is_file_system
self.description = description
def __repr__(self):
return (('<DimensionDataBackupClientType: type=%s>')
% (self.type))
class DimensionDataBackupStoragePolicy(object):
"""
A representation of a storage policy
"""
def __init__(self, name, retention_period, secondary_location):
"""
Initialize an instance of :class:`DimensionDataBackupStoragePolicy`
        :param name: The name of the storage policy, e.g. 14 Day Storage
            Policy
:type name: ``str``
:param retention_period: How long to keep the backup in days
:type retention_period: ``int``
        :param secondary_location: The secondary location, e.g. Primary
:type secondary_location: ``str``
"""
self.name = name
self.retention_period = retention_period
self.secondary_location = secondary_location
def __repr__(self):
return (('<DimensionDataBackupStoragePolicy: name=%s>')
% (self.name))
class DimensionDataBackupSchedulePolicy(object):
"""
A representation of a schedule policy
"""
def __init__(self, name, description):
"""
Initialize an instance of :class:`DimensionDataBackupSchedulePolicy`
        :param name: The name of the policy, e.g. 12AM - 6AM
:type name: ``str``
:param description: Short summary of the details of the policy
:type description: ``str``
"""
self.name = name
self.description = description
def __repr__(self):
return (('<DimensionDataBackupSchedulePolicy: name=%s>')
% (self.name))
class DimensionDataTag(object):
"""
A representation of a Tag in Dimension Data
    A Tag first requires a Tag Key; an asset is then tagged with a key
    and an optional value. Tags can be queried later to filter assets
    and can also appear on usage reports if so desired.
"""
def __init__(self, asset_type, asset_id, asset_name,
datacenter, key, value):
"""
Initialize an instance of :class:`DimensionDataTag`
:param asset_type: The type of asset. Current asset types:
SERVER, VLAN, NETWORK_DOMAIN, CUSTOMER_IMAGE,
PUBLIC_IP_BLOCK, ACCOUNT
:type asset_type: ``str``
:param asset_id: The GUID of the asset that is tagged
:type asset_id: ``str``
:param asset_name: The name of the asset that is tagged
:type asset_name: ``str``
:param datacenter: The short datacenter name of the tagged asset
:type datacenter: ``str``
:param key: The tagged key
:type key: :class:`DimensionDataTagKey`
:param value: The tagged value
:type value: ``None`` or ``str``
"""
self.asset_type = asset_type
self.asset_id = asset_id
self.asset_name = asset_name
self.datacenter = datacenter
self.key = key
self.value = value
def __repr__(self):
return (('<DimensionDataTag: asset_name=%s, tag_name=%s, value=%s>')
% (self.asset_name, self.key.name, self.value))
class DimensionDataTagKey(object):
"""
A representation of a Tag Key in Dimension Data
A tag key is required to tag an asset
"""
def __init__(self, id, name, description,
value_required, display_on_report):
"""
Initialize an instance of :class:`DimensionDataTagKey`
:param id: GUID of the tag key
:type id: ``str``
:param name: Name of the tag key
:type name: ``str``
:param description: Description of the tag key
:type description: ``str``
:param value_required: If a value is required for this tag key
:type value_required: ``bool``
:param display_on_report: If this tag key should be displayed on
usage reports
:type display_on_report: ``bool``
"""
self.id = id
self.name = name
self.description = description
self.value_required = value_required
self.display_on_report = display_on_report
def __repr__(self):
return (('<DimensionDataTagKey: name=%s>')
% (self.name))
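# Editorial sketch: how a tag key relates to a tag. All values are
# hypothetical placeholders; real objects come back from the tagging API.
def _example_tag():
    key = DimensionDataTagKey(id='key-guid', name='cost-center',
                              description='Billing cost center',
                              value_required=True, display_on_report=True)
    return DimensionDataTag(asset_type='SERVER', asset_id='server-guid',
                            asset_name='web-01', datacenter='NA9',
                            key=key, value='engineering')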
class DimensionDataIpAddressList(object):
"""
DimensionData IP Address list
"""
def __init__(self, id, name, description, ip_version,
ip_address_collection,
state, create_time, child_ip_address_lists=None):
""""
Initialize an instance of :class:`DimensionDataIpAddressList`
:param id: GUID of the IP Address List key
:type id: ``str``
:param name: Name of the IP Address List
:type name: ``str``
:param description: Description of the IP Address List
:type description: ``str``
:param ip_version: IP version. E.g. IPV4, IPV6
:type ip_version: ``str``
:param ip_address_collection: Collection of DimensionDataIpAddress
        :type ip_address_collection: ``list`` of
            :class:`DimensionDataIpAddress`
        :param state: IP Address list state
        :type state: ``str``
        :param create_time: IP Address List created time
        :type create_time: ``datetime``
        :param child_ip_address_lists: List of IP address lists to be included
        :type child_ip_address_lists: ``list``
            of :class:`DimensionDataIpAddressList`
"""
self.id = id
self.name = name
self.description = description
self.ip_version = ip_version
self.ip_address_collection = ip_address_collection
self.state = state
self.create_time = create_time
self.child_ip_address_lists = child_ip_address_lists
def __repr__(self):
return ('<DimensionDataIpAddressList: id=%s, name=%s, description=%s, '
'ip_version=%s, ip_address_collection=%s, state=%s, '
'create_time=%s, child_ip_address_lists=%s>'
% (self.id, self.name, self.description, self.ip_version,
self.ip_address_collection, self.state, self.create_time,
self.child_ip_address_lists))
class DimensionDataChildIpAddressList(object):
"""
DimensionData Child IP Address list
"""
def __init__(self, id, name):
""""
Initialize an instance of :class:`DimensionDataChildIpAddressList`
:param id: GUID of the IP Address List key
:type id: ``str``
:param name: Name of the IP Address List
:type name: ``str``
"""
self.id = id
self.name = name
def __repr__(self):
return ('<DimensionDataChildIpAddressList: id=%s, name=%s>'
% (self.id, self.name))
class DimensionDataIpAddress(object):
"""
A representation of IP Address in Dimension Data
"""
def __init__(self, begin, end=None, prefix_size=None):
"""
Initialize an instance of :class:`DimensionDataIpAddress`
:param begin: IP Address Begin
:type begin: ``str``
:param end: IP Address end
:type end: ``str``
        :param prefix_size: IP Address prefix size
        :type prefix_size: ``int``
"""
self.begin = begin
self.end = end
self.prefix_size = prefix_size
def __repr__(self):
return ('<DimensionDataIpAddress: begin=%s, end=%s, prefix_size=%s>'
% (self.begin, self.end, self.prefix_size))
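# Editorial sketch: the three address forms this class can express, namely
# a single address, an inclusive begin/end range, and a CIDR-style prefix.
def _example_ip_addresses():
    single = DimensionDataIpAddress(begin='10.0.0.5')
    addr_range = DimensionDataIpAddress(begin='10.0.0.10', end='10.0.0.20')
    cidr_block = DimensionDataIpAddress(begin='10.0.0.0', prefix_size=24)
    return [single, addr_range, cidr_block]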
class DimensionDataPortList(object):
"""
DimensionData Port list
"""
def __init__(self, id, name, description, port_collection,
child_portlist_list,
state, create_time):
""""
Initialize an instance of :class:`DimensionDataPortList`
:param id: GUID of the Port List key
:type id: ``str``
:param name: Name of the Port List
:type name: ``str``
:param description: Description of the Port List
:type description: ``str``
:param port_collection: Collection of DimensionDataPort
        :type port_collection: ``list`` of :class:`DimensionDataPort`
        :param child_portlist_list: Collection of DimensionDataChildPortList
        :type child_portlist_list: ``list`` of
            :class:`DimensionDataChildPortList`
        :param state: Port list state
        :type state: ``str``
        :param create_time: Port List created time
        :type create_time: ``datetime``
"""
self.id = id
self.name = name
self.description = description
self.port_collection = port_collection
self.child_portlist_list = child_portlist_list
self.state = state
self.create_time = create_time
def __repr__(self):
return (
"<DimensionDataPortList: id=%s, name=%s, description=%s, "
"port_collection=%s, child_portlist_list=%s, state=%s, "
"create_time=%s>"
% (self.id, self.name, self.description,
self.port_collection, self.child_portlist_list, self.state,
self.create_time))
class DimensionDataChildPortList(object):
"""
DimensionData Child Port list
"""
def __init__(self, id, name):
""""
Initialize an instance of :class:`DimensionDataChildIpAddressList`
:param id: GUID of the child port list key
:type id: ``str``
:param name: Name of the child port List
:type name: ``str``
"""
self.id = id
self.name = name
def __repr__(self):
return ('<DimensionDataChildPortList: id=%s, name=%s>'
% (self.id, self.name))
class DimensionDataPort(object):
"""
A representation of Port in Dimension Data
"""
def __init__(self, begin, end=None):
"""
Initialize an instance of :class:`DimensionDataPort`
:param begin: Port Number Begin
:type begin: ``str``
:param end: Port Number end
:type end: ``str``
"""
self.begin = begin
self.end = end
def __repr__(self):
return ('<DimensionDataPort: begin=%s, end=%s>'
% (self.begin, self.end))
class DimensionDataNic(object):
"""
A representation of Network Adapter in Dimension Data
"""
def __init__(self, private_ip_v4=None, vlan=None,
network_adapter_name=None):
"""
Initialize an instance of :class:`DimensionDataNic`
        :param private_ip_v4: Private IPv4 address
        :type private_ip_v4: ``str``
        :param vlan: Network VLAN
        :type vlan: :class:`DimensionDataVlan` or ``str``
        :param network_adapter_name: Network Adapter Name
        :type network_adapter_name: ``str``
"""
self.private_ip_v4 = private_ip_v4
self.vlan = vlan
self.network_adapter_name = network_adapter_name
def __repr__(self):
        return ('<DimensionDataNic: private_ip_v4=%s, vlan=%s, '
                'network_adapter_name=%s>'
% (self.private_ip_v4, self.vlan, self.network_adapter_name))
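# Editorial sketch: per the docstring above, the vlan argument may be either
# a DimensionDataVlan object or a plain string. Values are hypothetical.
def _example_nic(vlan):
    return DimensionDataNic(private_ip_v4='10.0.0.5', vlan=vlan,
                            network_adapter_name='E1000')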
|
PennyQ/astro-vispy
|
refs/heads/master
|
glue_vispy_viewers/isosurface/viewer_state.py
|
6
|
from glue_vispy_viewers.volume.viewer_state import Vispy3DVolumeViewerState
class Vispy3DIsosurfaceViewerState(Vispy3DVolumeViewerState):
pass
|
bryx-inc/boto
|
refs/heads/develop
|
tests/unit/vpc/test_vpngateway.py
|
114
|
# -*- coding: UTF-8 -*-
from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, VpnGateway, Attachment
class TestDescribeVpnGateways(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeVpnGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpnGatewaySet>
<item>
<vpnGatewayId>vgw-8db04f81</vpnGatewayId>
<state>available</state>
<type>ipsec.1</type>
<availabilityZone>us-east-1a</availabilityZone>
<attachments>
<item>
<vpcId>vpc-1a2b3c4d</vpcId>
<state>attached</state>
</item>
</attachments>
<tagSet/>
</item>
</vpnGatewaySet>
</DescribeVpnGatewaysResponse>
"""
def test_get_all_vpn_gateways(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_vpn_gateways(
'vgw-8db04f81', filters=OrderedDict([('state', ['pending', 'available']),
('availability-zone', 'us-east-1a')]))
self.assert_request_parameters({
'Action': 'DescribeVpnGateways',
'VpnGatewayId.1': 'vgw-8db04f81',
'Filter.1.Name': 'state',
'Filter.1.Value.1': 'pending',
'Filter.1.Value.2': 'available',
'Filter.2.Name': 'availability-zone',
'Filter.2.Value.1': 'us-east-1a'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(len(api_response), 1)
self.assertIsInstance(api_response[0], VpnGateway)
self.assertEqual(api_response[0].id, 'vgw-8db04f81')
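# Editorial sketch (not boto's implementation): the request-parameter
# assertions in these tests follow the EC2 query convention, where each
# filter is flattened into numbered 'Filter.N.Name' / 'Filter.N.Value.M'
# parameters. A simplified version of that flattening looks like this;
# boto's real logic lives in its connection layer.
def _flatten_filters_sketch(filters):
    params = {}
    for i, (name, values) in enumerate(filters.items(), 1):
        if not isinstance(values, (list, tuple)):
            values = [values]
        params['Filter.%d.Name' % i] = name
        for j, value in enumerate(values, 1):
            params['Filter.%d.Value.%d' % (i, j)] = value
    return params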
class TestCreateVpnGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpnGateway>
<vpnGatewayId>vgw-8db04f81</vpnGatewayId>
<state>pending</state>
<type>ipsec.1</type>
<availabilityZone>us-east-1a</availabilityZone>
<attachments/>
<tagSet/>
</vpnGateway>
</CreateVpnGatewayResponse>
"""
    def test_create_vpn_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_vpn_gateway('ipsec.1', 'us-east-1a')
self.assert_request_parameters({
'Action': 'CreateVpnGateway',
'AvailabilityZone': 'us-east-1a',
'Type': 'ipsec.1'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, VpnGateway)
        self.assertEqual(api_response.id, 'vgw-8db04f81')
class TestDeleteVpnGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpnGatewayResponse>
"""
def test_delete_vpn_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_vpn_gateway('vgw-8db04f81')
self.assert_request_parameters({
'Action': 'DeleteVpnGateway',
'VpnGatewayId': 'vgw-8db04f81'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(api_response, True)
class TestAttachVpnGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<AttachVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<attachment>
<vpcId>vpc-1a2b3c4d</vpcId>
<state>attaching</state>
</attachment>
</AttachVpnGatewayResponse>
"""
def test_attach_vpn_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.attach_vpn_gateway('vgw-8db04f81', 'vpc-1a2b3c4d')
self.assert_request_parameters({
'Action': 'AttachVpnGateway',
'VpnGatewayId': 'vgw-8db04f81',
'VpcId': 'vpc-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, Attachment)
        self.assertEqual(api_response.vpc_id, 'vpc-1a2b3c4d')
        self.assertEqual(api_response.state, 'attaching')
class TestDetachVpnGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DetachVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DetachVpnGatewayResponse>
"""
def test_detach_vpn_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.detach_vpn_gateway('vgw-8db04f81', 'vpc-1a2b3c4d')
self.assert_request_parameters({
'Action': 'DetachVpnGateway',
'VpnGatewayId': 'vgw-8db04f81',
'VpcId': 'vpc-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(api_response, True)
class TestDisableVgwRoutePropagation(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DisableVgwRoutePropagationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>4f35a1b2-c2c3-4093-b51f-abb9d7311990</requestId>
<return>true</return>
</DisableVgwRoutePropagationResponse>
"""
def test_disable_vgw_route_propagation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.disable_vgw_route_propagation(
'rtb-c98a35a0', 'vgw-d8e09e8a')
self.assert_request_parameters({
'Action': 'DisableVgwRoutePropagation',
'GatewayId': 'vgw-d8e09e8a',
'RouteTableId': 'rtb-c98a35a0'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(api_response, True)
class TestEnableVgwRoutePropagation(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
            <EnableVgwRoutePropagationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>4f35a1b2-c2c3-4093-b51f-abb9d7311990</requestId>
<return>true</return>
            </EnableVgwRoutePropagationResponse>
"""
def test_enable_vgw_route_propagation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.enable_vgw_route_propagation(
'rtb-c98a35a0', 'vgw-d8e09e8a')
self.assert_request_parameters({
'Action': 'EnableVgwRoutePropagation',
'GatewayId': 'vgw-d8e09e8a',
'RouteTableId': 'rtb-c98a35a0'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(api_response, True)
if __name__ == '__main__':
unittest.main()
|
MyGb/PythonApplication
|
refs/heads/master
|
webApp/webApp/settings.py
|
1
|
"""
Django settings for webApp project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b7$0+xy46x7z)9ogfne$h9x*u1&qra8*x8rhf&z+k@_vo1zs0y'
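# Editorial note (not part of the generated settings): in production one
# would typically read the key from the environment instead of hard-coding
# it, e.g.:
# SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)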
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'api.apps.ApiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webApp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'python_test',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
parkouss/treeherder
|
refs/heads/master
|
treeherder/etl/tasks/cleanup_tasks.py
|
9
|
import logging
import urllib
from celery import task
from treeherder.etl.pushlog import MissingHgPushlogProcess
from treeherder.model.derived import RefDataManager
logger = logging.getLogger(__name__)
@task(name='fetch-missing-push-logs')
def fetch_missing_push_logs(missing_pushlogs):
"""
Run several fetch_hg_push_log subtasks, one per repository
"""
with RefDataManager() as rdm:
repos = filter(lambda x: x['url'], rdm.get_all_repository_info())
for repo in repos:
if repo['dvcs_type'] == 'hg' and repo['name'] in missing_pushlogs:
# we must get them one at a time, because if ANY are missing
# from json-pushes, it'll return a 404 for the group.
for resultset in missing_pushlogs[repo['name']]:
fetch_missing_hg_push_logs.apply_async(args=(
repo['name'],
repo['url'],
resultset
),
routing_key='fetch_missing_push_logs'
)
@task(name='fetch-missing-hg-push-logs', time_limit=3 * 60)
def fetch_missing_hg_push_logs(repo_name, repo_url, resultset):
"""
Run a HgPushlog etl process
    ``resultset`` is a list of changeset values truncated to 12 chars.
"""
process = MissingHgPushlogProcess()
changesetParam = urllib.urlencode({"changeset": resultset}, True)
url_str = repo_url + '/json-pushes/?full=1&version=2&' + changesetParam
logger.info("fetching missing resultsets: {0}".format(url_str))
process.run(url_str, repo_name, resultset)
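# Editorial note: with a hypothetical repo_url of
# 'https://hg.mozilla.org/mozilla-central' and resultset=['abcdef123456'],
# the URL fetched above is:
# https://hg.mozilla.org/mozilla-central/json-pushes/?full=1&version=2&changeset=abcdef123456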
|
automl/HPOlibConfigSpace
|
refs/heads/master
|
HPOlibConfigSpace/nx/algorithms/components/__init__.py
|
2
|
from HPOlibConfigSpace.nx.algorithms.components.strongly_connected import *
|
aganezov/gos-asm
|
refs/heads/master
|
gos_asm/algo/executable_containers/__init__.py
|
1349
|
# -*- coding: utf-8 -*-
|
spartonia/saleor
|
refs/heads/master
|
saleor/product/migrations/0005_auto_20150825_1433.py
|
17
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_prices.models
import saleor.product.models.fields
class Migration(migrations.Migration):
dependencies = [
('product', '0004_merge'),
]
operations = [
migrations.AlterField(
model_name='fixedproductdiscount',
name='discount',
field=django_prices.models.PriceField(verbose_name='discount value', max_digits=12, decimal_places=2, currency='USD'),
),
migrations.AlterField(
model_name='product',
name='price',
field=django_prices.models.PriceField(verbose_name='price', max_digits=12, decimal_places=2, currency='USD'),
),
migrations.AlterField(
model_name='product',
name='weight',
field=saleor.product.models.fields.WeightField(verbose_name='weight', max_digits=6, decimal_places=2, unit='lb'),
),
migrations.AlterField(
model_name='productvariant',
name='price_override',
field=django_prices.models.PriceField(verbose_name='price override', decimal_places=2, blank=True, currency='USD', max_digits=12, null=True),
),
migrations.AlterField(
model_name='productvariant',
name='weight_override',
field=saleor.product.models.fields.WeightField(verbose_name='weight override', decimal_places=2, blank=True, max_digits=6, unit='lb', null=True),
),
migrations.AlterField(
model_name='stock',
name='cost_price',
field=django_prices.models.PriceField(verbose_name='cost price', decimal_places=2, blank=True, currency='USD', max_digits=12, null=True),
),
]
|
indeedops/dd-agent
|
refs/heads/master
|
tests/core/test_transaction.py
|
8
|
# stdlib
from datetime import datetime, timedelta
import threading
import time
import unittest
# 3rd party
from nose.plugins.attrib import attr
import requests
import simplejson as json
from tornado.web import Application
# project
from config import get_version
from ddagent import (
APIMetricTransaction,
APIServiceCheckTransaction,
MAX_QUEUE_SIZE,
MetricTransaction,
THROTTLING_DELAY,
)
from transaction import Transaction, TransactionManager
class memTransaction(Transaction):
def __init__(self, size, manager):
Transaction.__init__(self)
self._trManager = manager
self._size = size
self._flush_count = 0
self._endpoint = 'https://example.com'
self._api_key = 'a' * 32
self.is_flushable = False
def flush(self):
self._flush_count = self._flush_count + 1
if self.is_flushable:
self._trManager.tr_success(self)
else:
self._trManager.tr_error(self)
self._trManager.flush_next()
class SleepingTransaction(Transaction):
def __init__(self, manager, delay=0.5):
Transaction.__init__(self)
self._trManager = manager
self._size = 1
self._flush_count = 0
self._endpoint = 'https://example.com'
self._api_key = 'a' * 32
self.delay = delay
self.is_flushable = False
def flush(self):
threading.Timer(self.delay, self.post_flush).start()
def post_flush(self):
self._flush_count = self._flush_count + 1
if self.is_flushable:
self._trManager.tr_success(self)
else:
self._trManager.tr_error(self)
self._trManager.flush_next()
@attr(requires='core_integration')
class TestTransaction(unittest.TestCase):
def setUp(self):
pass
def testMemoryLimit(self):
"""Test memory limit as well as simple flush"""
# No throttling, no delay for replay
trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
timedelta(seconds=0), max_endpoint_errors=100)
step = 10
oneTrSize = (MAX_QUEUE_SIZE / step) - 1
for i in xrange(step):
trManager.append(memTransaction(oneTrSize, trManager))
trManager.flush()
        # There should be exactly `step` transactions in the list, with
        # a flush count of 1
self.assertEqual(len(trManager._transactions), step)
for tr in trManager._transactions:
self.assertEqual(tr._flush_count, 1)
# Try to add one more
trManager.append(memTransaction(oneTrSize + 10, trManager))
# At this point, transaction one (the oldest) should have been removed from the list
self.assertEqual(len(trManager._transactions), step)
for tr in trManager._transactions:
self.assertNotEqual(tr._id, 1)
trManager.flush()
self.assertEqual(len(trManager._transactions), step)
# Check and allow transactions to be flushed
for tr in trManager._transactions:
tr.is_flushable = True
# Last transaction has been flushed only once
if tr._id == step + 1:
self.assertEqual(tr._flush_count, 1)
else:
self.assertEqual(tr._flush_count, 2)
trManager.flush()
self.assertEqual(len(trManager._transactions), 0)
def testThrottling(self):
"""Test throttling while flushing"""
# No throttling, no delay for replay
trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
THROTTLING_DELAY, max_endpoint_errors=100)
trManager._flush_without_ioloop = True # Use blocking API to emulate tornado ioloop
# Add 3 transactions, make sure no memory limit is in the way
oneTrSize = MAX_QUEUE_SIZE / 10
for i in xrange(3):
tr = memTransaction(oneTrSize, trManager)
trManager.append(tr)
# Try to flush them, time it
before = datetime.utcnow()
trManager.flush()
after = datetime.utcnow()
self.assertTrue((after - before) > 3 * THROTTLING_DELAY - timedelta(microseconds=100000),
"before = %s after = %s" % (before, after))
def testCustomEndpoint(self):
MetricTransaction._endpoints = []
config = {
"endpoints": {"https://foo.bar.com": ["foo"]},
"dd_url": "https://foo.bar.com",
"api_key": "foo",
"use_dd": True
}
app = Application()
app.skip_ssl_validation = False
app._agentConfig = config
app.use_simple_http_client = True
trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
THROTTLING_DELAY, max_endpoint_errors=100)
trManager._flush_without_ioloop = True # Use blocking API to emulate tornado ioloop
MetricTransaction._trManager = trManager
MetricTransaction.set_application(app)
MetricTransaction.set_endpoints(config['endpoints'])
transaction = MetricTransaction(None, {}, "msgtype")
endpoints = []
for endpoint in transaction._endpoints:
for api_key in transaction._endpoints[endpoint]:
endpoints.append(transaction.get_url(endpoint, api_key))
expected = ['https://foo.bar.com/intake/msgtype?api_key=foo']
self.assertEqual(endpoints, expected, (endpoints, expected))
def testEndpoints(self):
"""
Tests that the logic behind the agent version specific endpoints is ok.
Also tests that these endpoints actually exist.
"""
MetricTransaction._endpoints = []
api_key = "a" * 32
config = {
"endpoints": {"https://app.datadoghq.com": [api_key]},
"dd_url": "https://app.datadoghq.com",
"api_key": api_key,
"use_dd": True
}
app = Application()
app.skip_ssl_validation = False
app._agentConfig = config
app.use_simple_http_client = True
trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
THROTTLING_DELAY, max_endpoint_errors=100)
trManager._flush_without_ioloop = True # Use blocking API to emulate tornado ioloop
MetricTransaction._trManager = trManager
MetricTransaction.set_application(app)
MetricTransaction.set_endpoints(config['endpoints'])
transaction = MetricTransaction(None, {}, "")
endpoints = []
for endpoint in transaction._endpoints:
for api_key in transaction._endpoints[endpoint]:
endpoints.append(transaction.get_url(endpoint, api_key))
expected = ['https://{0}-app.agent.datadoghq.com/intake/?api_key={1}'.format(
get_version().replace(".", "-"), api_key)]
self.assertEqual(endpoints, expected, (endpoints, expected))
for url in endpoints:
r = requests.post(url, data=json.dumps({"foo": "bar"}),
headers={'Content-Type': "application/json"})
r.raise_for_status()
# API Metric Transaction
transaction = APIMetricTransaction(None, {})
endpoints = []
for endpoint in transaction._endpoints:
for api_key in transaction._endpoints[endpoint]:
endpoints.append(transaction.get_url(endpoint, api_key))
expected = ['https://{0}-app.agent.datadoghq.com/api/v1/series/?api_key={1}'.format(
get_version().replace(".", "-"), api_key)]
self.assertEqual(endpoints, expected, (endpoints, expected))
for url in endpoints:
r = requests.post(url, data=json.dumps({"foo": "bar"}),
headers={'Content-Type': "application/json"})
r.raise_for_status()
# API Service Check Transaction
APIServiceCheckTransaction._trManager = trManager
APIServiceCheckTransaction.set_application(app)
APIServiceCheckTransaction.set_endpoints(config['endpoints'])
transaction = APIServiceCheckTransaction(None, {})
endpoints = []
for endpoint in transaction._endpoints:
for api_key in transaction._endpoints[endpoint]:
endpoints.append(transaction.get_url(endpoint, api_key))
expected = ['https://{0}-app.agent.datadoghq.com/api/v1/check_run/?api_key={1}'.format(
get_version().replace(".", "-"), api_key)]
self.assertEqual(endpoints, expected, (endpoints, expected))
for url in endpoints:
r = requests.post(url, data=json.dumps({'check': 'test', 'status': 0}),
headers={'Content-Type': "application/json"})
r.raise_for_status()
def test_endpoint_error(self):
trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
timedelta(seconds=0), max_endpoint_errors=2)
step = 10
oneTrSize = (MAX_QUEUE_SIZE / step) - 1
for i in xrange(step):
trManager.append(memTransaction(oneTrSize, trManager))
trManager.flush()
        # There should be exactly `step` transactions in the list,
        # and only 2 of them with a flush count of 1
self.assertEqual(len(trManager._transactions), step)
flush_count = 0
for tr in trManager._transactions:
flush_count += tr._flush_count
self.assertEqual(flush_count, 2)
# If we retry to flush, two OTHER transactions should be tried
trManager.flush()
self.assertEqual(len(trManager._transactions), step)
flush_count = 0
for tr in trManager._transactions:
flush_count += tr._flush_count
self.assertIn(tr._flush_count, [0, 1])
self.assertEqual(flush_count, 4)
# Finally when it's possible to flush, everything should go smoothly
for tr in trManager._transactions:
tr.is_flushable = True
trManager.flush()
self.assertEqual(len(trManager._transactions), 0)
@attr('unix')
def test_parallelism(self):
step = 4
trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
timedelta(seconds=0), max_parallelism=step,
max_endpoint_errors=100)
for i in xrange(step):
trManager.append(SleepingTransaction(trManager))
trManager.flush()
self.assertEqual(trManager._running_flushes, step)
self.assertEqual(trManager._finished_flushes, 0)
# If _trs_to_flush != None, it means that it's still running as it should be
self.assertEqual(trManager._trs_to_flush, [])
time.sleep(1)
# It should be finished
self.assertEqual(trManager._running_flushes, 0)
self.assertEqual(trManager._finished_flushes, step)
self.assertIs(trManager._trs_to_flush, None)
def test_no_parallelism(self):
step = 2
trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
timedelta(seconds=0), max_parallelism=1,
max_endpoint_errors=100)
for i in xrange(step):
trManager.append(SleepingTransaction(trManager, delay=1))
trManager.flush()
# Flushes should be sequential
for i in xrange(step):
self.assertEqual(trManager._running_flushes, 1)
self.assertEqual(trManager._finished_flushes, i)
self.assertEqual(len(trManager._trs_to_flush), step - (i + 1))
time.sleep(1.3)
# Once it's finished
self.assertEqual(trManager._running_flushes, 0)
self.assertEqual(trManager._finished_flushes, 2)
self.assertIs(trManager._trs_to_flush, None)
def test_multiple_endpoints(self):
config = {
"endpoints": {
"https://app.datadoghq.com": ['api_key'],
"https://app.example.com": ['api_key']
},
"dd_url": "https://app.datadoghq.com",
"api_key": 'api_key',
"use_dd": True
}
app = Application()
app.skip_ssl_validation = False
app._agentConfig = config
app.use_simple_http_client = True
trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
THROTTLING_DELAY, max_endpoint_errors=100)
trManager._flush_without_ioloop = True # Use blocking API to emulate tornado ioloop
MetricTransaction._trManager = trManager
MetricTransaction.set_application(app)
MetricTransaction.set_endpoints(config['endpoints'])
MetricTransaction({}, {})
# 2 endpoints = 2 transactions
self.assertEqual(len(trManager._transactions), 2)
self.assertEqual(trManager._transactions[0]._endpoint, 'https://app.datadoghq.com')
self.assertEqual(trManager._transactions[1]._endpoint, 'https://app.example.com')
|
ordinary-developer/book_python_cookbook_3_ed_d_beazley_b_k_jones
|
refs/heads/master
|
code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/11-naming_a_slice/main.py
|
3
|
def example_1():
###### 0123456789012345678901234567890123456789012345678901234567890
record = ' 100 513.25 '
cost = int(record[20:32]) * float(record[40:48])
print(cost)
SHARES = slice(20, 32)
PRICE = slice(40, 48)
cost = int(record[SHARES]) * float(record[PRICE])
print(cost)
def example_2():
items = [0, 1, 2, 3, 4, 5, 6]
a = slice(2, 4)
print(items[2:4], items[a])
items[a] = [10, 11]
print(items)
del items[a]
print(items)
a = slice(10, 50, 2)
print(a.start)
print(a.stop)
print(a.step)
def example_3():
s = 'HelloWorld'
a = slice(0, 10, 2)
print(a.indices(len(s)))
for i in range(*a.indices(len(s))):
print(s[i])
if __name__ == '__main__':
example_1()
example_2()
example_3()
|
ales-erjavec/orange
|
refs/heads/master
|
install-scripts/orngServer/orngServerFilesServer.py
|
6
|
import sys
sys.path.insert(0,"../CherryPy-3.1.0")
import cherrypy
print "Loaded CherryPy version", cherrypy.__version__
import os
import shutil
import re
import hashlib
import threading
import cgi
pj = os.path.join
import datetime
basedir = pj(os.getcwd(), "..", "orngServerData")
userfilename = '../orngServerFilesUsers.txt'
def readUserFile():
s = open(userfilename, 'rt').read()
umap = {}
for line in s.splitlines():
try:
uname, passw = line.split(',')
umap[uname] = passw
except:
pass
print "USERS", umap.keys()
return umap
def noBodyProcess():
"""Sets cherrypy.request.process_request_body = False, giving
    us direct control of the file upload destination. By default
    cherrypy loads it into memory; we direct it to disk."""
#cherrypy.request.process_request_body = False
print "noBodyProcess"
print "LOGIN", cherrypy.request.login
print "PROCESS RB", cherrypy.request.process_request_body
if not cherrypy.request.login:
cherrypy.request.process_request_body = False
cherrypy.tools.noBodyProcess = cherrypy.Tool('on_start_resource', noBodyProcess, priority=20)
class FileInfo(object):
separ = '|||||'
def __init__(self, fname):
self.fname = fname
self.set()
    def set(self, name=None, protection=None, datetime=None, title=None,
            tags=None):
self.name = name
self.protection = protection
self.datetime = datetime
self.title = title
        self.tags = tags if tags is not None else []
def load(self):
f = open(self.fname, 'rb')
cont = f.read()
f.close()
#print "CONT", cont
name, protection, datetime, title, tags = cont.splitlines()
tags = tags.split(";")
self.set(name, protection, datetime, title, tags)
def userInfo(self):
return self.separ.join([\
str(os.stat(self.fname + ".file").st_size), \
str(self.datetime), \
self.title, \
";".join(self.tags) \
])
def save(self, fname=None):
if not fname:
fname = self.fname
f = open(fname, 'wb')
cont = '\n'.join([self.name, self.protection, str(self.datetime), \
self.title, ";".join(self.tags)])
#print "WRITING", cont
f.write(cont)
f.close()
def exists(self):
"""
If file info already exists as a file.
"""
return self.protection != None
"""
Only one client can edit data for now!
FIXME: allow multiple clients changing different files.
Try locking a specific basename!
"""
sem = threading.BoundedSemaphore()
def lock(**kwargs):
#print "locking"
sem.acquire()
def unlock(**kwargs):
sem.release()
#print "unlocking"
cherrypy.tools.lock = cherrypy.Tool('on_start_resource', lock, priority=2)
cherrypy.tools.unlock = cherrypy.Tool('on_end_request', unlock)
"""
End of simple locking tools
"""
rec = re.compile("[^A-Za-z0-9\-\.\_]")
def safeFilename(s):
return rec.sub("", s)
def hash(s):
"""
    Hashing function used to build on-disk filenames (SHA-256 hex digest).
"""
return hashlib.sha256(s).hexdigest()
def baseDomain(domain):
domain = safeFilename(domain) #force safe domain
return pj(basedir, domain)
def baseFilename(domain, filename):
"""
    Return the base filename for saving on disk, composed of only
    lowercase characters: the first 100 alphanumeric characters of
    the filename, followed by its hash.
"""
return pj(baseDomain(domain), \
safeFilename(filename.lower())[:100] + "." + hash(filename))
def fileInfo(domain, filename):
"""
Each file is saved in two files: its index and its data.
    Multiple files may map to the same base name, so candidates are
    enumerated. If the filename stored in an index matches, it is the
    same file.
    Returns the file's FileInfo. If the file does not exist, its
    FileInfo only has the fname attribute set.
"""
basename = baseFilename(domain, filename)
candidate = 1
filei = None
while 1:
try:
fileit = FileInfo(basename + "." + str(candidate))
fileit.load()
if fileit.name == filename:
filei = fileit
break
except IOError:
break # no file - file is free to be taken
candidate += 1
if not filei:
filei = FileInfo(basename + "." + str(candidate))
return filei
def userFileInfo(domain, filename, protected=False):
fi = fileInfo(domain, filename)
if accessAllowed(fi, protected):
return fi.userInfo()
else:
return "None"
def accessAllowed(fi, protected):
"""
protected == access_code or True (allow everything) or False (public access)
Allow access if:
- there is no protection or
- protected == True -> administrative account
- fi.protection == protected and it is not "1"
"""
#return fi.protection == "0" or protected == True or (fi.protection == protected)
return fi.protection == "0" or protected == True or (fi.protection != "1" and fi.protection == protected)
def downloadFile(domain, filename, protected=False):
fi = fileInfo(domain, filename)
if accessAllowed(fi, protected):
return cherrypy.lib.static.serve_file(fi.fname + ".file", "application/x-download", "attachment", filename)
else:
raise cherrypy.HTTPError(500, "File not available!")
def listFilesL(domain, protected=False):
dir = baseDomain(domain)
files = [ a for a in os.listdir(dir) if a[-1].isdigit() ]
okfiles = []
for file in files:
fi = FileInfo(pj(dir, file))
try:
fi.load()
if fi.exists() and accessAllowed(fi, protected):
okfiles.append(fi.name)
except:
pass
return okfiles
def listFiles(domain, protected=False):
return "|||||".join(listFilesL(domain, protected=protected))
def listdomainsL():
dir = basedir
files = [ a for a in os.listdir(dir) ]
ok = []
for file in files:
if os.path.isdir(os.path.join(dir, file)):
ok.append(file)
return ok
def listdomains():
return "|||||".join(listdomainsL())
def allFileInfosL(domain, protected=False):
files = listFilesL(domain, protected=protected)
out = []
for filename in files:
info = userFileInfo(domain, filename, protected=protected)
out.append((filename, info))
return out
def allFileInfos(domain, protected=False):
ai = allFileInfosL(domain, protected=protected)
return [ "[[[[[".join( [ a + "=====" + b for a,b in ai ] ) ]
class RootServer:
@cherrypy.expose
def index(self):
return """"""
class PublicServer:
@cherrypy.expose
def index(self):
return """"""
@cherrypy.expose
def info(self, domain, filename, access_code=None):
return userFileInfo(domain, filename, protected=access_code)
@cherrypy.expose
def allinfo(self, domain, access_code=None):
return allFileInfos(domain, protected=access_code)
@cherrypy.expose
def download(self, domain, filename, access_code=None):
return downloadFile(domain, filename, protected=access_code)
@cherrypy.expose
def list(self, domain, access_code=None):
return listFiles(domain, protected=access_code)
@cherrypy.expose
def listdomains(self):
return listdomains()
class SecureServer:
@cherrypy.expose
def index(self):
return """"""
@cherrypy.expose
def info(self, domain, filename):
return userFileInfo(domain, filename, protected=True)
@cherrypy.expose
def allinfo(self, domain):
return allFileInfos(domain, protected=True)
@cherrypy.expose
def download(self, domain, filename):
return downloadFile(domain, filename, protected=True)
@cherrypy.expose
def list(self, domain):
return listFiles(domain, protected=True)
@cherrypy.expose
def createdomain(self, domain):
"""
        Creates a domain. If creation is successful, returns "0";
        otherwise an error is raised.
"""
dir = baseDomain(domain)
os.mkdir(dir)
return "0"
@cherrypy.expose
def removedomain(self, domain, force=False):
"""
        Removes a domain. If successful, returns "0"; otherwise an
        error is raised.
"""
dir = baseDomain(domain)
if not force:
os.rmdir(dir)
else:
shutil.rmtree(dir)
return '0'
@cherrypy.expose
@cherrypy.tools.lock()
@cherrypy.tools.unlock()
def remove(self, domain, filename):
fi = fileInfo(domain, filename)
if fi.exists(): #valid file
os.remove(fi.fname)
os.remove(fi.fname+".file")
return "0"
else:
raise cherrypy.HTTPError(500, "File does not exists.")
@cherrypy.expose
@cherrypy.tools.lock()
@cherrypy.tools.unlock()
def upload(self, domain, filename, title, tags, data):
fi = fileInfo(domain, filename)
#print data.file.name
fupl = open(fi.fname + ".uploading", 'wb')
shutil.copyfileobj(data.file, fupl, 1024*8) #copy with buffer
fupl.close()
#print "transfer successful?" #TODO check this - MD5?
#TODO is there any difference in those files?
fupl = open(fi.fname + ".uploading", 'rb')
ffin = open(fi.fname + ".file", 'wb')
shutil.copyfileobj(fupl, ffin) #copy with buffer
ffin.close()
fupl.close()
#remove file copy
os.remove(fi.fname + ".uploading")
datetime_now = str(datetime.datetime.utcnow())
fi.datetime = datetime_now
fi.name = filename
if fi.protection == None:
fi.protection = "1" #only administrative access by default
fi.title = title
fi.tags = tags.split(";")
fi.save()
@cherrypy.expose
@cherrypy.tools.lock()
@cherrypy.tools.unlock()
def protect(self, domain, filename, access_code="1"):
fi = fileInfo(domain, filename)
if fi.exists():
fi.protection = access_code
fi.save()
else:
raise cherrypy.HTTPError(500, "File does not exists.")
@cherrypy.expose
@cherrypy.tools.lock()
@cherrypy.tools.unlock()
def unprotect(self, domain, filename):
#DEPRECATED
fi = fileInfo(domain, filename)
if fi.exists():
fi.protection = "0"
fi.save()
else:
raise cherrypy.HTTPError(500, "File does not exists.")
@cherrypy.expose
@cherrypy.tools.lock()
@cherrypy.tools.unlock()
def protection(self, domain, filename):
fi = fileInfo(domain, filename)
if fi.exists():
return fi.protection
else:
raise cherrypy.HTTPError(500, "File does not exists.")
@cherrypy.expose
def listdomains(self):
return listdomains()
"""
Tools for enforcing security measures.
Also,
"""
def force_secure(header="Secure"):
secure = cherrypy.request.headers.get(header, False)
if not secure:
raise cherrypy.HTTPError(500, "Use ssl!")
def force_insecure(header="Secure"):
secure = cherrypy.request.headers.get(header, False)
if secure:
raise cherrypy.HTTPError(500, "Do not use ssl!")
cherrypy.tools.secure = cherrypy.Tool('on_start_resource', force_secure, priority=1)
cherrypy.tools.insecure = cherrypy.Tool('on_start_resource', force_insecure, priority=1)
# remove any limit on the request body size; cherrypy's default is 100MB
cherrypy.server.max_request_body_size = 0
def buildServer():
users = readUserFile()
conf = {'global': { 'log.screen': False,
'log.access_file': 'af.log',
'log.error_file': 'ef.log' },
'/public': { 'tools.insecure.on': True},
'/private': { 'tools.secure.on': True,
'tools.basic_auth.on': True,
'tools.basic_auth.realm': 'orngFileServer',
'tools.basic_auth.users': users,
'tools.basic_auth.encrypt': lambda x: x,
}}
root = RootServer()
root.public = PublicServer()
root.private = SecureServer()
return root, conf
if __name__ == '__main__':
root, conf = buildServer()
cherrypy.tree.mount(root, '/', conf)
cherrypy.engine.start()
cherrypy.engine.block()
|
harmy/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/distutils/tests/test_extension.py
|
6
|
"""Tests for distutils.extension."""
import unittest
import os
import warnings
from test.support import check_warnings, run_unittest
from distutils.extension import read_setup_file, Extension
class ExtensionTestCase(unittest.TestCase):
def test_read_setup_file(self):
# trying to read a Setup file
# (sample extracted from the PyGame project)
setup = os.path.join(os.path.dirname(__file__), 'Setup.sample')
exts = read_setup_file(setup)
names = [ext.name for ext in exts]
names.sort()
# here are the extensions read_setup_file should have created
# out of the file
wanted = ['_arraysurfarray', '_camera', '_numericsndarray',
'_numericsurfarray', 'base', 'bufferproxy', 'cdrom',
'color', 'constants', 'display', 'draw', 'event',
'fastevent', 'font', 'gfxdraw', 'image', 'imageext',
'joystick', 'key', 'mask', 'mixer', 'mixer_music',
'mouse', 'movie', 'overlay', 'pixelarray', 'pypm',
'rect', 'rwobject', 'scrap', 'surface', 'surflock',
'time', 'transform']
self.assertEqual(names, wanted)
def test_extension_init(self):
# the first argument, which is the name, must be a string
self.assertRaises(AssertionError, Extension, 1, [])
ext = Extension('name', [])
self.assertEqual(ext.name, 'name')
# the second argument, which is the list of files, must
# be a list of strings
self.assertRaises(AssertionError, Extension, 'name', 'file')
self.assertRaises(AssertionError, Extension, 'name', ['file', 1])
ext = Extension('name', ['file1', 'file2'])
self.assertEqual(ext.sources, ['file1', 'file2'])
        # other arguments have defaults
for attr in ('include_dirs', 'define_macros', 'undef_macros',
'library_dirs', 'libraries', 'runtime_library_dirs',
'extra_objects', 'extra_compile_args', 'extra_link_args',
'export_symbols', 'swig_opts', 'depends'):
self.assertEqual(getattr(ext, attr), [])
self.assertEqual(ext.language, None)
self.assertEqual(ext.optional, None)
# if there are unknown keyword options, warn about them
with check_warnings() as w:
warnings.simplefilter('always')
ext = Extension('name', ['file1', 'file2'], chic=True)
self.assertEqual(len(w.warnings), 1)
self.assertEqual(str(w.warnings[0].message),
"Unknown Extension options: 'chic'")
def test_suite():
return unittest.makeSuite(ExtensionTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
dbcls/dbcls-galaxy
|
refs/heads/master
|
tools/maf/interval_maf_to_merged_fasta.py
|
1
|
#!/usr/bin/env python
"""
Reads an interval or gene BED and a MAF Source.
Produces a FASTA file containing the aligned intervals/gene sequences, based upon the provided coordinates.
Alignment blocks are layered on top of each other based upon score.
usage: %prog maf_file [options]
-d, --dbkey=d: Database key, ie hg17
-c, --chromCol=c: Column of Chr
-s, --startCol=s: Column of Start
-e, --endCol=e: Column of End
-S, --strandCol=S: Column of Strand
-G, --geneBED: Input is a Gene BED file, process and join exons as one region
-t, --mafSourceType=t: Type of MAF source to use
-m, --mafSource=m: Path of source MAF file, if not using cached version
-I, --mafIndex=I: Path of precomputed source MAF file index, if not using cached version
-i, --interval_file=i: Input interval file
-o, --output_file=o: Output MAF file
-p, --species=p: Species to include in output
-z, --mafIndexFileDir=z: Directory of local maf_index.loc file
usage: %prog dbkey_of_BED comma_separated_list_of_additional_dbkeys_to_extract comma_separated_list_of_indexed_maf_files input_gene_bed_file output_fasta_file cached|user GALAXY_DATA_INDEX_DIR
"""
#Dan Blankenberg
from galaxy import eggs
from galaxy.tools.util import maf_utilities
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.cookbook import doc_optparse
import bx.intervals.io
import sys
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( msg )
sys.exit()
def __main__():
#Parse Command Line
options, args = doc_optparse.parse( __doc__ )
mincols = 0
strand_col = -1
if options.dbkey:
primary_species = options.dbkey
else:
primary_species = None
if primary_species in [None, "?", "None"]:
stop_err( "You must specify a proper build in order to extract alignments. You can specify your genome build by clicking on the pencil icon associated with your interval file." )
include_primary = True
secondary_species = maf_utilities.parse_species_option( options.species )
if secondary_species:
species = list( secondary_species ) # make copy of species list
if primary_species in secondary_species:
secondary_species.remove( primary_species )
else:
include_primary = False
else:
species = None
if options.interval_file:
interval_file = options.interval_file
else:
stop_err( "Input interval file has not been specified." )
if options.output_file:
output_file = options.output_file
else:
stop_err( "Output file has not been specified." )
if not options.geneBED:
if options.chromCol:
chr_col = int( options.chromCol ) - 1
else:
stop_err( "Chromosome column not set, click the pencil icon in the history item to set the metadata attributes." )
if options.startCol:
start_col = int( options.startCol ) - 1
else:
stop_err( "Start column not set, click the pencil icon in the history item to set the metadata attributes." )
if options.endCol:
end_col = int( options.endCol ) - 1
else:
stop_err( "End column not set, click the pencil icon in the history item to set the metadata attributes." )
if options.strandCol:
strand_col = int( options.strandCol ) - 1
mafIndexFile = "%s/maf_index.loc" % options.mafIndexFileDir
#Finish parsing command line
#get index for mafs based on type
index = index_filename = None
#using specified uid for locally cached
if options.mafSourceType.lower() in ["cached"]:
index = maf_utilities.maf_index_by_uid( options.mafSource, mafIndexFile )
if index is None:
stop_err( "The MAF source specified (%s) appears to be invalid." % ( options.mafSource ) )
elif options.mafSourceType.lower() in ["user"]:
#index maf for use here, need to remove index_file when finished
index, index_filename = maf_utilities.open_or_build_maf_index( options.mafSource, options.mafIndex, species = [primary_species] )
if index is None:
stop_err( "Your MAF file appears to be malformed." )
else:
stop_err( "Invalid MAF source type specified." )
#open output file
output = open( output_file, "w" )
if options.geneBED:
region_enumerator = maf_utilities.line_enumerator( open( interval_file, "r" ).readlines() )
else:
region_enumerator = enumerate( bx.intervals.io.NiceReaderWrapper( open( interval_file, 'r' ), chrom_col = chr_col, start_col = start_col, end_col = end_col, strand_col = strand_col, fix_strand = True, return_header = False, return_comments = False ) )
#Step through intervals
regions_extracted = 0
line_count = 0
for line_count, line in region_enumerator:
try:
if options.geneBED: #Process as Gene BED
try:
starts, ends, fields = maf_utilities.get_starts_ends_fields_from_gene_bed( line )
#create spliced alignment object
alignment = maf_utilities.get_spliced_region_alignment( index, primary_species, fields[0], starts, ends, strand = '+', species = species, mincols = mincols )
primary_name = secondary_name = fields[3]
alignment_strand = fields[5]
except Exception, e:
print "Error loading exon positions from input line %i: %s" % ( line_count, e )
continue
else: #Process as standard intervals
try:
                    #create region alignment object
alignment = maf_utilities.get_region_alignment( index, primary_species, line.chrom, line.start, line.end, strand = '+', species = species, mincols = mincols )
primary_name = "%s(%s):%s-%s" % ( line.chrom, line.strand, line.start, line.end )
secondary_name = ""
alignment_strand = line.strand
except Exception, e:
print "Error loading region positions from input line %i: %s" % ( line_count, e )
continue
#Write alignment to output file
#Output primary species first, if requested
if include_primary:
output.write( ">%s.%s\n" %( primary_species, primary_name ) )
if alignment_strand == "-":
output.write( alignment.get_sequence_reverse_complement( primary_species ) )
else:
output.write( alignment.get_sequence( primary_species ) )
output.write( "\n" )
            #Output all remaining species
for spec in secondary_species or alignment.get_species_names( skip = primary_species ):
if secondary_name:
output.write( ">%s.%s\n" % ( spec, secondary_name ) )
else:
output.write( ">%s\n" % ( spec ) )
if alignment_strand == "-":
output.write( alignment.get_sequence_reverse_complement( spec ) )
else:
output.write( alignment.get_sequence( spec ) )
output.write( "\n" )
output.write( "\n" )
regions_extracted += 1
except Exception, e:
print "Unexpected error from input line %i: %s" % ( line_count, e )
continue
#close output file
output.close()
#remove index file if created during run
maf_utilities.remove_temp_index_file( index_filename )
#Print message about success for user
if regions_extracted > 0:
print "%i regions were processed successfully." % ( regions_extracted )
else:
print "No regions were processed successfully."
if line_count > 0 and options.geneBED:
print "This tool requires your input file to conform to the 12 column BED standard."
if __name__ == "__main__": __main__()
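#Illustrative example (not part of the original tool; the paths, column
#numbers and dbkey below are all hypothetical): extracting alignments for
#hg17 intervals from a user-supplied MAF file would look roughly like:
#  python interval_maf_to_merged_fasta.py -d hg17 -c 1 -s 2 -e 3 -S 6 \
#      -t user -m input.maf -i regions.bed -o out.fasta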
|
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/helpers/tests/generator3_tests/data/SkeletonGeneration/origin_stamp_in_skeleton_header_is_updated_on_copying/after/cache/e3b0c44298/mod.py
|
24
|
# encoding: utf-8
# module mod
# from some/existing/binary.py
# by generator 1000.0
# no doc
# no imports
# no functions
# no classes
|
baishancloud/pykit
|
refs/heads/master
|
mathtoy/test/test_mathtoy.py
|
2
|
#!/usr/bin/env python
# coding: utf-8
import unittest
from pykit import ututil
from pykit.mathtoy import Matrix
from pykit.mathtoy import Polynomial
from pykit.mathtoy import Vector
dd = ututil.dd
class TestVector(unittest.TestCase):
def test_vector(self):
a = Vector([1, 2, 3])
b = a + [2, 3, 4]
self.assertEqual([3, 5, 7], b)
self.assertEqual([1, 2, 3], a)
c = b - a
self.assertEqual([2, 3, 4], c)
b -= a
self.assertEqual([2, 3, 4], b)
d = a.inner_product([2, 3, 4])
self.assertEqual(20, d)
def test_vector_val(self):
a = Vector([1, 2, 3])
self.assertEqual([2, 4, 6], a * 2)
self.assertEqual([1, 4, 9], a**2)
class TestMatrix(unittest.TestCase):
def test_minor(self):
a = Matrix([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
])
self.assertEqual([[5, 6],
[8, 9], ],
a.minor(0, 0))
self.assertEqual([[1, 2],
[4, 5], ],
a.minor(2, 2))
def test_determinant(self):
a = Matrix([[2], ])
self.assertEqual(2, a.determinant())
a = Matrix([[1, 2],
[4, 5],
])
self.assertEqual(-3, a.determinant())
a = Matrix([[-2, 2, -3],
[-1, 1, 3],
[2, 0, 1],
])
self.assertEqual(18, a.determinant())
def test_replace(self):
a = Matrix([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
])
a.replace_row(0, [0, 0, 0])
self.assertEqual([[0, 0, 0],
[4, 5, 6],
[7, 8, 9]
], a)
a.replace_col(1, [8, 8, 8])
self.assertEqual([[0, 8, 0],
[4, 8, 6],
[7, 8, 9]
], a)
def test_solve(self):
cases = (
([[3, 5],
[1, 2]],
[4, 1],
[3, -1]),
)
for m, y, expected in cases:
m = Matrix(m)
y = Vector(y)
x = m.solve(y)
self.assertEqual(expected, x)
class TestPolynomial(unittest.TestCase):
def test_partial_derivative(self):
xs = [1, 2, 3, 4]
ys = [6, 5, 7, 10]
m, yys = Polynomial.get_fitting_equation(xs, ys, degree=1)
self.assertEqual([[8, 20],
[20, 60], ], m)
self.assertEqual([56, 154], yys)
def test_fit(self):
xs = [1, 2, 3, 4]
ys = [6, 5, 7, 10]
coef = Polynomial.fit(xs, ys, degree=1)
self.assertEqual([3.5, 1.4], coef)
coef = Polynomial.fit(xs, ys, degree=2)
self.assertEqual([8.5, -3.6, 1], coef)
def test_plot(self):
# TODO this is a sample
print
xs = [1, 2, 3, 4]
ys = [6, 5, 7, 10]
for deg in (0, 1, 2, 3):
poly = Polynomial.fit(xs, ys, degree=deg)
print 'y =', poly
y5 = Polynomial.evaluate(poly, 5)
print 'y(5) =', y5
p1 = Polynomial.fit(xs, ys, degree=1)
for l in Polynomial.plot([(p1, '.'), (poly, 'o'), ],
(0, 6),
width=60, height=12, points=zip(xs + [5], ys + [y5], ['X', 'X', 'X', 'X', '*'])):
print l
|
emedvedev/st2
|
refs/heads/master
|
st2actions/st2actions/notifier/notifier.py
|
1
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from kombu import Connection
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_FAILED_STATES
from st2common.constants.action import LIVEACTION_COMPLETED_STATES
from st2common.constants.triggers import INTERNAL_TRIGGER_TYPES
from st2common.models.api.trace import TraceContext
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.action import Action
from st2common.persistence.policy import Policy
from st2common import policies
from st2common.models.system.common import ResourceReference
from st2common.persistence.execution import ActionExecution
from st2common.services import trace as trace_service
from st2common.transport import consumers, liveaction, publishers
from st2common.transport import utils as transport_utils
from st2common.transport.reactor import TriggerDispatcher
from st2common.util import isotime
from st2common.util import jinja as jinja_utils
from st2common.constants.action import ACTION_CONTEXT_KV_PREFIX
from st2common.constants.action import ACTION_PARAMETERS_KV_PREFIX
from st2common.constants.action import ACTION_RESULTS_KV_PREFIX
from st2common.constants.keyvalue import SYSTEM_SCOPE
from st2common.services.keyvalues import KeyValueLookup
__all__ = [
'Notifier',
'get_notifier'
]
LOG = logging.getLogger(__name__)
ACTIONUPDATE_WORK_Q = liveaction.get_queue('st2.notifiers.work',
routing_key=publishers.UPDATE_RK)
ACTION_SENSOR_ENABLED = cfg.CONF.action_sensor.enable
# XXX: Fix this nasty positional dependency.
ACTION_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][0]
NOTIFY_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][1]
class Notifier(consumers.MessageHandler):
message_type = LiveActionDB
def __init__(self, connection, queues, trigger_dispatcher=None):
super(Notifier, self).__init__(connection, queues)
if not trigger_dispatcher:
trigger_dispatcher = TriggerDispatcher(LOG)
self._trigger_dispatcher = trigger_dispatcher
self._notify_trigger = ResourceReference.to_string_reference(
pack=NOTIFY_TRIGGER_TYPE['pack'],
name=NOTIFY_TRIGGER_TYPE['name'])
self._action_trigger = ResourceReference.to_string_reference(
pack=ACTION_TRIGGER_TYPE['pack'],
name=ACTION_TRIGGER_TYPE['name'])
def process(self, liveaction):
live_action_id = str(liveaction.id)
extra = {'live_action_db': liveaction}
LOG.debug('Processing liveaction %s', live_action_id, extra=extra)
if liveaction.status not in LIVEACTION_COMPLETED_STATES:
LOG.debug('Skipping processing of liveaction %s since it\'s not in a completed state' %
(live_action_id), extra=extra)
return
execution = self._get_execution_for_liveaction(liveaction)
if not execution:
LOG.exception('Execution object corresponding to LiveAction %s not found.',
live_action_id, extra=extra)
return None
self._apply_post_run_policies(liveaction_db=liveaction)
if liveaction.notify is not None:
self._post_notify_triggers(liveaction=liveaction, execution=execution)
self._post_generic_trigger(liveaction=liveaction, execution=execution)
def _get_execution_for_liveaction(self, liveaction):
execution = ActionExecution.get(liveaction__id=str(liveaction.id))
if not execution:
return None
return execution
def _post_notify_triggers(self, liveaction=None, execution=None):
notify = getattr(liveaction, 'notify', None)
if not notify:
return
if notify.on_complete:
self._post_notify_subsection_triggers(
liveaction=liveaction, execution=execution,
notify_subsection=notify.on_complete,
default_message_suffix='completed.')
if liveaction.status == LIVEACTION_STATUS_SUCCEEDED and notify.on_success:
self._post_notify_subsection_triggers(
liveaction=liveaction, execution=execution,
notify_subsection=notify.on_success,
default_message_suffix='succeeded.')
if liveaction.status in LIVEACTION_FAILED_STATES and notify.on_failure:
self._post_notify_subsection_triggers(
liveaction=liveaction, execution=execution,
notify_subsection=notify.on_failure,
default_message_suffix='failed.')
def _post_notify_subsection_triggers(self, liveaction=None, execution=None,
notify_subsection=None,
default_message_suffix=None):
routes = (getattr(notify_subsection, 'routes') or
getattr(notify_subsection, 'channels', None))
execution_id = str(execution.id)
if routes and len(routes) >= 1:
payload = {}
message = notify_subsection.message or (
'Action ' + liveaction.action + ' ' + default_message_suffix)
data = notify_subsection.data or {}
jinja_context = self._build_jinja_context(liveaction=liveaction, execution=execution)
try:
message = self._transform_message(message=message,
context=jinja_context)
except:
LOG.exception('Failed (Jinja) transforming `message`.')
try:
data = self._transform_data(data=data, context=jinja_context)
except:
LOG.exception('Failed (Jinja) transforming `data`.')
            # At this point convert result to a string. This restricts the rules
            # engine's ability to introspect the result. On the other hand, at
            # least a JSON-usable result is sent as part of the notification. If
            # Jinja is required to convert to a string representation, it uses
            # str(...), which makes it impossible to parse the result as JSON.
# TODO: Use to_serializable_dict
data['result'] = json.dumps(liveaction.result)
payload['message'] = message
payload['data'] = data
payload['execution_id'] = execution_id
payload['status'] = liveaction.status
payload['start_timestamp'] = isotime.format(liveaction.start_timestamp)
payload['end_timestamp'] = isotime.format(liveaction.end_timestamp)
payload['action_ref'] = liveaction.action
payload['runner_ref'] = self._get_runner_ref(liveaction.action)
trace_context = self._get_trace_context(execution_id=execution_id)
failed_routes = []
for route in routes:
try:
payload['route'] = route
# Deprecated. Only for backward compatibility reasons.
payload['channel'] = route
LOG.debug('POSTing %s for %s. Payload - %s.', NOTIFY_TRIGGER_TYPE['name'],
liveaction.id, payload)
self._trigger_dispatcher.dispatch(self._notify_trigger, payload=payload,
trace_context=trace_context)
except:
failed_routes.append(route)
if len(failed_routes) > 0:
raise Exception('Failed notifications to routes: %s' % ', '.join(failed_routes))
def _build_jinja_context(self, liveaction, execution):
context = {SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)}
context.update({ACTION_PARAMETERS_KV_PREFIX: liveaction.parameters})
context.update({ACTION_CONTEXT_KV_PREFIX: liveaction.context})
context.update({ACTION_RESULTS_KV_PREFIX: execution.result})
return context
def _transform_message(self, message, context=None):
mapping = {'message': message}
context = context or {}
return (jinja_utils.render_values(mapping=mapping, context=context)).get('message',
message)
def _transform_data(self, data, context=None):
return jinja_utils.render_values(mapping=data, context=context)
def _get_trace_context(self, execution_id):
trace_db = trace_service.get_trace_db_by_action_execution(
action_execution_id=execution_id)
if trace_db:
return TraceContext(id_=str(trace_db.id), trace_tag=trace_db.trace_tag)
        # If no trace_context is found then do not create a new one here. If
        # necessary it will be created downstream. Admittedly this is
        # implementation leakage of some sort.
return None
def _post_generic_trigger(self, liveaction=None, execution=None):
if not ACTION_SENSOR_ENABLED:
LOG.debug('Action trigger is disabled, skipping trigger dispatch...')
return
execution_id = str(execution.id)
payload = {'execution_id': execution_id,
'status': liveaction.status,
'start_timestamp': str(liveaction.start_timestamp),
# deprecate 'action_name' at some point and switch to 'action_ref'
'action_name': liveaction.action,
'action_ref': liveaction.action,
'runner_ref': self._get_runner_ref(liveaction.action),
'parameters': liveaction.get_masked_parameters(),
'result': liveaction.result}
# Use execution_id to extract trace rather than liveaction. execution_id
# will look-up an exact TraceDB while liveaction depending on context
# may not end up going to the DB.
trace_context = self._get_trace_context(execution_id=execution_id)
LOG.debug('POSTing %s for %s. Payload - %s. TraceContext - %s',
ACTION_TRIGGER_TYPE['name'], liveaction.id, payload, trace_context)
self._trigger_dispatcher.dispatch(self._action_trigger, payload=payload,
trace_context=trace_context)
def _apply_post_run_policies(self, liveaction_db):
# Apply policies defined for the action.
policy_dbs = Policy.query(resource_ref=liveaction_db.action, enabled=True)
LOG.debug('Applying %s post_run policies' % (len(policy_dbs)))
for policy_db in policy_dbs:
driver = policies.get_driver(policy_db.ref,
policy_db.policy_type,
**policy_db.parameters)
try:
LOG.debug('Applying post_run policy "%s" (%s) for liveaction %s' %
(policy_db.ref, policy_db.policy_type, str(liveaction_db.id)))
liveaction_db = driver.apply_after(liveaction_db)
except:
LOG.exception('An exception occurred while applying policy "%s".', policy_db.ref)
return liveaction_db
def _get_runner_ref(self, action_ref):
"""
Retrieve a runner reference for the provided action.
:rtype: ``str``
"""
action = Action.get_by_ref(action_ref)
return action['runner_type']['name']
def get_notifier():
with Connection(transport_utils.get_messaging_urls()) as conn:
return Notifier(conn, [ACTIONUPDATE_WORK_Q], trigger_dispatcher=TriggerDispatcher(LOG))
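# Illustrative sketch (not part of the original file): the round trip implied
# by the comment in _post_notify_subsection_triggers above. The result is sent
# as a JSON string so a consumer can still parse it, which Jinja's str(...)
# rendering would not allow. The payload shape here is a hypothetical example.
def _example_result_round_trip():
    result = {'stdout': 'ok', 'exit_code': 0}
    payload_data = {'result': json.dumps(result)}   # what the notifier sends
    recovered = json.loads(payload_data['result'])  # what a consumer can do
    assert recovered == result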
|
shahbaz17/zamboni
|
refs/heads/master
|
mkt/account/urls.py
|
2
|
from django.conf.urls import include, patterns, url
from mkt.account.views import (AccountView, FeedbackView, FxALoginView,
GroupsViewSet, InstalledViewSet, LoginView,
LogoutView, NewsletterView, PermissionsView)
from mkt.feed.views import FeedShelfViewSet
from mkt.users import views
drf_patterns = patterns(
'',
url('^feedback/$', FeedbackView.as_view(), name='account-feedback'),
url('^installed/mine/$',
InstalledViewSet.as_view({'get': 'list'}), name='installed-apps'),
url('^installed/mine/remove_app/$',
InstalledViewSet.as_view({'post': 'remove_app'}),
name='installed-apps-remove'),
# Native FxA login view.
url('^login/$', LoginView.as_view(), name='account-login'),
# Oauth FxA login view.
url('^fxa-login/$', FxALoginView.as_view(), name='fxa-account-login'),
url('^logout/$', LogoutView.as_view(), name='account-logout'),
url('^newsletter/$', NewsletterView.as_view(), name='account-newsletter'),
url('^permissions/(?P<pk>[^/]+)/$', PermissionsView.as_view(),
name='account-permissions'),
url('^settings/(?P<pk>[^/]+)/$', AccountView.as_view(),
name='account-settings'),
url(r'^shelves/$', FeedShelfViewSet.as_view(
{'get': 'mine'}), name='feedshelves-mine'),
url('^groups/(?P<pk>[^/]+)/$',
GroupsViewSet.as_view({'get': 'list', 'post': 'create',
'delete': 'destroy'}),
name='account-groups'),
)
api_patterns = patterns(
'',
url('^account/', include(drf_patterns)),
)
user_patterns = patterns(
'',
url('^ajax$', views.ajax, name='users.ajax'),
)
|
RyanChinSang/ECNG3020-ORSS4SCVI
|
refs/heads/master
|
BETA/TestCode/Tensorflow/Test/object_detection/protos/input_reader_pb2.py
|
7
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/input_reader.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/input_reader.proto',
package='object_detection.protos',
serialized_pb=_b('\n*object_detection/protos/input_reader.proto\x12\x17object_detection.protos\"\xff\x02\n\x0bInputReader\x12\x18\n\x0elabel_map_path\x18\x01 \x01(\t:\x00\x12\x15\n\x07shuffle\x18\x02 \x01(\x08:\x04true\x12\x1c\n\x0equeue_capacity\x18\x03 \x01(\r:\x04\x32\x30\x30\x30\x12\x1f\n\x11min_after_dequeue\x18\x04 \x01(\r:\x04\x31\x30\x30\x30\x12\x15\n\nnum_epochs\x18\x05 \x01(\r:\x01\x30\x12\x16\n\x0bnum_readers\x18\x06 \x01(\r:\x01\x38\x12\"\n\x13load_instance_masks\x18\x07 \x01(\x08:\x05\x66\x61lse\x12N\n\x16tf_record_input_reader\x18\x08 \x01(\x0b\x32,.object_detection.protos.TFRecordInputReaderH\x00\x12M\n\x15\x65xternal_input_reader\x18\t \x01(\x0b\x32,.object_detection.protos.ExternalInputReaderH\x00\x42\x0e\n\x0cinput_reader\"+\n\x13TFRecordInputReader\x12\x14\n\ninput_path\x18\x01 \x01(\t:\x00\"\x1c\n\x13\x45xternalInputReader*\x05\x08\x01\x10\xe8\x07')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_INPUTREADER = _descriptor.Descriptor(
name='InputReader',
full_name='object_detection.protos.InputReader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label_map_path', full_name='object_detection.protos.InputReader.label_map_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shuffle', full_name='object_detection.protos.InputReader.shuffle', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='queue_capacity', full_name='object_detection.protos.InputReader.queue_capacity', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=2000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_after_dequeue', full_name='object_detection.protos.InputReader.min_after_dequeue', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_epochs', full_name='object_detection.protos.InputReader.num_epochs', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_readers', full_name='object_detection.protos.InputReader.num_readers', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=8,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='load_instance_masks', full_name='object_detection.protos.InputReader.load_instance_masks', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tf_record_input_reader', full_name='object_detection.protos.InputReader.tf_record_input_reader', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='external_input_reader', full_name='object_detection.protos.InputReader.external_input_reader', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='input_reader', full_name='object_detection.protos.InputReader.input_reader',
index=0, containing_type=None, fields=[]),
],
serialized_start=72,
serialized_end=455,
)
_TFRECORDINPUTREADER = _descriptor.Descriptor(
name='TFRecordInputReader',
full_name='object_detection.protos.TFRecordInputReader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input_path', full_name='object_detection.protos.TFRecordInputReader.input_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=457,
serialized_end=500,
)
_EXTERNALINPUTREADER = _descriptor.Descriptor(
name='ExternalInputReader',
full_name='object_detection.protos.ExternalInputReader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
extension_ranges=[(1, 1000), ],
oneofs=[
],
serialized_start=502,
serialized_end=530,
)
_INPUTREADER.fields_by_name['tf_record_input_reader'].message_type = _TFRECORDINPUTREADER
_INPUTREADER.fields_by_name['external_input_reader'].message_type = _EXTERNALINPUTREADER
_INPUTREADER.oneofs_by_name['input_reader'].fields.append(
_INPUTREADER.fields_by_name['tf_record_input_reader'])
_INPUTREADER.fields_by_name['tf_record_input_reader'].containing_oneof = _INPUTREADER.oneofs_by_name['input_reader']
_INPUTREADER.oneofs_by_name['input_reader'].fields.append(
_INPUTREADER.fields_by_name['external_input_reader'])
_INPUTREADER.fields_by_name['external_input_reader'].containing_oneof = _INPUTREADER.oneofs_by_name['input_reader']
DESCRIPTOR.message_types_by_name['InputReader'] = _INPUTREADER
DESCRIPTOR.message_types_by_name['TFRecordInputReader'] = _TFRECORDINPUTREADER
DESCRIPTOR.message_types_by_name['ExternalInputReader'] = _EXTERNALINPUTREADER
InputReader = _reflection.GeneratedProtocolMessageType('InputReader', (_message.Message,), dict(
DESCRIPTOR = _INPUTREADER,
__module__ = 'object_detection.protos.input_reader_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.InputReader)
))
_sym_db.RegisterMessage(InputReader)
TFRecordInputReader = _reflection.GeneratedProtocolMessageType('TFRecordInputReader', (_message.Message,), dict(
DESCRIPTOR = _TFRECORDINPUTREADER,
__module__ = 'object_detection.protos.input_reader_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.TFRecordInputReader)
))
_sym_db.RegisterMessage(TFRecordInputReader)
ExternalInputReader = _reflection.GeneratedProtocolMessageType('ExternalInputReader', (_message.Message,), dict(
DESCRIPTOR = _EXTERNALINPUTREADER,
__module__ = 'object_detection.protos.input_reader_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ExternalInputReader)
))
_sym_db.RegisterMessage(ExternalInputReader)
# @@protoc_insertion_point(module_scope)
|
ambitioninc/django-query-builder
|
refs/heads/develop
|
querybuilder/tests/utils.py
|
2
|
def get_postgres_version():
"""We get the version as an integer. Last two digits are patch
version, two before that are minor version, start is the major
version. Return the version as a 3-tuple.
Eg: 90311 -> 9.3.11 -> (9, 3, 11)
"""
# Import this here so we can import this method before we have a
# database to connect to.
from django.db import connection
version = connection.pg_version
patch_version = version % 100
    # Use floor division so the components stay integers on both Python 2 and 3.
    minor_version = (version - patch_version) // 100 % 100
    major_version = (version - patch_version - minor_version * 100) // 10000
return (major_version, minor_version, patch_version)
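# Illustrative sketch (not part of the original file): the decoding above
# applied to the docstring's example value. Purely arithmetic, so it needs
# no database connection.
def _example_decode_pg_version():
    version = 90311
    assert version % 100 == 11                       # patch
    assert (version - 11) // 100 % 100 == 3          # minor
    assert (version - 11 - 3 * 100) // 10000 == 9    # major
    # i.e. 90311 -> (9, 3, 11), matching get_postgres_version()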
|
muntasirsyed/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/CommentIncluded.before.py
|
79
|
<selection>tmp = "!" #try to extract this assignment, either with or without this comment</selection>
def bar(self):
pass
|
40223136/w17test1
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/threading.py
|
730
|
"""Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
from time import sleep as _sleep
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k), so this module provides them as an
# alias for the PEP 8 compliant names.
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
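# Illustrative sketch (not part of the original module): a trace function of
# the shape settrace() expects. It is only defined here, not installed;
# passing it to settrace() would apply it to threads started afterwards.
def _example_trace(frame, event, arg):
    # Returning None disables further local tracing for this frame.
    return None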
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
if _CRLock is None:
return _PyRLock(*args, **kwargs)
return _CRLock(*args, **kwargs)
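# Illustrative sketch (not part of the original module): the reentrancy
# property described above. The same thread may nest acquisitions, and the
# lock is only freed after a matching number of releases. Defined as a
# function so importing this module stays side effect free.
def _example_rlock_reentrancy():
    lock = RLock()
    with lock:        # first acquisition
        with lock:    # same thread re-enters without blocking
            pass
    # fully released here; another thread could now acquire it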
class _RLock:
"""This class implements reentrant lock objects.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self):
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self._count)
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count = self._count + 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self._owner != get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
if self._count == 0:
raise RuntimeError("cannot release un-acquired lock")
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == get_ident()
_PyRLock = _RLock
class Condition:
"""Class that implements a condition variable.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = []
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self._lock.acquire(0):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notify_all() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
if not gotit:
try:
self._waiters.remove(waiter)
except ValueError:
pass
return gotit
finally:
self._acquire_restore(saved_state)
def wait_for(self, predicate, timeout=None):
"""Wait until a condition evaluates to True.
        predicate should be a callable whose result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self._waiters
waiters = __waiters[:n]
if not waiters:
return
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self._waiters))
notifyAll = notify_all
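# Illustrative sketch (not part of the original module): the producer/consumer
# handshake that wait_for() is designed for. The shared `items` list is a
# hypothetical resource. Defined as a function so importing this module stays
# side effect free.
def _example_condition_wait_for():
    cond = Condition()
    items = []
    def produce(value):
        with cond:
            items.append(value)
            cond.notify()           # wake one waiting consumer
    def consume():
        with cond:
            cond.wait_for(lambda: len(items) > 0)
            return items.pop(0)     # the lock is held again here
    return produce, consume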
class Semaphore:
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value = self._value - 1
rc = True
return rc
__enter__ = acquire
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self._cond:
self._value = self._value + 1
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
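# Illustrative sketch (not part of the original module): using a Semaphore to
# cap concurrency, as the class docstring describes. `max_workers` is a
# hypothetical limit. Defined as a function so importing stays side effect
# free.
def _example_semaphore_gate(max_workers=3):
    gate = Semaphore(max_workers)
    def guarded(work):
        with gate:       # blocks once max_workers calls are in flight
            return work()
    return guarded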
class BoundedSemaphore(Semaphore):
"""Implements a bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
with self._cond:
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
self._value += 1
self._cond.notify()
class Event:
"""Class implementing event objects.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true. The flag is initially false.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self):
self._cond = Condition(Lock())
self._flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self._cond.__init__()
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
isSet = is_set
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
self._cond.acquire()
try:
self._flag = True
self._cond.notify_all()
finally:
self._cond.release()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
self._cond.acquire()
try:
self._flag = False
finally:
self._cond.release()
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
self._cond.acquire()
try:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
finally:
self._cond.release()
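# Illustrative sketch (not part of the original module): the set()/wait()
# handshake described above. One thread blocks in wait() until another calls
# set(). Defined as a function so importing stays side effect free.
def _example_event_handshake():
    ready = Event()
    def waiter(timeout=None):
        return ready.wait(timeout)   # True once set() has been called
    def signaller():
        ready.set()                  # wakes every thread blocked in wait()
    return waiter, signaller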
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
"""Implements a Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
    points. Threads block on 'wait()' and are released simultaneously once
    they have all made that call.
"""
def __init__(self, parties, action=None, timeout=None):
"""Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called by one of
the threads after they have all entered the barrier and just prior to
        releasing them all. If a 'timeout' is provided, it is used as the
default for all subsequent 'wait()' calls.
"""
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
        self._state = 0 #0 filling, 1 draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""Wait for the barrier.
When the specified number of threads have started waiting, they are all
simultaneously awoken. If an 'action' was provided for the barrier, one
of the threads will have executed that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
    # Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break()
def _break(self):
        # An internal error was detected. The barrier is set to
        # a broken state and all parties awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""Return the number of threads required to trip the barrier."""
return self._parties
@property
def n_waiting(self):
"""Return the number of threads currently waiting at the barrier."""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""Return True if the barrier is in a broken state."""
return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
pass
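# Illustrative sketch (not part of the original module): a Barrier gathering
# `parties` hypothetical worker threads at a common point, with an optional
# action run by exactly one of them just before release.
def _example_barrier(parties=2):
    done = []
    barrier = Barrier(parties, action=lambda: done.append(True))
    def worker():
        index = barrier.wait()   # blocks until all `parties` threads arrive
        return index             # unique value in 0 .. parties-1
    return barrier, worker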
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# For debug and leak testing
_dangling = WeakSet()
# Main class for threads
class Thread:
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion. There are two ways
to specify the activity: by passing a callable object to the constructor, or
by overriding the run() method in a subclass.
"""
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
#XXX __exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, *, daemon=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = current_thread().daemon
self._ident = None
self._started = Event()
self._stopped = False
self._block = Condition(Lock())
self._initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self._stderr = _sys.stderr
_dangling.add(self)
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_block'): # DummyThread deletes _block
self._block.__init__()
self._started._reset_internal_locks()
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
if self._stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = get_ident()
def _bootstrap_inner(self):
try:
self._set_ident()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if _trace_hook:
_sys.settrace(_trace_hook)
if _profile_hook:
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
pass
except:
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self._stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self._exc_info()
try:
print((
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):"), file=self._stderr)
print((
"Traceback (most recent call last):"), file=self._stderr)
while exc_tb:
print((
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
exc_tb = exc_tb.tb_next
print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
#XXX self.__exc_clear()
pass
finally:
with _active_limbo_lock:
self._stop()
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[get_ident()]
except:
pass
def _stop(self):
self._block.acquire()
self._stopped = True
self._block.notify_all()
self._block.release()
def _delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with _dummy_thread:
#
# Must take care to not raise an exception if _dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). _dummy_thread.get_ident() always returns -1 since
# there is only one thread if _dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from _dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
isAlive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
self._block.acquire()
try:
if timeout is None:
while not self._stopped:
self._block.wait()
else:
deadline = _time() + timeout
while not self._stopped:
delay = deadline - _time()
if delay <= 0:
break
self._block.wait(delay)
finally:
self._block.release()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the thread.get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self._initialized, "Thread.__init__() not called"
return self._ident
def is_alive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self._initialized, "Thread.__init__() not called"
return self._started.is_set() and not self._stopped
isAlive = is_alive
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when no alive non-daemon threads are
left.
"""
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
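# Illustrative usage sketch of the Thread API above (the target/args
# constructor parameters are defined earlier in this module and are
# assumed here):
#
#     def worker(n):
#         print("working on", n)
#
#     t = Thread(target=worker, args=(3,), name="worker-1")
#     t.start()
#     t.join()
#     assert not t.is_alive()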
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=None, kwargs=None)
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=None, kwargs=None):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False)
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _exitfunc(self):
self._stop()
t = _pickSomeNonDaemonThread()
while t:
t.join()
t = _pickSomeNonDaemonThread()
self._delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
# Thread._block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._block
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _stop(self):
pass
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[get_ident()]
except KeyError:
return _DummyThread()
currentThread = current_thread
def active_count():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
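# Illustrative sketch: barring threads starting or exiting between the
# two calls, active_count() matches the length of this snapshot:
#
#     assert active_count() == len(enumerate())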
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _enumerate():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
|
kingvuplus/xx-v
|
refs/heads/master
|
lib/python/Tools/__init__.py
|
12133432
| |
Endika/partner-contact
|
refs/heads/8.0
|
partner_external_maps/partner_external_maps.py
|
22
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Partner External Maps module for Odoo
# Copyright (C) 2015 Akretion (http://www.akretion.com/)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
from openerp.exceptions import Warning
import logging
logger = logging.getLogger(__name__)
class MapWebsite(models.Model):
_name = 'map.website'
_description = 'Map Website'
name = fields.Char(string='Map Website Name', required=True)
address_url = fields.Char(
string='URL that uses the address',
help="In this URL, {ADDRESS} will be replaced by the address.")
lat_lon_url = fields.Char(
string='URL that uses latitude and longitude',
help="In this URL, {LATITUDE} and {LONGITUDE} will be replaced by "
"the latitude and longitude (requires the module 'base_geolocalize')")
route_address_url = fields.Char(
string='Route URL that uses the addresses',
help="In this URL, {START_ADDRESS} and {DEST_ADDRESS} will be "
"replaced by the start and destination addresses.")
route_lat_lon_url = fields.Char(
string='Route URL that uses latitude and longitude',
help="In this URL, {START_LATITUDE}, {START_LONGITUDE}, "
"{DEST_LATITUDE} and {DEST_LONGITUDE} will be replaced by the "
"latitude and longitude of the start and destination adresses "
"(requires the module 'base_geolocalize').")
class ResUsers(models.Model):
_inherit = 'res.users'
@api.model
def _default_map_website(self):
map_website = self.env['map.website'].search([
'|', ('address_url', '!=', False), ('lat_lon_url', '!=', False)],
limit=1)
return map_website
@api.model
def _default_route_map_website(self):
map_route_website = self.env['map.website'].search([
'|',
('route_address_url', '!=', False),
('route_lat_lon_url', '!=', False)], limit=1)
return map_route_website
# begin with context_ to allow users to change it themselves
context_map_website_id = fields.Many2one(
'map.website', string='Map Website',
domain=[
'|', ('address_url', '!=', False), ('lat_lon_url', '!=', False)],
default=_default_map_website)
# We want to give the possibility to the user to have one map provider for
# regular maps and another one for routing
context_route_map_website_id = fields.Many2one(
'map.website', string='Route Map Website',
domain=[
'|',
('route_address_url', '!=', False),
('route_lat_lon_url', '!=', False)],
default=_default_route_map_website,
help="Map provided used when you click on the car icon on the partner "
"form to display an itinerary.")
context_route_start_partner_id = fields.Many2one(
'res.partner', string='Start Address for Route Map')
@api.model
def _default_map_settings(self):
"""Method called from post-install script
I can't use a default method on the field, because it would be executed
before loading map_website_data.xml, so it would not be able to set a
value"""
users = self.env['res.users'].search([])
map_website = self._default_map_website()
map_route_website = self._default_route_map_website()
logger.info('Updating user settings for maps...')
for user in users:
user.write({
'context_map_website_id': map_website.id or False,
'context_route_map_website_id': map_route_website.id or False,
'context_route_start_partner_id': user.partner_id.id or False,
})
class ResPartner(models.Model):
_inherit = 'res.partner'
@api.model
def _address_as_string(self):
addr = []
if self.street:
addr.append(self.street)
if self.street2:
addr.append(self.street2)
if self.city:
addr.append(self.city)
if self.state_id:
addr.append(self.state_id.name)
if self.country_id:
addr.append(self.country_id.name)
if not addr:
raise Warning(
_("Address missing on partner '%s'.") % self.name)
address = ' '.join(addr)
return address
@api.model
def _prepare_url(self, url, replace):
assert url, 'Missing URL'
for key, value in replace.iteritems():
if not isinstance(value, (str, unicode)):
# for latitude and longitude which are floats
value = unicode(value)
url = url.replace(key, value)
logger.debug('Final URL: %s', url)
return url
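# Illustrative call sketch (hypothetical URL): each placeholder key of
# the replace dict is substituted literally, floats being coerced to
# unicode first; note that no URL-encoding is performed here:
#
#     self._prepare_url('https://maps.example.com/?q={ADDRESS}',
#                       {'{ADDRESS}': 'Lyon France'})
#     # -> 'https://maps.example.com/?q=Lyon France'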
@api.multi
def open_map(self):
if not self.env.user.context_map_website_id:
raise Warning(
_('Missing map provider: '
'you should set it in your preferences.'))
map_website = self.env.user.context_map_website_id
if (
map_website.lat_lon_url and
hasattr(self, 'partner_latitude') and
self.partner_latitude and self.partner_longitude):
url = self._prepare_url(
map_website.lat_lon_url, {
'{LATITUDE}': self.partner_latitude,
'{LONGITUDE}': self.partner_longitude})
else:
if not map_website.address_url:
raise Warning(
_("Missing parameter 'URL that uses the address' "
"for map website '%s'.") % map_website.name)
url = self._prepare_url(
map_website.address_url,
{'{ADDRESS}': self._address_as_string()})
return {
'type': 'ir.actions.act_url',
'url': url,
'target': 'new',
}
@api.multi
def open_route_map(self):
if not self.env.user.context_route_map_website_id:
raise Warning(
_('Missing route map website: '
'you should set it in your preferences.'))
map_website = self.env.user.context_route_map_website_id
if not self.env.user.context_route_start_partner_id:
raise Warning(
_('Missing start address for route map: '
'you should set it in your preferences.'))
start_partner = self.env.user.context_route_start_partner_id
if (
map_website.route_lat_lon_url and
hasattr(self, 'partner_latitude') and
self.partner_latitude and
self.partner_longitude and
start_partner.partner_latitude and
start_partner.partner_longitude):
url = self._prepare_url(
map_website.route_lat_lon_url, {
'{START_LATITUDE}': start_partner.partner_latitude,
'{START_LONGITUDE}': start_partner.partner_longitude,
'{DEST_LATITUDE}': self.partner_latitude,
'{DEST_LONGITUDE}': self.partner_longitude})
else:
if not map_website.route_address_url:
raise Warning(
_("Missing route URL that uses the addresses "
"for the map website '%s'") % map_website.name)
url = self._prepare_url(
map_website.route_address_url, {
'{START_ADDRESS}': start_partner._address_as_string(),
'{DEST_ADDRESS}': self._address_as_string()})
return {
'type': 'ir.actions.act_url',
'url': url,
'target': 'new',
}
|
clouddocx/boto
|
refs/heads/master
|
boto/ec2containerservice/layer1.py
|
135
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.ec2containerservice import exceptions
class EC2ContainerServiceConnection(AWSQueryConnection):
"""
Amazon EC2 Container Service (Amazon ECS) is a highly scalable,
fast, container management service that makes it easy to run,
stop, and manage Docker containers on a cluster of Amazon EC2
instances. Amazon ECS lets you launch and stop container-enabled
applications with simple API calls, allows you to get the state of
your cluster from a centralized service, and gives you access to
many familiar Amazon EC2 features like security groups, Amazon EBS
volumes, and IAM roles.
You can use Amazon ECS to schedule the placement of containers
across your cluster based on your resource needs, isolation
policies, and availability requirements. Amazon EC2 Container
Service eliminates the need for you to operate your own cluster
management and configuration management systems or worry about
scaling your management infrastructure.
"""
APIVersion = "2014-11-13"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "ecs.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"ServerException": exceptions.ServerException,
"ClientException": exceptions.ClientException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(EC2ContainerServiceConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_cluster(self, cluster_name=None):
"""
Creates a new Amazon ECS cluster. By default, your account
will receive a `default` cluster when you launch your first
container instance. However, you can create your own cluster
with a unique name with the `CreateCluster` action.
During the preview, each account is limited to two clusters.
:type cluster_name: string
:param cluster_name: The name of your cluster. If you do not specify a
name for your cluster, you will create a cluster named `default`.
"""
params = {}
if cluster_name is not None:
params['clusterName'] = cluster_name
return self._make_request(
action='CreateCluster',
verb='POST',
path='/', params=params)
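# Illustrative usage sketch (assumes AWS credentials are configured and
# that connect_to_region is exposed by boto.ec2containerservice, as it
# is for other boto services):
#
#     import boto.ec2containerservice
#     conn = boto.ec2containerservice.connect_to_region('us-east-1')
#     conn.create_cluster('my-cluster')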
def delete_cluster(self, cluster):
"""
Deletes the specified cluster. You must deregister all
container instances from this cluster before you may delete
it. You can list the container instances in a cluster with
ListContainerInstances and deregister them with
DeregisterContainerInstance.
:type cluster: string
:param cluster: The cluster you want to delete.
"""
params = {'cluster': cluster, }
return self._make_request(
action='DeleteCluster',
verb='POST',
path='/', params=params)
def deregister_container_instance(self, container_instance, cluster=None,
force=None):
"""
Deregisters an Amazon ECS container instance from the
specified cluster. This instance will no longer be available
to run tasks.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instance you want to
deregister. If you do not specify a cluster, the default cluster is
assumed.
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance you want to
deregister. The ARN contains the `arn:aws:ecs` namespace, followed
by the region of the container instance, the AWS account ID of the
container instance owner, the `container-instance` namespace, and
then the container instance UUID. For example, arn:aws:ecs: region
: aws_account_id :container-instance/ container_instance_UUID .
:type force: boolean
:param force: Force the deregistration of the container instance. You
can use the `force` parameter if you have several tasks running on
a container instance and you don't want to run `StopTask` for each
task before deregistering the container instance.
"""
params = {'containerInstance': container_instance, }
if cluster is not None:
params['cluster'] = cluster
if force is not None:
params['force'] = str(
force).lower()
return self._make_request(
action='DeregisterContainerInstance',
verb='POST',
path='/', params=params)
def deregister_task_definition(self, task_definition):
"""
Deregisters the specified task definition. You will no longer
be able to run tasks from this definition after
deregistration.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to deregister.
"""
params = {'taskDefinition': task_definition, }
return self._make_request(
action='DeregisterTaskDefinition',
verb='POST',
path='/', params=params)
def describe_clusters(self, clusters=None):
"""
Describes one or more of your clusters.
:type clusters: list
:param clusters: A space-separated list of cluster names or full
cluster Amazon Resource Name (ARN) entries. If you do not specify a
cluster, the default cluster is assumed.
"""
params = {}
if clusters is not None:
self.build_list_params(params,
clusters,
'clusters.member')
return self._make_request(
action='DescribeClusters',
verb='POST',
path='/', params=params)
def describe_container_instances(self, container_instances, cluster=None):
"""
Describes Amazon EC2 Container Service container instances.
Returns metadata about registered and remaining resources on
each container instance requested.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instances you want to
describe. If you do not specify a cluster, the default cluster is
assumed.
:type container_instances: list
:param container_instances: A space-separated list of container
instance UUIDs or full Amazon Resource Name (ARN) entries.
"""
params = {}
self.build_list_params(params,
container_instances,
'containerInstances.member')
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='DescribeContainerInstances',
verb='POST',
path='/', params=params)
def describe_task_definition(self, task_definition):
"""
Describes a task definition.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to describe.
"""
params = {'taskDefinition': task_definition, }
return self._make_request(
action='DescribeTaskDefinition',
verb='POST',
path='/', params=params)
def describe_tasks(self, tasks, cluster=None):
"""
Describes a specified task or tasks.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task you want to describe. If you do not
specify a cluster, the default cluster is assumed.
:type tasks: list
:param tasks: A space-separated list of task UUIDs or full Amazon
Resource Name (ARN) entries.
"""
params = {}
self.build_list_params(params,
tasks,
'tasks.member')
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='DescribeTasks',
verb='POST',
path='/', params=params)
def discover_poll_endpoint(self, container_instance=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Returns an endpoint for the Amazon EC2 Container Service agent
to poll for updates.
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance. The ARN contains the
`arn:aws:ecs` namespace, followed by the region of the container
instance, the AWS account ID of the container instance owner, the
`container-instance` namespace, and then the container instance
UUID. For example, arn:aws:ecs: region : aws_account_id :container-
instance/ container_instance_UUID .
"""
params = {}
if container_instance is not None:
params['containerInstance'] = container_instance
return self._make_request(
action='DiscoverPollEndpoint',
verb='POST',
path='/', params=params)
def list_clusters(self, next_token=None, max_results=None):
"""
Returns a list of existing clusters.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListClusters` request where `maxResults` was used and
the results exceeded the value of that parameter. Pagination
continues from the end of the previous results that returned the
`nextToken` value. This value is `null` when there are no more
results to return.
:type max_results: integer
:param max_results: The maximum number of cluster results returned by
`ListClusters` in paginated output. When this parameter is used,
`ListClusters` only returns `maxResults` results in a single page
along with a `nextToken` response element. The remaining results of
the initial request can be seen by sending another `ListClusters`
request with the returned `nextToken` value. This value can be
between 1 and 100. If this parameter is not used, then
`ListClusters` returns up to 100 results and a `nextToken` value if
applicable.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListClusters',
verb='POST',
path='/', params=params)
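# Illustrative pagination sketch following the nextToken contract
# documented above (the response wrapper keys are an assumption based
# on boto's JSON-over-Query convention):
#
#     token, arns = None, []
#     while True:
#         page = conn.list_clusters(next_token=token, max_results=100)
#         result = page['ListClustersResponse']['ListClustersResult']
#         arns.extend(result['clusterArns'])
#         token = result.get('nextToken')
#         if not token:
#             break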
def list_container_instances(self, cluster=None, next_token=None,
max_results=None):
"""
Returns a list of container instances in a specified cluster.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instances you want to list. If
you do not specify a cluster, the default cluster is assumed.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListContainerInstances` request where `maxResults` was
used and the results exceeded the value of that parameter.
Pagination continues from the end of the previous results that
returned the `nextToken` value. This value is `null` when there are
no more results to return.
:type max_results: integer
:param max_results: The maximum number of container instance results
returned by `ListContainerInstances` in paginated output. When this
parameter is used, `ListContainerInstances` only returns
`maxResults` results in a single page along with a `nextToken`
response element. The remaining results of the initial request can
be seen by sending another `ListContainerInstances` request with
the returned `nextToken` value. This value can be between 1 and
100. If this parameter is not used, then `ListContainerInstances`
returns up to 100 results and a `nextToken` value if applicable.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListContainerInstances',
verb='POST',
path='/', params=params)
def list_task_definitions(self, family_prefix=None, next_token=None,
max_results=None):
"""
Returns a list of task definitions that are registered to your
account. You can filter the results by family name with the
`familyPrefix` parameter.
:type family_prefix: string
:param family_prefix: The name of the family that you want to filter
the `ListTaskDefinitions` results with. Specifying a `familyPrefix`
will limit the listed task definitions to definitions that belong
to that family.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListTaskDefinitions` request where `maxResults` was used
and the results exceeded the value of that parameter. Pagination
continues from the end of the previous results that returned the
`nextToken` value. This value is `null` when there are no more
results to return.
:type max_results: integer
:param max_results: The maximum number of task definition results
returned by `ListTaskDefinitions` in paginated output. When this
parameter is used, `ListTaskDefinitions` only returns `maxResults`
results in a single page along with a `nextToken` response element.
The remaining results of the initial request can be seen by sending
another `ListTaskDefinitions` request with the returned `nextToken`
value. This value can be between 1 and 100. If this parameter is
not used, then `ListTaskDefinitions` returns up to 100 results and
a `nextToken` value if applicable.
"""
params = {}
if family_prefix is not None:
params['familyPrefix'] = family_prefix
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListTaskDefinitions',
verb='POST',
path='/', params=params)
def list_tasks(self, cluster=None, container_instance=None, family=None,
next_token=None, max_results=None):
"""
Returns a list of tasks for a specified cluster. You can
filter the results by family name or by a particular container
instance with the `family` and `containerInstance` parameters.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the tasks you want to list. If you do not
specify a cluster, the default cluster is assumed.
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance that you want to
filter the `ListTasks` results with. Specifying a
`containerInstance` will limit the results to tasks that belong to
that container instance.
:type family: string
:param family: The name of the family that you want to filter the
`ListTasks` results with. Specifying a `family` will limit the
results to tasks that belong to that family.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListTasks` request where `maxResults` was used and the
results exceeded the value of that parameter. Pagination continues
from the end of the previous results that returned the `nextToken`
value. This value is `null` when there are no more results to
return.
:type max_results: integer
:param max_results: The maximum number of task results returned by
`ListTasks` in paginated output. When this parameter is used,
`ListTasks` only returns `maxResults` results in a single page
along with a `nextToken` response element. The remaining results of
the initial request can be seen by sending another `ListTasks`
request with the returned `nextToken` value. This value can be
between 1 and 100. If this parameter is not used, then `ListTasks`
returns up to 100 results and a `nextToken` value if applicable.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if container_instance is not None:
params['containerInstance'] = container_instance
if family is not None:
params['family'] = family
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListTasks',
verb='POST',
path='/', params=params)
def register_container_instance(self, cluster=None,
instance_identity_document=None,
instance_identity_document_signature=None,
total_resources=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Registers an Amazon EC2 instance into the specified cluster.
This instance will become available to place containers on.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to register your container instance with.
If you do not specify a cluster, the default cluster is assumed.
:type instance_identity_document: string
:param instance_identity_document:
:type instance_identity_document_signature: string
:param instance_identity_document_signature:
:type total_resources: list
:param total_resources:
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if instance_identity_document is not None:
params['instanceIdentityDocument'] = instance_identity_document
if instance_identity_document_signature is not None:
params['instanceIdentityDocumentSignature'] = instance_identity_document_signature
if total_resources is not None:
self.build_complex_list_params(
params, total_resources,
'totalResources.member',
('name', 'type', 'doubleValue', 'longValue', 'integerValue', 'stringSetValue'))
return self._make_request(
action='RegisterContainerInstance',
verb='POST',
path='/', params=params)
def register_task_definition(self, family, container_definitions):
"""
Registers a new task definition from the supplied `family` and
`containerDefinitions`.
:type family: string
:param family: You can specify a `family` for a task definition, which
allows you to track multiple versions of the same task definition.
You can think of the `family` as a name for your task definition.
:type container_definitions: list
:param container_definitions: A list of container definitions in JSON
format that describe the different containers that make up your
task.
"""
params = {'family': family, }
self.build_complex_list_params(
params, container_definitions,
'containerDefinitions.member',
('name', 'image', 'cpu', 'memory', 'links', 'portMappings', 'essential', 'entryPoint', 'command', 'environment'))
return self._make_request(
action='RegisterTaskDefinition',
verb='POST',
path='/', params=params)
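# Illustrative call sketch; the dict keys mirror the tuple passed to
# build_complex_list_params above (values are hypothetical):
#
#     conn.register_task_definition('web', [{
#         'name': 'nginx',
#         'image': 'nginx:latest',
#         'cpu': 256,
#         'memory': 512,
#         'essential': True,
#     }])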
def run_task(self, task_definition, cluster=None, overrides=None,
count=None):
"""
Start a task using random placement and the default Amazon ECS
scheduler. If you want to use your own scheduler or place a
task on a specific container instance, use `StartTask`
instead.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to run your task on. If you do not
specify a cluster, the default cluster is assumed.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to run.
:type overrides: dict
:param overrides:
:type count: integer
:param count: The number of instances of the specified task that you
would like to place on your cluster.
"""
params = {'taskDefinition': task_definition, }
if cluster is not None:
params['cluster'] = cluster
if overrides is not None:
params['overrides'] = overrides
if count is not None:
params['count'] = count
return self._make_request(
action='RunTask',
verb='POST',
path='/', params=params)
def start_task(self, task_definition, container_instances, cluster=None,
overrides=None):
"""
Starts a new task from the specified task definition on the
specified container instance or instances. If you want to use
the default Amazon ECS scheduler to place your task, use
`RunTask` instead.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to start your task on. If you do not
specify a cluster, the default cluster is assumed.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to start.
:type overrides: dict
:param overrides:
:type container_instances: list
:param container_instances: The container instance UUIDs or full Amazon
Resource Name (ARN) entries for the container instances on which
you would like to place your task.
"""
params = {'taskDefinition': task_definition, }
self.build_list_params(params,
container_instances,
'containerInstances.member')
if cluster is not None:
params['cluster'] = cluster
if overrides is not None:
params['overrides'] = overrides
return self._make_request(
action='StartTask',
verb='POST',
path='/', params=params)
def stop_task(self, task, cluster=None):
"""
Stops a running task.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task you want to stop. If you do not
specify a cluster, the default cluster is assumed.
:type task: string
:param task: The task UUID or full Amazon Resource Name (ARN) entry of
the task you would like to stop.
"""
params = {'task': task, }
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='StopTask',
verb='POST',
path='/', params=params)
def submit_container_state_change(self, cluster=None, task=None,
container_name=None, status=None,
exit_code=None, reason=None,
network_bindings=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Sent to acknowledge that a container changed states.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container.
:type task: string
:param task: The task UUID or full Amazon Resource Name (ARN) of the
task that hosts the container.
:type container_name: string
:param container_name: The name of the container.
:type status: string
:param status: The status of the state change request.
:type exit_code: integer
:param exit_code: The exit code returned for the state change request.
:type reason: string
:param reason: The reason for the state change request.
:type network_bindings: list
:param network_bindings: The network bindings of the container.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if task is not None:
params['task'] = task
if container_name is not None:
params['containerName'] = container_name
if status is not None:
params['status'] = status
if exit_code is not None:
params['exitCode'] = exit_code
if reason is not None:
params['reason'] = reason
if network_bindings is not None:
self.build_complex_list_params(
params, network_bindings,
'networkBindings.member',
('bindIP', 'containerPort', 'hostPort'))
return self._make_request(
action='SubmitContainerStateChange',
verb='POST',
path='/', params=params)
def submit_task_state_change(self, cluster=None, task=None, status=None,
reason=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Sent to acknowledge that a task changed states.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task.
:type task: string
:param task: The task UUID or full Amazon Resource Name (ARN) of the
task in the state change request.
:type status: string
:param status: The status of the state change request.
:type reason: string
:param reason: The reason for the state change request.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if task is not None:
params['task'] = task
if status is not None:
params['status'] = status
if reason is not None:
params['reason'] = reason
return self._make_request(
action='SubmitTaskStateChange',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb=verb,
path=path, params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
|
markvl/acrylamid
|
refs/heads/master
|
acrylamid/filters/replace.py
|
2
|
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Christian Koepp <ckoepp@gmail.com>. All rights reserved.
# License: BSD Style, 2 clauses -- see LICENSE.
from acrylamid import log
from acrylamid.filters import Filter
class Replace(Filter):
match = ['replace']
version = 1
priority = 0.0
def init(self, conf, env, *args):
try:
self._db = conf.replace_rules
except AttributeError:
log.warn('No configuration named REPLACE_RULES found. Replace filter has nothing to do.')
self._db = dict()
def transform(self, content, entry, *args):
if len(self._db) == 0:
return content
for k, v in self._db.items():
content = content.replace(k, v)
return content
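# Illustrative configuration sketch (the REPLACE_RULES name follows the
# conf.replace_rules lookup above; the rules themselves are hypothetical):
#
#     # in conf.py
#     REPLACE_RULES = {
#         'teh': 'the',
#         '(c)': u'\u00a9',
#     }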
|
ad2476/fb-toy-langmod
|
refs/heads/master
|
corpus.py
|
1
|
#!/usr/bin/env python2
import sys
import argparse
from os import path
import numpy as np
def parseProgramArgs():
parser = argparse.ArgumentParser(description="Split a corpus into test/train documents.")
parser.add_argument("corpus", help="Path to corpus")
parser.add_argument("output_dir", help="Directory into which to output train.txt and test.txt")
mutexgroup = parser.add_mutually_exclusive_group(required=True)
mutexgroup.add_argument("-l", "--line", type=int, help="Line number in corpus to use as test (for cross-validation)")
mutexgroup.add_argument("-r", "--ratio", type=float, help="Ratio of corpus to use as test (random subset)")
mutexgroup.add_argument("-n", "--size", type=int, help="Size of test corpus, by line count (random subset)")
return parser.parse_args()
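# Illustrative invocations (hypothetical paths):
#
#     python2 corpus.py corpus.txt out/ --ratio 0.1   # random 10% heldout
#     python2 corpus.py corpus.txt out/ --line 42     # line 42 as heldout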
if __name__ == '__main__':
args = parseProgramArgs()
fname = args.corpus
destdir = args.output_dir
f = open(fname, "r") # open the corpus for reading
trainF = open(path.join(destdir, "train.txt"), "w")
testF = open(path.join(destdir, "heldout.txt"), "w")
corpus = [l for l in f]
N = len(corpus)
if args.size or args.ratio: # Want to randomly select some number of lines into test/train:
if args.size:
test_size = args.size # this is how many lines we want in the test corpus
else: # otherwise, we're going by ratio:
test_size = int(N*args.ratio) # number of lines in test corpus
# randomly select $test_size distinct line numbers in range 0,N-1;
# sample without replacement so no line is duplicated in the test set
lines = set(np.random.choice(N, size=test_size, replace=False))
for i in lines: # use these for the test corpus:
testF.write(corpus[i])
# next, put the rest of the lines in the train corpus:
for i in xrange(0,N):
if i not in lines: # skip indices already used for the test corpus
trainF.write(corpus[i])
else: # we just select a single line from the corpus for our test corpus:
line = args.line - 1 # account for 0-indexing
if line < 0 or line >= N:
sys.stderr.write("Line out of bounds! (%d lines in corpus)\n"%N)
sys.exit(1)
testF.write(corpus[line]) # write this line to the test corpus
# write all other lines to train corpus:
for i in xrange(0,N):
if i != line:
trainF.write(corpus[i])
f.close()
trainF.close()
testF.close()
|
aryeh/py-authorize
|
refs/heads/master
|
authorize/customer.py
|
3
|
from authorize import Configuration
class Customer(object):
@staticmethod
def create(params={}):
return Configuration.api.customer.create(params)
@staticmethod
def details(customer_id):
return Configuration.api.customer.details(customer_id)
@staticmethod
def update(customer_id, params={}):
return Configuration.api.customer.update(customer_id, params)
@staticmethod
def delete(customer_id):
return Configuration.api.customer.delete(customer_id)
@staticmethod
def list():
return Configuration.api.customer.list()
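# Illustrative usage sketch (assumes authorize.Configuration has been
# set up elsewhere; the params and the customer_id attribute on the
# result are assumptions based on py-authorize's documented style):
#
#     result = Customer.create({'email': 'user@example.com'})
#     details = Customer.details(result.customer_id)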
|
ljnutal6/media-recommend
|
refs/heads/master
|
app/virtualenvs/recommedia/lib/python2.7/site-packages/werkzeug/datastructures.py
|
314
|
# -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import codecs
import mimetypes
from itertools import repeat
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
for item in iteritems(mapping, multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item
def native_itermethods(names):
if not PY2:
return lambda x: x
def setmethod(cls, name):
itermethod = getattr(cls, name)
setattr(cls, 'iter%s' % name, itermethod)
listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw))
listmethod.__doc__ = \
'Like :py:meth:`iter%s`, but returns a list.' % name
setattr(cls, name, listmethod)
def wrap(cls):
for name in names:
setmethod(cls, name)
return cls
return wrap
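# Illustrative effect sketch: on PY2 the decorator aliases each listed
# generator method to iter<name>() and rebinds <name>() to return a
# list; on PY3 the class is returned unchanged. For MultiDict below:
#
#     d = MultiDict([('a', 1)])
#     d.keys()       # PY2: ['a']; PY3: an iterator over the keys
#     d.iterkeys()   # PY2 only: the original generator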
class ImmutableListMixin(object):
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self))
return rv
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __delslice__(self, i, j):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
__imul__ = __iadd__
def __setitem__(self, key, value):
is_immutable(self)
def __setslice__(self, i, j, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
remove = append
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, cmp=None, key=None, reverse=None):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
list.__repr__(self),
)
class ImmutableDictMixin(object):
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super(cls, cls).__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
return iteritems(self)
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def _iter_hashitems(self):
return iteritems(self, multi=True)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
class UpdateDictMixin(object):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
def setdefault(self, key, default=None):
modified = key not in self
rv = super(UpdateDictMixin, self).setdefault(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
def pop(self, key, default=_missing):
modified = key in self
if default is _missing:
rv = super(UpdateDictMixin, self).pop(key)
else:
rv = super(UpdateDictMixin, self).pop(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
__setitem__ = calls_update('__setitem__')
__delitem__ = calls_update('__delitem__')
clear = calls_update('clear')
popitem = calls_update('popitem')
update = calls_update('update')
del calls_update
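# Illustrative usage sketch (a minimal subclass; werkzeug's own
# CallbackDict is the canonical consumer of this mixin, and log_change
# is hypothetical):
#
#     class NotifyingDict(UpdateDictMixin, dict):
#         pass
#
#     d = NotifyingDict()
#     d.on_update = lambda d: log_change(d)
#     d['x'] = 1  # __setitem__ is wrapped, so on_update fires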
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
if type is not None:
rv = type(rv)
except (KeyError, ValueError):
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
elif isinstance(mapping, dict):
tmp = {}
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
return dict.__getitem__(self, key)[0]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just as `get`
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
for key, values in iteritems(dict, self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def lists(self):
"""Return a list of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
for key, values in iteritems(dict, self):
yield key, list(values)
def keys(self):
return iterkeys(dict, self)
__iter__ = keys
def values(self):
"""Returns an iterator of the first value on every key's value list."""
for values in itervalues(dict, self):
yield values[0]
def listvalues(self):
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return itervalues(dict, self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(iteritems(self))
return dict(self.lists())
def update(self, other_dict):
"""update() extends rather than replaces existing key lists."""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
return dict.pop(self, key)[0]
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
return (item[0], item[1][0])
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
        .. versionchanged:: 0.5
           If the key no longer exists, an empty list is returned instead
           of an error being raised.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def __copy__(self):
return self.copy()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
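# An illustrative sketch (not from the original source): how a MultiDict
# keeps every value for a repeated key while plain item access returns
# the first one. The keys and values below are made up.
#
#   >>> d = MultiDict([('a', 'b'), ('a', 'c')])
#   >>> d['a']
#   'b'
#   >>> d.getlist('a')
#   ['b', 'c']
#   >>> d.add('a', 'd')
#   >>> d.getlist('a')
#   ['b', 'c', 'd']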
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = iteritems(self, multi=True)
iter2 = iteritems(other, multi=True)
try:
for k1, v1 in iter1:
k2, v2 = next(iter2)
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
next(iter2)
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in iterlists(self):
if other.getlist(key) != values:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def __getstate__(self):
return list(iteritems(self, multi=True))
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def keys(self):
return (key for key, value in iteritems(self))
__iter__ = keys
def values(self):
return (value for key, value in iteritems(self))
def items(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def lists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def listvalues(self):
for key, values in iterlists(self):
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError('setlistdefault is unsupported for '
'ordered multi dicts')
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
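# An illustrative sketch (not from the original source): unlike MultiDict,
# an OrderedMultiDict remembers the exact order in which values were
# added, even across different keys.
#
#   >>> d = OrderedMultiDict()
#   >>> d.add('foo', 1)
#   >>> d.add('bar', 2)
#   >>> d.add('foo', 3)
#   >>> list(d.items(multi=True))
#   [('foo', 1), ('bar', 2), ('foo', 3)]
#   >>> d.getlist('foo')
#   [1, 3]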
def _options_header_vkw(value, kw):
return dump_options_header(value, dict((k.replace('_', '-'), v)
for k, v in kw.items()))
def _unicodify_header_value(value):
if isinstance(value, bytes):
value = value.decode('latin-1')
if not isinstance(value, text_type):
value = text_type(value)
return value
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
       This data structure now stores unicode values similarly to how the
       multi dicts do it. The main difference is that bytes can be set as
       well, which will automatically be latin-1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, integer_types):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
if not isinstance(key, string_types):
raise exceptions.BadRequestKeyError(key)
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
def __ne__(self, other):
return not self.__eq__(other)
def get(self, key, default=None, type=None, as_bytes=False):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
:param as_bytes: return bytes instead of unicode strings.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if as_bytes:
rv = rv.encode('latin1')
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None, as_bytes=False):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
        :meth:`get`, :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
:param as_bytes: return bytes instead of unicode strings.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode('latin1')
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in iteritems(self, lower):
yield key
def values(self):
for _, value in iteritems(self):
yield value
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iteritems(iterable):
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (integer_types, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
that position is removed, if it's a string the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, integer_types):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
has_key = __contains__
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
self._list.append((_key, _value))
def _validate_value(self, value):
if not isinstance(value, text_type):
raise TypeError('Value should be unicode.')
if u'\n' in value or u'\r' in value:
raise ValueError('Detected newline in header value. This is '
'a potential security problem')
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
if not self._list:
self._list.append((_key, _value))
return
listiter = iter(self._list)
ikey = _key.lower()
for idx, (old_key, old_value) in enumerate(listiter):
if old_key.lower() == ikey:
                # replace first occurrence
self._list[idx] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
def setdefault(self, key, value):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key in self:
return self[key]
self.set(key, value)
return value
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, integer_types)):
if isinstance(key, integer_types):
value = [value]
value = [(k, _unicodify_header_value(v)) for (k, v) in value]
[self._validate_value(v) for (k, v) in value]
if isinstance(key, integer_types):
self._list[key] = value[0]
else:
self._list[key] = value
else:
self.set(key, value)
def to_list(self, charset='iso-8859-1'):
"""Convert the headers into a list suitable for WSGI."""
from warnings import warn
warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
stacklevel=2)
return self.to_wsgi_list()
def to_wsgi_list(self):
"""Convert the headers into a list suitable for WSGI.
The values are byte strings in Python 2 converted to latin1 and unicode
strings in Python 3 for the WSGI server to encode.
:return: list
"""
if PY2:
return [(k, v.encode('latin1')) for k, v in self]
return list(self)
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_wsgi_list():
strs.append('%s: %s' % (key, value))
strs.append('\r\n')
return '\r\n'.join(strs)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
list(self)
)
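# An illustrative sketch (not from the original source; outputs shown as
# on Python 3): Headers keeps insertion order, allows duplicate keys and
# matches key names case-insensitively.
#
#   >>> h = Headers()
#   >>> h.add('Content-Type', 'text/plain')
#   >>> h.add('X-Tag', 'a')
#   >>> h.add('X-Tag', 'b')
#   >>> h['content-type']
#   'text/plain'
#   >>> h.getlist('x-tag')
#   ['a', 'b']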
class ImmutableHeadersMixin(object):
"""Makes a :class:`Headers` immutable. We do not mark them as
    hashable though since the only use case for this data structure
    in Werkzeug is a view on a mutable structure.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
set = __setitem__
def add(self, item):
is_immutable(self)
remove = add_header = add
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
def __eq__(self, other):
return self.environ is other.environ
def __getitem__(self, key, _get_mode=False):
# _get_mode is a no-op for this class as there is no index but
# used because get() calls it.
key = key.upper().replace('-', '_')
if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
return _unicodify_header_value(self.environ[key])
return _unicodify_header_value(self.environ['HTTP_' + key])
def __len__(self):
# the iter is necessary because otherwise list calls our
# len which would call list again and so forth.
return len(list(iter(self)))
def __iter__(self):
for key, value in iteritems(self.environ):
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield (key[5:].replace('_', '-').title(),
_unicodify_header_value(value))
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield (key.replace('_', '-').title(),
_unicodify_header_value(value))
def copy(self):
raise TypeError('cannot create %r copies' % self.__class__.__name__)
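# An illustrative sketch (not from the original source; outputs shown as
# on Python 3): EnvironHeaders is a read-only view over a WSGI environ
# dict; header names are translated to and from the ``HTTP_*`` keys.
#
#   >>> env = {'HTTP_USER_AGENT': 'test', 'CONTENT_TYPE': 'text/plain'}
#   >>> eh = EnvironHeaders(env)
#   >>> eh['User-Agent']
#   'test'
#   >>> eh['Content-Type']
#   'text/plain'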
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
    This works for all read operations and will raise a `TypeError` for
    methods that would usually change data, which isn't possible here.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = dicts or []
@classmethod
def fromkeys(cls):
raise TypeError('cannot create %r instances by fromkeys' %
cls.__name__)
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise exceptions.BadRequestKeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def keys(self):
rv = set()
for d in self.dicts:
rv.update(d.keys())
return iter(rv)
__iter__ = keys
def items(self, multi=False):
found = set()
for d in self.dicts:
for key, value in iteritems(d, multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def values(self):
for key, value in iteritems(self):
yield value
def lists(self):
rv = {}
for d in self.dicts:
for key, values in iterlists(d):
rv.setdefault(key, []).extend(values)
return iteritems(rv)
def listvalues(self):
return (x[1] for x in self.lists())
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self.dicts[:])
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
rv = {}
for d in reversed(self.dicts):
rv.update(d.to_dict(flat))
return rv
def __len__(self):
return len(self.keys())
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
    files to it. This is used for :class:`EnvironBuilder` and generally
    useful for unit testing.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
value = file
else:
if isinstance(file, string_types):
if filename is None:
filename = file
file = open(file, 'rb')
if filename and content_type is None:
content_type = mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
value = FileStorage(file, filename, name, content_type)
self.add(name, value)
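# An illustrative sketch (not from the original source; assumes
# ``from io import BytesIO``): adding an in-memory upload. The content
# type is guessed from the filename when not given explicitly.
#
#   >>> fd = FileMultiDict()
#   >>> fd.add_file('upload', BytesIO(b'data'), filename='notes.txt')
#   >>> fd['upload'].content_type
#   'text/plain'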
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def _iter_hashitems(self):
return enumerate(iteritems(self, multi=True))
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
@native_itermethods(['values'])
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
    All :class:`Accept` objects work similarly to a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = [(a, b) for b, a in values]
values.sort()
values.reverse()
list.__init__(self, [(a, b) for b, a in values])
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, string_types):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, string_types):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
def __str__(self):
return self.to_header()
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
on the quality of the client. If two items have the same quality,
the one is returned that comes first.
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
best_quality = -1
result = default
for server_item in matches:
for client_item, quality in self:
if quality <= best_quality:
break
if self._value_matches(server_item, client_item):
best_quality = quality
result = server_item
return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
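# An illustrative sketch (not from the original source): server-side
# negotiation with best_match; the offered values below are made up.
#
#   >>> accept = Accept([('gzip', 1), ('identity', 0.5)])
#   >>> accept.best_match(['identity', 'br'])
#   'identity'
#   >>> accept.quality('gzip')
#   1
#   >>> accept['br']              # unknown entries have quality 0
#   0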
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _value_matches(self, value, item):
def _normalize(x):
x = x.lower()
return x == '*' and ('*', '*') or x.split('/', 1)
# this is from the application which is trusted. to avoid developer
# frustration we actually check these for valid values
if '/' not in value:
raise ValueError('invalid mimetype %r' % value)
value_type, value_subtype = _normalize(value)
if value_type == '*' and value_subtype != '*':
raise ValueError('invalid mimetype %r' % value)
if '/' not in item:
return False
item_type, item_subtype = _normalize(item)
if item_type == '*' and item_subtype != '*':
return False
return (
(item_type == item_subtype == '*' or
value_type == value_subtype == '*') or
(item_type == value_type and (item_subtype == '*' or
value_subtype == '*' or
item_subtype == value_subtype))
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
'text/html' in self or
'application/xhtml+xml' in self or
self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return (
'application/xhtml+xml' in self or
'application/xml' in self
)
@property
def accept_json(self):
"""True if this object accepts JSON."""
return 'application/json' in self
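# An illustrative sketch (not from the original source): MIMEAccept
# normalizes type/subtype pairs and supports wildcards on either side.
#
#   >>> m = MIMEAccept([('text/html', 1), ('application/json', 0.8)])
#   >>> 'text/html' in m
#   True
#   >>> m.accept_json
#   True
#   >>> m.best
#   'text/html'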
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for languages."""
def _value_matches(self, value, item):
def _normalize(language):
return _locale_delim_re.split(language.lower())
return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == '*' or _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass."""
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes, the
    Python descriptors use underscores instead.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_property('no-cache', '*', None)
no_store = cache_property('no-store', None, bool)
max_age = cache_property('max-age', -1, int)
no_transform = cache_property('no-transform', None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
                self.pop(key, None)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_property('max-stale', '*', int)
min_fresh = cache_property('min-fresh', '*', int)
no_transform = cache_property('no-transform', None, None)
only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_property('public', None, bool)
private = cache_property('private', '*', None)
must_revalidate = cache_property('must-revalidate', None, bool)
proxy_revalidate = cache_property('proxy-revalidate', None, bool)
s_maxage = cache_property('s-maxage', None, None)
# attach cache_property to the _CacheControl as staticmethod
# so that others can reuse it.
_CacheControl.cache_property = staticmethod(cache_property)
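# An illustrative sketch (not from the original source): typical use of
# the response cache control accessors, plus how a subclass could add a
# hypothetical extension directive via the reusable ``cache_property``
# (the directive name below is made up).
#
#   >>> cc = ResponseCacheControl()
#   >>> cc.max_age = 3600
#   >>> cc.max_age
#   3600
#   >>> cc.no_cache = True        # implicit none-value reads back as '*'
#   >>> cc.no_cache
#   '*'
#
#   class ExtendedCacheControl(ResponseCacheControl):
#       stale_while_revalidate = ResponseCacheControl.cache_property(
#           'stale-while-revalidate', None, int)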
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
dict.__repr__(self)
)
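# An illustrative sketch (not from the original source): the on_update
# callback fires on every mutation and receives the dict itself.
#
#   >>> def changed(d):
#   ...     print('now %d item(s)' % len(d))
#   >>> cd = CallbackDict({'a': 1}, on_update=changed)
#   >>> cd['b'] = 2
#   now 2 item(s)
#   >>> del cd['a']
#   now 1 item(s)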
class HeaderSet(object):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
In older versions a :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
        for idx, stored in enumerate(self._headers):
            if stored.lower() == key:
                del self._headers[idx]
                break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
return self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ', '.join(map(quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
def __nonzero__(self):
return bool(self._set)
def __str__(self):
return self.to_header()
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self._headers
)
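# An illustrative sketch (not from the original source): a HeaderSet keeps
# the original casing but treats membership case-insensitively and
# ignores duplicates.
#
#   >>> vary = HeaderSet(['Accept', 'Cookie'])
#   >>> 'accept' in vary
#   True
#   >>> vary.add('Accept')        # already present, ignored
#   >>> vary.to_header()
#   'Accept, Cookie'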
class ETags(object):
"""A set that can be used to check if one etag is present in a collection
of etags.
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
self._strong = frozenset(not star_tag and strong_etags or ())
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags.
It is also possible to use the ``in`` operator.
"""
if self.star_tag:
return True
return etag in self._strong
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return '*'
return ', '.join(
['"%s"' % x for x in self._strong] +
['w/"%s"' % x for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
            raise TypeError('exactly one of etag or data is required')
if etag is None:
etag = generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
def __nonzero__(self):
return bool(self.star_tag or self._strong or self._weak)
def __str__(self):
return self.to_header()
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
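# An illustrative sketch (not from the original source): the ``in``
# operator only consults strong etags; weak etags need contains_weak.
#
#   >>> etags = ETags(['abc'], ['xyz'])
#   >>> 'abc' in etags
#   True
#   >>> etags.contains_weak('xyz')
#   True
#   >>> 'xyz' in etags
#   False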
class IfRange(object):
"""Very simple object that represents the `If-Range` header in parsed
form. It will either have neither a etag or date or one of either but
never both.
.. versionadded:: 0.7
"""
def __init__(self, etag=None, date=None):
#: The etag parsed and unquoted. Ranges always operate on strong
#: etags so the weakness information is not necessary.
self.etag = etag
#: The date in parsed format or `None`.
self.date = date
def to_header(self):
"""Converts the object back into an HTTP header."""
if self.date is not None:
return http_date(self.date)
if self.etag is not None:
return quote_etag(self.etag)
return ''
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
"""Represents a range header. All the methods are only supporting bytes
as unit. It does store multiple ranges but :meth:`range_for_length` will
only work if only one range is provided.
.. versionadded:: 0.7
"""
def __init__(self, units, ranges):
#: The units of this range. Usually "bytes".
self.units = units
#: A list of ``(begin, end)`` tuples for the range header provided.
#: The ranges are non-inclusive.
self.ranges = ranges
def range_for_length(self, length):
"""If the range is for bytes, the length is not None and there is
exactly one range and it is satisfiable it returns a ``(start, stop)``
tuple, otherwise `None`.
"""
if self.units != 'bytes' or length is None or len(self.ranges) != 1:
return None
start, end = self.ranges[0]
if end is None:
end = length
if start < 0:
start += length
if is_byte_range_valid(start, end, length):
return start, min(end, length)
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append(begin >= 0 and '%s-' % begin or str(begin))
else:
ranges.append('%s-%s' % (begin, end - 1))
return '%s=%s' % (self.units, ','.join(ranges))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
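# An illustrative sketch (not from the original source): ranges are stored
# non-inclusive but serialized with the inclusive end that the HTTP
# header syntax expects.
#
#   >>> r = Range('bytes', [(0, 500)])
#   >>> r.range_for_length(1000)
#   (0, 500)
#   >>> r.to_header()
#   'bytes=0-499'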
class ContentRange(object):
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(self, units, start, stop, length=None, on_update=None):
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self.on_update = on_update
self.set(start, stop, length, units)
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
#: The units to use, usually "bytes"
units = _callback_property('_units')
#: The start point of the range or `None`.
start = _callback_property('_start')
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop = _callback_property('_stop')
#: The length of the range or `None`.
length = _callback_property('_length')
def set(self, start, stop, length=None, units='bytes'):
"""Simple method to update the ranges."""
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self._units = units
self._start = start
self._stop = stop
self._length = length
if self.on_update is not None:
self.on_update(self)
def unset(self):
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self):
if self.units is None:
return ''
if self.length is None:
length = '*'
else:
length = self.length
if self.start is None:
return '%s */%s' % (self.units, length)
return '%s %s-%s/%s' % (
self.units,
self.start,
self.stop - 1,
length
)
def __nonzero__(self):
return self.units is not None
__bool__ = __nonzero__
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
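# An illustrative sketch (not from the original source): the stop point is
# stored non-inclusive and rendered inclusive in the header.
#
#   >>> cr = ContentRange('bytes', 0, 500, 1000)
#   >>> cr.to_header()
#   'bytes 0-499/1000'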
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
The username transmitted. This is set for both basic and digest
auth all the time.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth."""
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
#: list of keys that require quoting in the generated header
_require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self['__auth_type__'] = auth_type
self.on_update = on_update
def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self)
def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
algorithm=None, stale=False):
"""Clear the auth info and enable digest auth."""
d = {
'__auth_type__': 'digest',
'realm': realm,
'nonce': nonce,
'qop': dump_header(qop)
}
if stale:
d['stale'] = 'TRUE'
if opaque is not None:
d['opaque'] = opaque
if algorithm is not None:
d['algorithm'] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop('__auth_type__', None) or 'basic'
return '%s %s' % (auth_type.title(), ', '.join([
'%s=%s' % (key, quote_header_value(value,
allow_token=key not in self._require_quoting))
for key, value in iteritems(d)
]))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
def auth_property(name, doc=None):
"""A static helper function for subclasses to add extra authentication
system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
For more information have a look at the sourcecode to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
type = auth_property('__auth_type__', doc='''
The type of the auth mechanism. HTTP currently specifies
`Basic` and `Digest`.''')
realm = auth_property('realm', doc='''
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access.''')
domain = _set_property('domain', doc='''
A list of URIs that define the protection space. If a URI is an
absolute path, it is relative to the canonical root URL of the
server being accessed.''')
nonce = auth_property('nonce', doc='''
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.''')
opaque = auth_property('opaque', doc='''
A string of data, specified by the server, which should be returned
by the client unchanged in the Authorization header of subsequent
requests with URIs in the same protection space. It is recommended
that this string be base64 or hexadecimal data.''')
algorithm = auth_property('algorithm', doc='''
A string indicating a pair of algorithms used to produce the digest
and a checksum. If this is not present it is assumed to be "MD5".
If the algorithm is not understood, the challenge should be ignored
(and a different one used, if there is more than one).''')
qop = _set_property('qop', doc='''
A set of quality-of-privacy directives such as auth and auth-int.''')
def _get_stale(self):
val = self.get('stale')
if val is not None:
return val.lower() == 'true'
def _set_stale(self, value):
if value is None:
self.pop('stale', None)
else:
self['stale'] = value and 'TRUE' or 'FALSE'
stale = property(_get_stale, _set_stale, doc='''
A flag, indicating that the previous request from the client was
rejected because the nonce value was stale.''')
del _get_stale, _set_stale
# make auth_property a staticmethod so that subclasses of
# `WWWAuthenticate` can use it for new properties.
auth_property = staticmethod(auth_property)
del _set_property
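# An illustrative sketch (not from the original source): building a basic
# auth challenge; realm values are quoted automatically.
#
#   >>> www = WWWAuthenticate()
#   >>> www.set_basic(realm='private area')
#   >>> www.to_header()
#   'Basic realm="private area"'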
class FileStorage(object):
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(self, stream=None, filename=None, name=None,
content_type=None, content_length=None,
headers=None):
self.name = name
self.stream = stream or _empty_stream
# if no filename is provided we can attempt to get the filename
# from the stream object passed. There we have to be careful to
# skip things like <fdopen>, <stderr> etc. Python marks these
        # special filenames with angle brackets.
if filename is None:
filename = getattr(stream, 'name', None)
s = make_literal_wrapper(filename)
if filename and filename[0] == s('<') and filename[-1] == s('>'):
filename = None
# On Python 3 we want to make sure the filename is always unicode.
# This might not be if the name attribute is bytes due to the
# file being opened from the bytes API.
if not PY2 and isinstance(filename, bytes):
filename = filename.decode(sys.getfilesystemencoding(),
'replace')
self.filename = filename
if headers is None:
headers = Headers()
self.headers = headers
if content_type is not None:
headers['Content-Type'] = content_type
if content_length is not None:
headers['Content-Length'] = str(content_length)
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.content_type)
@property
def content_type(self):
"""The content-type sent in the header. Usually not available"""
return self.headers.get('content-type')
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
return int(self.headers.get('content-length') or 0)
@property
def mimetype(self):
"""Like :attr:`content_type` but without parameters (eg, without
charset, type etc.). For example if the content
type is ``text/html; charset=utf-8`` the mimetype would be
``'text/html'``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[0]
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[1]
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
:param dst: a filename or open file object the uploaded file
is saved to.
:param buffer_size: the size of the buffer. This works the same as
the `length` parameter of
:func:`shutil.copyfileobj`.
"""
from shutil import copyfileobj
close_dst = False
if isinstance(dst, string_types):
dst = open(dst, 'wb')
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except Exception:
pass
def __nonzero__(self):
return bool(self.filename)
def __getattr__(self, name):
return getattr(self.stream, name)
def __iter__(self):
return iter(self.readline, '')
def __repr__(self):
return '<%s: %r (%r)>' % (
self.__class__.__name__,
self.filename,
self.content_type
)
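# An illustrative sketch (not from the original source; assumes
# ``from io import BytesIO`` and outputs shown as on Python 3): wrapping
# an in-memory stream. The mimetype/mimetype_params properties split the
# content type header.
#
#   >>> fs = FileStorage(BytesIO(b'hello'), filename='hello.txt',
#   ...                  content_type='text/plain; charset=utf-8')
#   >>> fs.mimetype
#   'text/plain'
#   >>> fs.mimetype_params
#   {'charset': 'utf-8'}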
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
|
dparlevliet/zelenka-report-storage
|
refs/heads/master
|
server-db/twisted/python/test/test_release.py
|
5
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.release} and L{twisted.python._release}.
All of these tests are skipped on platforms other than Linux, as the release is
only ever performed on Linux.
"""
import glob
import warnings
import operator
import os
import sys
import textwrap
from StringIO import StringIO
import tarfile
from xml.dom import minidom as dom
from datetime import date
from twisted.trial.unittest import TestCase
from twisted.python.compat import execfile
from twisted.python.procutils import which
from twisted.python import release
from twisted.python.filepath import FilePath
from twisted.python.versions import Version
from twisted.test.testutils import XMLAssertionMixin
from twisted.web.microdom import parseXMLString
from twisted.python._release import (
_changeVersionInFile, getNextVersion, findTwistedProjects, replaceInFile,
replaceProjectVersion, Project, generateVersionFileData,
changeAllProjectVersions, VERSION_OFFSET, DocBuilder, ManBuilder,
NoDocumentsFound, filePathDelta, CommandFailed, BookBuilder,
DistributionBuilder, APIBuilder, BuildAPIDocsScript, buildAllTarballs,
runCommand, UncleanWorkingDirectory, NotWorkingDirectory,
ChangeVersionsScript, BuildTarballsScript, NewsBuilder, BuildDocsScript,
SphinxBuilder)
if os.name != 'posix':
skip = "Release toolchain only supported on POSIX."
else:
skip = None
# Check a bunch of dependencies to skip tests if necessary.
try:
from twisted.lore.scripts import lore
except ImportError:
loreSkip = "Lore is not present."
else:
loreSkip = skip
try:
import pydoctor.driver
# it might not be installed, or it might use syntax not available in
# this version of Python.
except (ImportError, SyntaxError):
pydoctorSkip = "Pydoctor is not present."
else:
if getattr(pydoctor, "version_info", (0,)) < (0, 1):
pydoctorSkip = "Pydoctor is too old."
else:
pydoctorSkip = skip
if which("sphinx-build"):
sphinxSkip = None
else:
sphinxSkip = "Sphinx not available."
if which("latex") and which("dvips") and which("ps2pdf13"):
latexSkip = skip
else:
latexSkip = "LaTeX is not available."
if which("svn") and which("svnadmin"):
svnSkip = skip
else:
svnSkip = "svn or svnadmin is not present."
def genVersion(*args, **kwargs):
"""
A convenience for generating _version.py data.
@param args: Arguments to pass to L{Version}.
@param kwargs: Keyword arguments to pass to L{Version}.
"""
return generateVersionFileData(Version(*args, **kwargs))
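# A minimal sketch (true by construction): genVersion is shorthand for
# rendering a Version through generateVersionFileData.
def _exampleGenVersion():
    assert genVersion("twisted", 10, 0, 0) == generateVersionFileData(
        Version("twisted", 10, 0, 0))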
class StructureAssertingMixin(object):
"""
A mixin for L{TestCase} subclasses which provides some methods for
asserting the structure and contents of directories and files on the
filesystem.
"""
def createStructure(self, root, dirDict):
"""
Create a set of directories and files given a dict defining their
structure.
@param root: The directory in which to create the structure. It must
already exist.
@type root: L{FilePath}
@param dirDict: The dict defining the structure. Keys should be strings
naming files, values should be strings describing file contents OR
dicts describing subdirectories. All files are written in binary
mode. Any string values are assumed to describe text files and
will have their newlines replaced with the platform-native newline
convention. For example::
{"foofile": "foocontents",
"bardir": {"barfile": "bar\ncontents"}}
@type dirDict: C{dict}
"""
for x in dirDict:
child = root.child(x)
if isinstance(dirDict[x], dict):
child.createDirectory()
self.createStructure(child, dirDict[x])
else:
child.setContent(dirDict[x].replace('\n', os.linesep))
def assertStructure(self, root, dirDict):
"""
Assert that a directory is equivalent to one described by a dict.
@param root: The filesystem directory to compare.
@type root: L{FilePath}
@param dirDict: The dict that should describe the contents of the
directory. It should be the same structure as the C{dirDict}
parameter to L{createStructure}.
@type dirDict: C{dict}
"""
children = [x.basename() for x in root.children()]
for x in dirDict:
child = root.child(x)
if isinstance(dirDict[x], dict):
self.assertTrue(child.isdir(), "%s is not a dir!"
% (child.path,))
self.assertStructure(child, dirDict[x])
else:
a = child.getContent().replace(os.linesep, '\n')
self.assertEqual(a, dirDict[x], child.path)
children.remove(x)
if children:
self.fail("There were extra children in %s: %s"
% (root.path, children))
def assertExtractedStructure(self, outputFile, dirDict):
"""
Assert that a tarfile content is equivalent to one described by a dict.
@param outputFile: The tar file built by L{DistributionBuilder}.
@type outputFile: L{FilePath}.
@param dirDict: The dict that should describe the contents of the
directory. It should be the same structure as the C{dirDict}
parameter to L{createStructure}.
@type dirDict: C{dict}
"""
tarFile = tarfile.TarFile.open(outputFile.path, "r:bz2")
extracted = FilePath(self.mktemp())
extracted.createDirectory()
for info in tarFile:
tarFile.extract(info, path=extracted.path)
self.assertStructure(extracted.children()[0], dirDict)
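# A minimal usage sketch (hypothetical helper taking any TestCase that mixes
# in StructureAssertingMixin): structure dicts nest dicts for directories and
# strings for file contents, so creating and then asserting the same dict
# round-trips cleanly.
def _exampleStructureRoundTrip(case):
    structure = {"README": "top-level contents",
                 "src": {"module.py": "pass\n"}}
    root = FilePath(case.mktemp())
    root.createDirectory()
    case.createStructure(root, structure)
    case.assertStructure(root, structure)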
class ChangeVersionTest(TestCase, StructureAssertingMixin):
"""
Twisted has the ability to change versions.
"""
def makeFile(self, relativePath, content):
"""
Create a file with the given content relative to a temporary directory.
        @param relativePath: The path, relative to a temporary directory, of
            the file to create.
        @param content: The content that the file will have.
        @return: A L{FilePath} pointing at the created file.
"""
baseDirectory = FilePath(self.mktemp())
directory, filename = os.path.split(relativePath)
directory = baseDirectory.preauthChild(directory)
directory.makedirs()
file = directory.child(filename)
directory.child(filename).setContent(content)
return file
def test_getNextVersion(self):
"""
When calculating the next version to release when a release is
happening in the same year as the last release, the minor version
number is incremented.
"""
now = date.today()
major = now.year - VERSION_OFFSET
version = Version("twisted", major, 9, 0)
self.assertEqual(
getNextVersion(version, prerelease=False, patch=False, today=now),
Version("twisted", major, 10, 0))
def test_getNextVersionAfterYearChange(self):
"""
When calculating the next version to release when a release is
happening in a later year, the minor version number is reset to 0.
"""
now = date.today()
major = now.year - VERSION_OFFSET
version = Version("twisted", major - 1, 9, 0)
self.assertEqual(
getNextVersion(version, prerelease=False, patch=False, today=now),
Version("twisted", major, 0, 0))
def test_getNextVersionPreRelease(self):
"""
L{getNextVersion} updates the major to the current year, and resets the
minor when creating a pre-release.
"""
now = date.today()
major = now.year - VERSION_OFFSET
version = Version("twisted", 3, 9, 0)
self.assertEqual(
getNextVersion(version, prerelease=True, patch=False, today=now),
Version("twisted", major, 0, 0, 1))
def test_getNextVersionFinalRelease(self):
"""
L{getNextVersion} resets the pre-release count when making a final
release after a pre-release.
"""
now = date.today()
version = Version("twisted", 3, 9, 0, 1)
self.assertEqual(
getNextVersion(version, prerelease=False, patch=False, today=now),
Version("twisted", 3, 9, 0))
def test_getNextVersionNextPreRelease(self):
"""
L{getNextVersion} just increments the pre-release number when operating
on a pre-release.
"""
now = date.today()
version = Version("twisted", 3, 9, 1, 1)
self.assertEqual(
getNextVersion(version, prerelease=True, patch=False, today=now),
Version("twisted", 3, 9, 1, 2))
def test_getNextVersionPatchRelease(self):
"""
L{getNextVersion} sets the micro number when creating a patch release.
"""
now = date.today()
version = Version("twisted", 3, 9, 0)
self.assertEqual(
getNextVersion(version, prerelease=False, patch=True, today=now),
Version("twisted", 3, 9, 1))
def test_getNextVersionNextPatchRelease(self):
"""
L{getNextVersion} just increments the micro number when creating a
patch release.
"""
now = date.today()
version = Version("twisted", 3, 9, 1)
self.assertEqual(
getNextVersion(version, prerelease=False, patch=True, today=now),
Version("twisted", 3, 9, 2))
def test_getNextVersionNextPatchPreRelease(self):
"""
L{getNextVersion} updates both the micro version and the pre-release
count when making a patch pre-release.
"""
now = date.today()
version = Version("twisted", 3, 9, 1)
self.assertEqual(
getNextVersion(version, prerelease=True, patch=True, today=now),
Version("twisted", 3, 9, 2, 1))
def test_changeVersionInFile(self):
"""
_changeVersionInFile replaces the old version information in a file
with the given new version information.
"""
# The version numbers are arbitrary, the name is only kind of
# arbitrary.
packageName = 'foo'
oldVersion = Version(packageName, 2, 5, 0)
file = self.makeFile('README',
"Hello and welcome to %s." % oldVersion.base())
newVersion = Version(packageName, 7, 6, 0)
_changeVersionInFile(oldVersion, newVersion, file.path)
self.assertEqual(file.getContent(),
"Hello and welcome to %s." % newVersion.base())
def test_changeAllProjectVersions(self):
"""
L{changeAllProjectVersions} changes all version numbers in _version.py
        and README files for all projects as well as in the top-level
README file.
"""
root = FilePath(self.mktemp())
root.createDirectory()
structure = {
"README": "Hi this is 1.0.0.",
"twisted": {
"topfiles": {
"README": "Hi this is 1.0.0"},
"_version.py": genVersion("twisted", 1, 0, 0),
"web": {
"topfiles": {
"README": "Hi this is 1.0.0"},
"_version.py": genVersion("twisted.web", 1, 0, 0)}}}
self.createStructure(root, structure)
releaseDate = date(2010, 1, 1)
changeAllProjectVersions(root, False, False, releaseDate)
outStructure = {
"README": "Hi this is 10.0.0.",
"twisted": {
"topfiles": {
"README": "Hi this is 10.0.0"},
"_version.py": genVersion("twisted", 10, 0, 0),
"web": {
"topfiles": {
"README": "Hi this is 10.0.0"},
"_version.py": genVersion("twisted.web", 10, 0, 0)}}}
self.assertStructure(root, outStructure)
def test_changeAllProjectVersionsPreRelease(self):
"""
L{changeAllProjectVersions} changes all version numbers in _version.py
        and README files for all projects as well as in the top-level
README file. If the old version was a pre-release, it will change the
version in NEWS files as well.
"""
root = FilePath(self.mktemp())
root.createDirectory()
coreNews = ("Twisted Core 1.0.0 (2009-12-25)\n"
"===============================\n"
"\n")
webNews = ("Twisted Web 1.0.0pre1 (2009-12-25)\n"
"==================================\n"
"\n")
structure = {
"README": "Hi this is 1.0.0.",
"NEWS": coreNews + webNews,
"twisted": {
"topfiles": {
"README": "Hi this is 1.0.0",
"NEWS": coreNews},
"_version.py": genVersion("twisted", 1, 0, 0),
"web": {
"topfiles": {
"README": "Hi this is 1.0.0pre1",
"NEWS": webNews},
"_version.py": genVersion("twisted.web", 1, 0, 0, 1)}}}
self.createStructure(root, structure)
releaseDate = date(2010, 1, 1)
changeAllProjectVersions(root, False, False, releaseDate)
coreNews = ("Twisted Core 1.0.0 (2009-12-25)\n"
"===============================\n"
"\n")
webNews = ("Twisted Web 1.0.0 (2010-01-01)\n"
"==============================\n"
"\n")
outStructure = {
"README": "Hi this is 10.0.0.",
"NEWS": coreNews + webNews,
"twisted": {
"topfiles": {
"README": "Hi this is 10.0.0",
"NEWS": coreNews},
"_version.py": genVersion("twisted", 10, 0, 0),
"web": {
"topfiles": {
"README": "Hi this is 1.0.0",
"NEWS": webNews},
"_version.py": genVersion("twisted.web", 1, 0, 0)}}}
self.assertStructure(root, outStructure)
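# A minimal sketch of the year-keyed bumping rule exercised above (restating
# test_getNextVersion): the major number tracks the calendar year via
# VERSION_OFFSET, so a non-patch, non-prerelease bump within the same year
# increments the minor number.
def _exampleNextVersion():
    today = date.today()
    major = today.year - VERSION_OFFSET
    bumped = getNextVersion(Version("twisted", major, 9, 0),
                            prerelease=False, patch=False, today=today)
    assert bumped == Version("twisted", major, 10, 0)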
class ProjectTest(TestCase):
"""
There is a first-class representation of a project.
"""
def assertProjectsEqual(self, observedProjects, expectedProjects):
"""
Assert that two lists of L{Project}s are equal.
"""
self.assertEqual(len(observedProjects), len(expectedProjects))
observedProjects = sorted(observedProjects,
key=operator.attrgetter('directory'))
expectedProjects = sorted(expectedProjects,
key=operator.attrgetter('directory'))
for observed, expected in zip(observedProjects, expectedProjects):
self.assertEqual(observed.directory, expected.directory)
def makeProject(self, version, baseDirectory=None):
"""
Make a Twisted-style project in the given base directory.
@param baseDirectory: The directory to create files in
            (as a L{FilePath}).
@param version: The version information for the project.
@return: L{Project} pointing to the created project.
"""
if baseDirectory is None:
baseDirectory = FilePath(self.mktemp())
baseDirectory.createDirectory()
segments = version.package.split('.')
directory = baseDirectory
for segment in segments:
directory = directory.child(segment)
if not directory.exists():
directory.createDirectory()
directory.child('__init__.py').setContent('')
directory.child('topfiles').createDirectory()
directory.child('topfiles').child('README').setContent(version.base())
replaceProjectVersion(
directory.child('_version.py').path, version)
return Project(directory)
def makeProjects(self, *versions):
"""
Create a series of projects underneath a temporary base directory.
@return: A L{FilePath} for the base directory.
"""
baseDirectory = FilePath(self.mktemp())
baseDirectory.createDirectory()
for version in versions:
self.makeProject(version, baseDirectory)
return baseDirectory
def test_getVersion(self):
"""
Project objects know their version.
"""
version = Version('foo', 2, 1, 0)
project = self.makeProject(version)
self.assertEqual(project.getVersion(), version)
def test_updateVersion(self):
"""
Project objects know how to update the version numbers in those
projects.
"""
project = self.makeProject(Version("bar", 2, 1, 0))
newVersion = Version("bar", 3, 2, 9)
project.updateVersion(newVersion)
self.assertEqual(project.getVersion(), newVersion)
self.assertEqual(
project.directory.child("topfiles").child("README").getContent(),
"3.2.9")
def test_repr(self):
"""
The representation of a Project is Project(directory).
"""
foo = Project(FilePath('bar'))
self.assertEqual(
            repr(foo), 'Project(%r)' % (foo.directory,))
def test_findTwistedStyleProjects(self):
"""
findTwistedStyleProjects finds all projects underneath a particular
directory. A 'project' is defined by the existence of a 'topfiles'
directory and is returned as a Project object.
"""
baseDirectory = self.makeProjects(
Version('foo', 2, 3, 0), Version('foo.bar', 0, 7, 4))
projects = findTwistedProjects(baseDirectory)
self.assertProjectsEqual(
projects,
[Project(baseDirectory.child('foo')),
Project(baseDirectory.child('foo').child('bar'))])
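# A minimal usage sketch (hypothetical helper): a "Twisted-style" project is
# any directory containing a topfiles/ child; findTwistedProjects wraps each
# such directory in a Project, whose getVersion() reads its _version.py.
def _exampleFindProjects(baseDirectory):
    for project in findTwistedProjects(baseDirectory):
        print project.directory.path, project.getVersion().base()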
class UtilityTest(TestCase):
"""
Tests for various utility functions for releasing.
"""
def test_chdir(self):
"""
        Test that runChdirSafe is actually safe, i.e., it still
changes back to the original directory even if an error is
raised.
"""
cwd = os.getcwd()
def chAndBreak():
os.mkdir('releaseCh')
os.chdir('releaseCh')
1 // 0
self.assertRaises(ZeroDivisionError,
release.runChdirSafe, chAndBreak)
self.assertEqual(cwd, os.getcwd())
def test_replaceInFile(self):
"""
L{replaceInFile} replaces data in a file based on a dict. A key from
the dict that is found in the file is replaced with the corresponding
value.
"""
content = 'foo\nhey hey $VER\nbar\n'
outf = open('release.replace', 'w')
outf.write(content)
outf.close()
expected = content.replace('$VER', '2.0.0')
replaceInFile('release.replace', {'$VER': '2.0.0'})
self.assertEqual(open('release.replace').read(), expected)
expected = expected.replace('2.0.0', '3.0.0')
replaceInFile('release.replace', {'2.0.0': '3.0.0'})
self.assertEqual(open('release.replace').read(), expected)
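# A minimal usage sketch (hypothetical filename): replaceInFile performs
# plain substring replacement over an entire file, in place.
def _exampleReplaceInFile():
    f = open('example.replace', 'w')
    f.write('version: $VER\n')
    f.close()
    replaceInFile('example.replace', {'$VER': '2.0.0'})
    assert open('example.replace').read() == 'version: 2.0.0\n'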
class VersionWritingTest(TestCase):
"""
Tests for L{replaceProjectVersion}.
"""
def test_replaceProjectVersion(self):
"""
L{replaceProjectVersion} writes a Python file that defines a
C{version} variable that corresponds to the given name and version
number.
"""
replaceProjectVersion("test_project",
Version("twisted.test_project", 0, 82, 7))
        ns = {'__name__': 'twisted.test_project'}
execfile("test_project", ns)
self.assertEqual(ns["version"].base(), "0.82.7")
def test_replaceProjectVersionWithPrerelease(self):
"""
L{replaceProjectVersion} will write a Version instantiation that
includes a prerelease parameter if necessary.
"""
replaceProjectVersion("test_project",
Version("twisted.test_project", 0, 82, 7,
prerelease=8))
        ns = {'__name__': 'twisted.test_project'}
execfile("test_project", ns)
self.assertEqual(ns["version"].base(), "0.82.7pre8")
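# A minimal usage sketch (hypothetical filename): replaceProjectVersion
# writes a module defining a ``version`` attribute, which execfile can load
# back into a namespace.
def _exampleReplaceProjectVersion():
    replaceProjectVersion("example_version.py",
                          Version("twisted.example", 1, 2, 3))
    ns = {}
    execfile("example_version.py", ns)
    assert ns["version"].base() == "1.2.3"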
class BuilderTestsMixin(XMLAssertionMixin):
"""
A mixin class which provides various methods for creating sample Lore input
and output.
@cvar template: The lore template that will be used to prepare sample
output.
@type template: C{str}
@ivar docCounter: A counter which is incremented every time input is
generated and which is included in the documents.
@type docCounter: C{int}
"""
template = '''
<html>
<head><title>Yo:</title></head>
<body>
<div class="body" />
<a href="index.html">Index</a>
<span class="version">Version: </span>
</body>
</html>
'''
def setUp(self):
"""
Initialize the doc counter which ensures documents are unique.
"""
self.docCounter = 0
def getArbitraryOutput(self, version, counter, prefix="", apiBaseURL="%s"):
"""
Get the correct HTML output for the arbitrary input returned by
L{getArbitraryLoreInput} for the given parameters.
@param version: The version string to include in the output.
@type version: C{str}
@param counter: A counter to include in the output.
@type counter: C{int}
"""
document = """\
<?xml version="1.0"?><html>
<head><title>Yo:Hi! Title: %(count)d</title></head>
<body>
<div class="content">Hi! %(count)d<div class="API"><a href="%(foobarLink)s"
title="foobar">foobar</a></div></div>
<a href="%(prefix)sindex.html">Index</a>
<span class="version">Version: %(version)s</span>
</body>
</html>"""
# Try to normalize irrelevant whitespace.
return dom.parseString(
document % {"count": counter, "prefix": prefix,
"version": version,
"foobarLink": apiBaseURL % ("foobar",)}).toxml('utf-8')
def getArbitraryLoreInput(self, counter):
"""
Get an arbitrary, unique (for this test case) string of lore input.
@param counter: A counter to include in the input.
@type counter: C{int}
"""
template = (
'<html>'
'<head><title>Hi! Title: %(count)s</title></head>'
'<body>'
'Hi! %(count)s'
'<div class="API">foobar</div>'
'</body>'
'</html>')
return template % {"count": counter}
def getArbitraryLoreInputAndOutput(self, version, prefix="",
apiBaseURL="%s"):
"""
        Get an input document along with the expected output for Lore run on
        that input document, assuming an appropriately-specified C{self.template}.
@param version: A version string to include in the input and output.
@type version: C{str}
@param prefix: The prefix to include in the link to the index.
@type prefix: C{str}
@return: A two-tuple of input and expected output.
@rtype: C{(str, str)}.
"""
self.docCounter += 1
return (self.getArbitraryLoreInput(self.docCounter),
self.getArbitraryOutput(version, self.docCounter,
prefix=prefix, apiBaseURL=apiBaseURL))
def getArbitraryManInput(self):
"""
Get an arbitrary man page content.
"""
return """.TH MANHOLE "1" "August 2001" "" ""
.SH NAME
manhole \- Connect to a Twisted Manhole service
.SH SYNOPSIS
.B manhole
.SH DESCRIPTION
manhole is a GTK interface to Twisted Manhole services. You can execute
python code as if at an interactive Python console inside a running Twisted
process with this."""
def getArbitraryManLoreOutput(self):
"""
Get an arbitrary lore input document which represents man-to-lore
        output based on the man page returned from L{getArbitraryManInput}.
"""
return """\
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html><head>
<title>MANHOLE.1</title></head>
<body>
<h1>MANHOLE.1</h1>
<h2>NAME</h2>
<p>manhole - Connect to a Twisted Manhole service
</p>
<h2>SYNOPSIS</h2>
<p><strong>manhole</strong> </p>
<h2>DESCRIPTION</h2>
<p>manhole is a GTK interface to Twisted Manhole services. You can execute
python code as if at an interactive Python console inside a running Twisted
process with this.</p>
</body>
</html>
"""
def getArbitraryManHTMLOutput(self, version, prefix=""):
"""
Get an arbitrary lore output document which represents the lore HTML
output based on the input document returned from
L{getArbitraryManLoreOutput}.
@param version: A version string to include in the document.
@type version: C{str}
@param prefix: The prefix to include in the link to the index.
@type prefix: C{str}
"""
# Try to normalize the XML a little bit.
return dom.parseString("""\
<?xml version="1.0" ?><html>
<head><title>Yo:MANHOLE.1</title></head>
<body>
<div class="content">
<span/>
<h2>NAME<a name="auto0"/></h2>
<p>manhole - Connect to a Twisted Manhole service
</p>
<h2>SYNOPSIS<a name="auto1"/></h2>
<p><strong>manhole</strong> </p>
<h2>DESCRIPTION<a name="auto2"/></h2>
<p>manhole is a GTK interface to Twisted Manhole services. You can execute
python code as if at an interactive Python console inside a running Twisted
process with this.</p>
</div>
<a href="%(prefix)sindex.html">Index</a>
<span class="version">Version: %(version)s</span>
</body>
</html>""" % {
'prefix': prefix, 'version': version}).toxml("utf-8")
def setupTeXFiles(self, howtoDir):
"""
Create a main TeX file with 3 sections in C{howtoDir}.
@param howtoDir: The path in which to create the TeX files.
@return: The main TeX file C{FilePath}.
"""
sections = range(3)
self.setupTeXSections(sections, howtoDir)
return self.setupTeXBook(sections, howtoDir)
def setupTeXSections(self, sections, howtoDir):
"""
        For each section in C{sections}, create a TeX file in C{howtoDir}.
@param sections: A list of sections to create.
@param howtoDir: The path in which to create the TeX files.
"""
for section in sections:
texPath = howtoDir.child("%s.tex" % (section,))
texPath.setContent(
self.getArbitraryOutput("1.2.3", section))
def setupTeXBook(self, sections, howtoDir):
"""
        Set up the main C{book.tex} file referencing C{sections}.
@param sections: A list of sections to reference.
@param howtoDir: The path in which to create the TeX files.
@return: The main TeX file C{FilePath}.
"""
bookTeX = howtoDir.child("book.tex")
bookTeX.setContent(
r"\documentclass{book}" "\n"
r"\begin{document}" "\n" +
"\n".join([r"\input{%s.tex}" % (n,) for n in sections]) +
r"\end{document}" "\n")
return bookTeX
class DocBuilderTestCase(TestCase, BuilderTestsMixin):
"""
Tests for L{DocBuilder}.
Note for future maintainers: The exact byte equality assertions throughout
this suite may need to be updated due to minor differences in lore. They
should not be taken to mean that Lore must maintain the same byte format
forever. Feel free to update the tests when Lore changes, but please be
careful.
"""
skip = loreSkip
def setUp(self):
"""
Set up a few instance variables that will be useful.
@ivar builder: A plain L{DocBuilder}.
@ivar docCounter: An integer to be used as a counter by the
C{getArbitrary...} methods.
@ivar howtoDir: A L{FilePath} representing a directory to be used for
containing Lore documents.
@ivar templateFile: A L{FilePath} representing a file with
C{self.template} as its content.
"""
BuilderTestsMixin.setUp(self)
self.builder = DocBuilder()
self.howtoDir = FilePath(self.mktemp())
self.howtoDir.createDirectory()
self.templateFile = self.howtoDir.child("template.tpl")
self.templateFile.setContent(self.template)
def test_build(self):
"""
The L{DocBuilder} runs lore on all .xhtml files within a directory.
"""
version = "1.2.3"
input1, output1 = self.getArbitraryLoreInputAndOutput(version)
input2, output2 = self.getArbitraryLoreInputAndOutput(version)
self.howtoDir.child("one.xhtml").setContent(input1)
self.howtoDir.child("two.xhtml").setContent(input2)
self.builder.build(version, self.howtoDir, self.howtoDir,
self.templateFile)
out1 = self.howtoDir.child('one.html')
out2 = self.howtoDir.child('two.html')
self.assertXMLEqual(out1.getContent(), output1)
self.assertXMLEqual(out2.getContent(), output2)
def test_noDocumentsFound(self):
"""
The C{build} method raises L{NoDocumentsFound} if there are no
.xhtml files in the given directory.
"""
self.assertRaises(
NoDocumentsFound,
self.builder.build, "1.2.3", self.howtoDir, self.howtoDir,
self.templateFile)
def test_parentDocumentLinking(self):
"""
The L{DocBuilder} generates correct links from documents to
template-generated links like stylesheets and index backreferences.
"""
input = self.getArbitraryLoreInput(0)
tutoDir = self.howtoDir.child("tutorial")
tutoDir.createDirectory()
tutoDir.child("child.xhtml").setContent(input)
self.builder.build("1.2.3", self.howtoDir, tutoDir, self.templateFile)
outFile = tutoDir.child('child.html')
self.assertIn('<a href="../index.html">Index</a>',
outFile.getContent())
def test_siblingDirectoryDocumentLinking(self):
"""
        It may be necessary to generate documentation in a directory foo/bar
        while the stylesheet and indexes are located in foo/baz. Such
        resources should be linked to appropriately.
"""
input = self.getArbitraryLoreInput(0)
resourceDir = self.howtoDir.child("resources")
docDir = self.howtoDir.child("docs")
docDir.createDirectory()
docDir.child("child.xhtml").setContent(input)
self.builder.build("1.2.3", resourceDir, docDir, self.templateFile)
outFile = docDir.child('child.html')
self.assertIn('<a href="../resources/index.html">Index</a>',
outFile.getContent())
def test_apiLinking(self):
"""
The L{DocBuilder} generates correct links from documents to API
documentation.
"""
version = "1.2.3"
input, output = self.getArbitraryLoreInputAndOutput(version)
self.howtoDir.child("one.xhtml").setContent(input)
self.builder.build(version, self.howtoDir, self.howtoDir,
self.templateFile, "scheme:apilinks/%s.ext")
out = self.howtoDir.child('one.html')
self.assertIn(
'<a href="scheme:apilinks/foobar.ext" title="foobar">foobar</a>',
out.getContent())
def test_deleteInput(self):
"""
L{DocBuilder.build} can be instructed to delete the input files after
generating the output based on them.
"""
input1 = self.getArbitraryLoreInput(0)
self.howtoDir.child("one.xhtml").setContent(input1)
self.builder.build("whatever", self.howtoDir, self.howtoDir,
self.templateFile, deleteInput=True)
self.assertTrue(self.howtoDir.child('one.html').exists())
self.assertFalse(self.howtoDir.child('one.xhtml').exists())
def test_doNotDeleteInput(self):
"""
Input will not be deleted by default.
"""
input1 = self.getArbitraryLoreInput(0)
self.howtoDir.child("one.xhtml").setContent(input1)
self.builder.build("whatever", self.howtoDir, self.howtoDir,
self.templateFile)
self.assertTrue(self.howtoDir.child('one.html').exists())
self.assertTrue(self.howtoDir.child('one.xhtml').exists())
def test_getLinkrelToSameDirectory(self):
"""
If the doc and resource directories are the same, the linkrel should be
an empty string.
"""
linkrel = self.builder.getLinkrel(FilePath("/foo/bar"),
FilePath("/foo/bar"))
self.assertEqual(linkrel, "")
def test_getLinkrelToParentDirectory(self):
"""
If the doc directory is a child of the resource directory, the linkrel
should make use of '..'.
"""
linkrel = self.builder.getLinkrel(FilePath("/foo"),
FilePath("/foo/bar"))
self.assertEqual(linkrel, "../")
def test_getLinkrelToSibling(self):
"""
If the doc directory is a sibling of the resource directory, the
linkrel should make use of '..' and a named segment.
"""
linkrel = self.builder.getLinkrel(FilePath("/foo/howto"),
FilePath("/foo/examples"))
self.assertEqual(linkrel, "../howto/")
def test_getLinkrelToUncle(self):
"""
If the doc directory is a sibling of the parent of the resource
directory, the linkrel should make use of multiple '..'s and a named
segment.
"""
linkrel = self.builder.getLinkrel(FilePath("/foo/howto"),
FilePath("/foo/examples/quotes"))
self.assertEqual(linkrel, "../../howto/")
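# A minimal sketch (restating the getLinkrel tests above): getLinkrel
# computes the href prefix a generated page needs in order to reach
# resources such as index.html located in another directory.
def _exampleGetLinkrel():
    builder = DocBuilder()
    assert builder.getLinkrel(FilePath("/foo"), FilePath("/foo/bar")) == "../"
    assert builder.getLinkrel(FilePath("/foo/howto"),
                              FilePath("/foo/examples")) == "../howto/"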
class BuildDocsScriptTests(TestCase, BuilderTestsMixin,
StructureAssertingMixin):
"""
Tests for L{BuildDocsScript}.
"""
def setUp(self):
"""
Create a L{BuildDocsScript} in C{self.script}.
"""
BuilderTestsMixin.setUp(self)
self.script = BuildDocsScript()
def test_buildDocs(self):
"""
        L{BuildDocsScript.buildDocs} generates Lore pages from man pages,
        turns all Lore pages into HTML, and builds the PDF book.
"""
rootDir = FilePath(self.mktemp())
rootDir.createDirectory()
loreInput, loreOutput = self.getArbitraryLoreInputAndOutput(
"10.0.0",
apiBaseURL="http://twistedmatrix.com/documents/10.0.0/api/%s.html")
coreIndexInput, coreIndexOutput = self.getArbitraryLoreInputAndOutput(
"10.0.0", prefix="howto/",
apiBaseURL="http://twistedmatrix.com/documents/10.0.0/api/%s.html")
manInput = self.getArbitraryManInput()
manOutput = self.getArbitraryManHTMLOutput("10.0.0", "../howto/")
structure = {
"LICENSE": "copyright!",
"twisted": {"_version.py": genVersion("twisted", 10, 0, 0)},
"doc": {"core": {"index.xhtml": coreIndexInput,
"howto": {"template.tpl": self.template,
"index.xhtml": loreInput},
"man": {"twistd.1": manInput}}}}
outStructure = {
"LICENSE": "copyright!",
"twisted": {"_version.py": genVersion("twisted", 10, 0, 0)},
"doc": {"core": {"index.html": coreIndexOutput,
"howto": {"template.tpl": self.template,
"index.html": loreOutput},
"man": {"twistd.1": manInput,
"twistd-man.html": manOutput}}}}
self.createStructure(rootDir, structure)
howtoDir = rootDir.descendant(["doc", "core", "howto"])
self.setupTeXFiles(howtoDir)
templateFile = howtoDir.child("template.tpl")
self.script.buildDocs(rootDir, templateFile)
howtoDir.child("book.tex").remove()
howtoDir.child("book.pdf").remove()
self.assertStructure(rootDir, outStructure)
test_buildDocs.skip = latexSkip or loreSkip
def test_docsBuilderScriptMainRequiresThreeArguments(self):
"""
        SystemExit is raised when an incorrect number of command line
        arguments is passed to the main documentation building script.
"""
self.assertRaises(SystemExit, self.script.main, [])
self.assertRaises(SystemExit, self.script.main, ["foo"])
self.assertRaises(SystemExit, self.script.main, ["foo", "bar", "baz"])
def test_docsBuilderScriptMain(self):
"""
The main documentation building script invokes C{buildDocs} with the
arguments passed to it cast as L{FilePath}.
"""
calls = []
self.script.buildDocs = lambda a, b: calls.append((a, b))
self.script.main(["hello", "there"])
self.assertEqual(calls, [(FilePath("hello"), FilePath("there"))])
class APIBuilderTestCase(TestCase):
"""
Tests for L{APIBuilder}.
"""
skip = pydoctorSkip
def test_build(self):
"""
L{APIBuilder.build} writes an index file which includes the name of the
project specified.
"""
stdout = StringIO()
self.patch(sys, 'stdout', stdout)
projectName = "Foobar"
packageName = "quux"
projectURL = "scheme:project"
sourceURL = "scheme:source"
docstring = "text in docstring"
privateDocstring = "should also appear in output"
inputPath = FilePath(self.mktemp()).child(packageName)
inputPath.makedirs()
inputPath.child("__init__.py").setContent(
"def foo():\n"
" '%s'\n"
"def _bar():\n"
" '%s'" % (docstring, privateDocstring))
outputPath = FilePath(self.mktemp())
outputPath.makedirs()
builder = APIBuilder()
builder.build(projectName, projectURL, sourceURL, inputPath,
outputPath)
indexPath = outputPath.child("index.html")
self.assertTrue(
indexPath.exists(),
"API index %r did not exist." % (outputPath.path,))
self.assertIn(
'<a href="%s">%s</a>' % (projectURL, projectName),
indexPath.getContent(),
"Project name/location not in file contents.")
quuxPath = outputPath.child("quux.html")
self.assertTrue(
quuxPath.exists(),
"Package documentation file %r did not exist." % (quuxPath.path,))
self.assertIn(
docstring, quuxPath.getContent(),
"Docstring not in package documentation file.")
self.assertIn(
'<a href="%s/%s">View Source</a>' % (sourceURL, packageName),
quuxPath.getContent())
self.assertIn(
'<a href="%s/%s/__init__.py#L1" class="functionSourceLink">' % (
sourceURL, packageName),
quuxPath.getContent())
self.assertIn(privateDocstring, quuxPath.getContent())
# There should also be a page for the foo function in quux.
self.assertTrue(quuxPath.sibling('quux.foo.html').exists())
self.assertEqual(stdout.getvalue(), '')
def test_buildWithPolicy(self):
"""
L{BuildAPIDocsScript.buildAPIDocs} builds the API docs with values
appropriate for the Twisted project.
"""
stdout = StringIO()
self.patch(sys, 'stdout', stdout)
docstring = "text in docstring"
projectRoot = FilePath(self.mktemp())
packagePath = projectRoot.child("twisted")
packagePath.makedirs()
packagePath.child("__init__.py").setContent(
"def foo():\n"
" '%s'\n" % (docstring,))
packagePath.child("_version.py").setContent(
genVersion("twisted", 1, 0, 0))
outputPath = FilePath(self.mktemp())
script = BuildAPIDocsScript()
script.buildAPIDocs(projectRoot, outputPath)
indexPath = outputPath.child("index.html")
self.assertTrue(
indexPath.exists(),
"API index %r did not exist." % (outputPath.path,))
self.assertIn(
'<a href="http://twistedmatrix.com/">Twisted</a>',
indexPath.getContent(),
"Project name/location not in file contents.")
twistedPath = outputPath.child("twisted.html")
self.assertTrue(
twistedPath.exists(),
"Package documentation file %r did not exist."
% (twistedPath.path,))
self.assertIn(
docstring, twistedPath.getContent(),
"Docstring not in package documentation file.")
        # Here we check that it figured out the correct version based on the
        # source code.
self.assertIn(
'<a href="http://twistedmatrix.com/trac/browser/tags/releases/'
'twisted-1.0.0/twisted">View Source</a>',
twistedPath.getContent())
self.assertEqual(stdout.getvalue(), '')
def test_apiBuilderScriptMainRequiresTwoArguments(self):
"""
        SystemExit is raised when an incorrect number of command line
        arguments is passed to the API building script.
"""
script = BuildAPIDocsScript()
self.assertRaises(SystemExit, script.main, [])
self.assertRaises(SystemExit, script.main, ["foo"])
self.assertRaises(SystemExit, script.main, ["foo", "bar", "baz"])
def test_apiBuilderScriptMain(self):
"""
The API building script invokes the same code that
L{test_buildWithPolicy} tests.
"""
script = BuildAPIDocsScript()
calls = []
script.buildAPIDocs = lambda a, b: calls.append((a, b))
script.main(["hello", "there"])
self.assertEqual(calls, [(FilePath("hello"), FilePath("there"))])
class ManBuilderTestCase(TestCase, BuilderTestsMixin):
"""
Tests for L{ManBuilder}.
"""
skip = loreSkip
def setUp(self):
"""
Set up a few instance variables that will be useful.
@ivar builder: A plain L{ManBuilder}.
@ivar manDir: A L{FilePath} representing a directory to be used for
containing man pages.
"""
BuilderTestsMixin.setUp(self)
self.builder = ManBuilder()
self.manDir = FilePath(self.mktemp())
self.manDir.createDirectory()
def test_noDocumentsFound(self):
"""
L{ManBuilder.build} raises L{NoDocumentsFound} if there are no
.1 files in the given directory.
"""
self.assertRaises(NoDocumentsFound, self.builder.build, self.manDir)
def test_build(self):
"""
        Check that L{ManBuilder.build} finds the man page in the directory and
        successfully produces Lore content.
"""
manContent = self.getArbitraryManInput()
self.manDir.child('test1.1').setContent(manContent)
self.builder.build(self.manDir)
output = self.manDir.child('test1-man.xhtml').getContent()
expected = self.getArbitraryManLoreOutput()
# No-op on *nix, fix for windows
expected = expected.replace('\n', os.linesep)
self.assertEqual(output, expected)
def test_toHTML(self):
"""
        Check that the content output by C{build} is usable as input to
L{DocBuilder.build}.
"""
manContent = self.getArbitraryManInput()
self.manDir.child('test1.1').setContent(manContent)
self.builder.build(self.manDir)
templateFile = self.manDir.child("template.tpl")
templateFile.setContent(DocBuilderTestCase.template)
docBuilder = DocBuilder()
docBuilder.build("1.2.3", self.manDir, self.manDir,
templateFile)
output = self.manDir.child('test1-man.html').getContent()
self.assertXMLEqual(
output,
"""\
<?xml version="1.0" ?><html>
<head><title>Yo:MANHOLE.1</title></head>
<body>
<div class="content">
<span/>
<h2>NAME<a name="auto0"/></h2>
<p>manhole - Connect to a Twisted Manhole service
</p>
<h2>SYNOPSIS<a name="auto1"/></h2>
<p><strong>manhole</strong> </p>
<h2>DESCRIPTION<a name="auto2"/></h2>
<p>manhole is a GTK interface to Twisted Manhole services. You can execute
python code as if at an interactive Python console inside a running Twisted
process with this.</p>
</div>
<a href="index.html">Index</a>
<span class="version">Version: 1.2.3</span>
</body>
</html>""")
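# A minimal sketch (hypothetical helper): ManBuilder is the first stage of a
# two-step pipeline -- troff man pages become Lore XHTML (test1.1 ->
# test1-man.xhtml), which DocBuilder then renders to templated HTML
# (test1-man.html), as test_toHTML demonstrates.
def _exampleManPipeline(manDir, templateFile):
    ManBuilder().build(manDir)
    DocBuilder().build("1.2.3", manDir, manDir, templateFile)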
class BookBuilderTests(TestCase, BuilderTestsMixin):
"""
Tests for L{BookBuilder}.
"""
skip = latexSkip or loreSkip
def setUp(self):
"""
Make a directory into which to place temporary files.
"""
self.docCounter = 0
self.howtoDir = FilePath(self.mktemp())
self.howtoDir.makedirs()
def getArbitraryOutput(self, version, counter, prefix="", apiBaseURL=None):
"""
Create and return a C{str} containing the LaTeX document which is
expected as the output for processing the result of the document
returned by C{self.getArbitraryLoreInput(counter)}.
"""
path = self.howtoDir.child("%d.xhtml" % (counter,)).path
return (
r'\section{Hi! Title: %(count)s\label{%(path)s}}'
'\n'
r'Hi! %(count)sfoobar') % {'count': counter, 'path': path}
def test_runSuccess(self):
"""
L{BookBuilder.run} executes the command it is passed and returns a
string giving the stdout and stderr of the command if it completes
successfully.
"""
builder = BookBuilder()
self.assertEqual(
builder.run([
sys.executable, '-c',
'import sys; '
'sys.stdout.write("hi\\n"); '
'sys.stdout.flush(); '
'sys.stderr.write("bye\\n"); '
'sys.stderr.flush()']),
"hi\nbye\n")
def test_runFailed(self):
"""
L{BookBuilder.run} executes the command it is passed and raises
L{CommandFailed} if it completes unsuccessfully.
"""
builder = BookBuilder()
exc = self.assertRaises(
CommandFailed, builder.run,
[sys.executable, '-c', 'print "hi"; raise SystemExit(1)'])
self.assertEqual(exc.exitStatus, 1)
self.assertEqual(exc.exitSignal, None)
self.assertEqual(exc.output, "hi\n")
def test_runSignaled(self):
"""
L{BookBuilder.run} executes the command it is passed and raises
L{CommandFailed} if it exits due to a signal.
"""
builder = BookBuilder()
exc = self.assertRaises(
CommandFailed, builder.run,
[sys.executable, '-c',
'import sys; print "hi"; sys.stdout.flush(); '
'import os; os.kill(os.getpid(), 9)'])
self.assertEqual(exc.exitSignal, 9)
self.assertEqual(exc.exitStatus, None)
self.assertEqual(exc.output, "hi\n")
def test_buildTeX(self):
"""
L{BookBuilder.buildTeX} writes intermediate TeX files for all lore
input files in a directory.
"""
version = "3.2.1"
input1, output1 = self.getArbitraryLoreInputAndOutput(version)
input2, output2 = self.getArbitraryLoreInputAndOutput(version)
# Filenames are chosen by getArbitraryOutput to match the counter used
# by getArbitraryLoreInputAndOutput.
self.howtoDir.child("1.xhtml").setContent(input1)
self.howtoDir.child("2.xhtml").setContent(input2)
builder = BookBuilder()
builder.buildTeX(self.howtoDir)
self.assertEqual(self.howtoDir.child("1.tex").getContent(), output1)
self.assertEqual(self.howtoDir.child("2.tex").getContent(), output2)
def test_buildTeXRejectsInvalidDirectory(self):
"""
L{BookBuilder.buildTeX} raises L{ValueError} if passed a directory
which does not exist.
"""
builder = BookBuilder()
self.assertRaises(
ValueError, builder.buildTeX, self.howtoDir.temporarySibling())
def test_buildTeXOnlyBuildsXHTML(self):
"""
        L{BookBuilder.buildTeX} ignores files which don't end with
".xhtml".
"""
# Hopefully ">" is always a parse error from microdom!
self.howtoDir.child("not-input.dat").setContent(">")
self.test_buildTeX()
def test_stdout(self):
"""
L{BookBuilder.buildTeX} does not write to stdout.
"""
stdout = StringIO()
self.patch(sys, 'stdout', stdout)
        # Suppress warnings so that any old-style plugins that Lore queries
        # for don't confuse the assertion below.  See #3070.
self.patch(warnings, 'warn', lambda *a, **kw: None)
self.test_buildTeX()
self.assertEqual(stdout.getvalue(), '')
def test_buildPDFRejectsInvalidBookFilename(self):
"""
L{BookBuilder.buildPDF} raises L{ValueError} if the book filename does
not end with ".tex".
"""
builder = BookBuilder()
self.assertRaises(
ValueError,
builder.buildPDF,
FilePath(self.mktemp()).child("foo"),
None,
None)
def test_buildPDF(self):
"""
L{BookBuilder.buildPDF} creates a PDF given an index tex file and a
directory containing .tex files.
"""
bookPath = self.setupTeXFiles(self.howtoDir)
outputPath = FilePath(self.mktemp())
builder = BookBuilder()
builder.buildPDF(bookPath, self.howtoDir, outputPath)
self.assertTrue(outputPath.exists())
def test_buildPDFLongPath(self):
"""
L{BookBuilder.buildPDF} succeeds even if the paths it is operating on
are very long.
C{ps2pdf13} seems to have problems when path names are long. This test
verifies that even if inputs have long paths, generation still
succeeds.
"""
# Make it long.
self.howtoDir = self.howtoDir.child(
"x" * 128).child("x" * 128).child("x" * 128)
self.howtoDir.makedirs()
# This will use the above long path.
bookPath = self.setupTeXFiles(self.howtoDir)
outputPath = FilePath(self.mktemp())
builder = BookBuilder()
builder.buildPDF(bookPath, self.howtoDir, outputPath)
self.assertTrue(outputPath.exists())
def test_buildPDFRunsLaTeXThreeTimes(self):
"""
L{BookBuilder.buildPDF} runs C{latex} three times.
"""
class InspectableBookBuilder(BookBuilder):
def __init__(self):
BookBuilder.__init__(self)
self.commands = []
def run(self, command):
"""
Record the command and then execute it.
"""
self.commands.append(command)
return BookBuilder.run(self, command)
bookPath = self.setupTeXFiles(self.howtoDir)
outputPath = FilePath(self.mktemp())
builder = InspectableBookBuilder()
builder.buildPDF(bookPath, self.howtoDir, outputPath)
# These string comparisons are very fragile. It would be better to
# have a test which asserted the correctness of the contents of the
# output files. I don't know how one could do that, though. -exarkun
latex1, latex2, latex3, dvips, ps2pdf13 = builder.commands
self.assertEqual(latex1, latex2)
self.assertEqual(latex2, latex3)
self.assertEqual(
latex1[:1], ["latex"],
"LaTeX command %r does not seem right." % (latex1,))
self.assertEqual(
latex1[-1:], [bookPath.path],
"LaTeX command %r does not end with the book path (%r)." % (
latex1, bookPath.path))
self.assertEqual(
dvips[:1], ["dvips"],
"dvips command %r does not seem right." % (dvips,))
self.assertEqual(
ps2pdf13[:1], ["ps2pdf13"],
"ps2pdf13 command %r does not seem right." % (ps2pdf13,))
def test_noSideEffects(self):
"""
The working directory is the same before and after a call to
L{BookBuilder.buildPDF}. Also the contents of the directory containing
the input book are the same before and after the call.
"""
startDir = os.getcwd()
bookTeX = self.setupTeXFiles(self.howtoDir)
startTeXSiblings = bookTeX.parent().children()
startHowtoChildren = self.howtoDir.children()
builder = BookBuilder()
builder.buildPDF(bookTeX, self.howtoDir, FilePath(self.mktemp()))
self.assertEqual(startDir, os.getcwd())
self.assertEqual(startTeXSiblings, bookTeX.parent().children())
self.assertEqual(startHowtoChildren, self.howtoDir.children())
def test_failedCommandProvidesOutput(self):
"""
If a subprocess fails, L{BookBuilder.buildPDF} raises L{CommandFailed}
with the subprocess's output and leaves the temporary directory as a
sibling of the book path.
"""
bookTeX = FilePath(self.mktemp() + ".tex")
builder = BookBuilder()
inputState = bookTeX.parent().children()
exc = self.assertRaises(
CommandFailed,
builder.buildPDF,
bookTeX, self.howtoDir, FilePath(self.mktemp()))
self.assertTrue(exc.output)
newOutputState = set(bookTeX.parent().children()) - set(inputState)
self.assertEqual(len(newOutputState), 1)
workPath = newOutputState.pop()
self.assertTrue(
workPath.isdir(),
"Expected work path %r was not a directory." % (workPath.path,))
def test_build(self):
"""
L{BookBuilder.build} generates a pdf book file from some lore input
files.
"""
sections = range(1, 4)
for sectionNumber in sections:
self.howtoDir.child("%d.xhtml" % (sectionNumber,)).setContent(
self.getArbitraryLoreInput(sectionNumber))
bookTeX = self.setupTeXBook(sections, self.howtoDir)
bookPDF = FilePath(self.mktemp())
builder = BookBuilder()
builder.build(self.howtoDir, [self.howtoDir], bookTeX, bookPDF)
self.assertTrue(bookPDF.exists())
def test_buildRemovesTemporaryLaTeXFiles(self):
"""
L{BookBuilder.build} removes the intermediate LaTeX files it creates.
"""
sections = range(1, 4)
for sectionNumber in sections:
self.howtoDir.child("%d.xhtml" % (sectionNumber,)).setContent(
self.getArbitraryLoreInput(sectionNumber))
bookTeX = self.setupTeXBook(sections, self.howtoDir)
bookPDF = FilePath(self.mktemp())
builder = BookBuilder()
builder.build(self.howtoDir, [self.howtoDir], bookTeX, bookPDF)
self.assertEqual(
set(self.howtoDir.listdir()),
set([bookTeX.basename()] + ["%d.xhtml" % (n,) for n in sections]))
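# A sketch of the toolchain exercised by test_buildPDFRunsLaTeXThreeTimes
# above (command arguments elided):
#
#     latex book.tex   (run three times, so cross-references settle)
#     dvips ...
#     ps2pdf13 ...
#
# which is why all three tools must be installed for these tests to run.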
class FilePathDeltaTest(TestCase):
"""
Tests for L{filePathDelta}.
"""
def test_filePathDeltaSubdir(self):
"""
L{filePathDelta} can create a simple relative path to a child path.
"""
self.assertEqual(filePathDelta(FilePath("/foo/bar"),
FilePath("/foo/bar/baz")),
["baz"])
def test_filePathDeltaSiblingDir(self):
"""
L{filePathDelta} can traverse upwards to create relative paths to
siblings.
"""
self.assertEqual(filePathDelta(FilePath("/foo/bar"),
FilePath("/foo/baz")),
["..", "baz"])
def test_filePathNoCommonElements(self):
"""
L{filePathDelta} can create relative paths to totally unrelated paths
for maximum portability.
"""
self.assertEqual(filePathDelta(FilePath("/foo/bar"),
FilePath("/baz/quux")),
["..", "..", "baz", "quux"])
def test_filePathDeltaSimilarEndElements(self):
"""
L{filePathDelta} doesn't take into account final elements when
comparing 2 paths, but stops at the first difference.
"""
self.assertEqual(filePathDelta(FilePath("/foo/bar/bar/spam"),
FilePath("/foo/bar/baz/spam")),
["..", "..", "baz", "spam"])
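# A minimal sketch (restating the tests above): filePathDelta walks up from
# the first path to the deepest common ancestor, then down to the second.
def _exampleFilePathDelta():
    assert filePathDelta(FilePath("/foo/bar"),
                         FilePath("/baz/quux")) == ["..", "..", "baz", "quux"]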
class NewsBuilderTests(TestCase, StructureAssertingMixin):
"""
Tests for L{NewsBuilder}.
"""
skip = svnSkip
def setUp(self):
"""
Create a fake project and stuff some basic structure and content into
it.
"""
self.builder = NewsBuilder()
self.project = FilePath(self.mktemp())
self.project.createDirectory()
self.existingText = 'Here is stuff which was present previously.\n'
self.createStructure(
self.project, {
'NEWS': self.existingText,
'5.feature': 'We now support the web.\n',
'12.feature': 'The widget is more robust.\n',
'15.feature': (
'A very long feature which takes many words to '
'describe with any accuracy was introduced so that '
'the line wrapping behavior of the news generating '
'code could be verified.\n'),
'16.feature': (
'A simpler feature\ndescribed on multiple lines\n'
'was added.\n'),
'23.bugfix': 'Broken stuff was fixed.\n',
'25.removal': 'Stupid stuff was deprecated.\n',
'30.misc': '',
'35.misc': '',
'40.doc': 'foo.bar.Baz.quux',
'41.doc': 'writing Foo servers'})
def svnCommit(self, project=None):
"""
Make the C{project} directory a valid subversion directory with all
files committed.
"""
if project is None:
project = self.project
repositoryPath = self.mktemp()
repository = FilePath(repositoryPath)
runCommand(["svnadmin", "create", repository.path])
runCommand(["svn", "checkout", "file://" + repository.path,
project.path])
runCommand(["svn", "add"] + glob.glob(project.path + "/*"))
runCommand(["svn", "commit", project.path, "-m", "yay"])
def test_today(self):
"""
L{NewsBuilder._today} returns today's date in YYYY-MM-DD form.
"""
self.assertEqual(
self.builder._today(), date.today().strftime('%Y-%m-%d'))
def test_findFeatures(self):
"""
When called with L{NewsBuilder._FEATURE}, L{NewsBuilder._findChanges}
        returns a list of feature ticket numbers and descriptions as a list of
two-tuples.
"""
features = self.builder._findChanges(
self.project, self.builder._FEATURE)
self.assertEqual(
features,
[(5, "We now support the web."),
(12, "The widget is more robust."),
(15,
"A very long feature which takes many words to describe with "
"any accuracy was introduced so that the line wrapping behavior "
"of the news generating code could be verified."),
(16, "A simpler feature described on multiple lines was added.")])
def test_findBugfixes(self):
"""
When called with L{NewsBuilder._BUGFIX}, L{NewsBuilder._findChanges}
returns a list of bugfix ticket numbers and descriptions as a list of
two-tuples.
"""
bugfixes = self.builder._findChanges(
self.project, self.builder._BUGFIX)
self.assertEqual(
bugfixes,
[(23, 'Broken stuff was fixed.')])
def test_findRemovals(self):
"""
When called with L{NewsBuilder._REMOVAL}, L{NewsBuilder._findChanges}
returns a list of removal/deprecation ticket numbers and descriptions
as a list of two-tuples.
"""
removals = self.builder._findChanges(
self.project, self.builder._REMOVAL)
self.assertEqual(
removals,
[(25, 'Stupid stuff was deprecated.')])
def test_findDocumentation(self):
"""
When called with L{NewsBuilder._DOC}, L{NewsBuilder._findChanges}
returns a list of documentation ticket numbers and descriptions as a
list of two-tuples.
"""
doc = self.builder._findChanges(
self.project, self.builder._DOC)
self.assertEqual(
doc,
[(40, 'foo.bar.Baz.quux'),
(41, 'writing Foo servers')])
def test_findMiscellaneous(self):
"""
When called with L{NewsBuilder._MISC}, L{NewsBuilder._findChanges}
        returns a list of miscellaneous ticket numbers and descriptions
as a list of two-tuples.
"""
misc = self.builder._findChanges(
self.project, self.builder._MISC)
self.assertEqual(
misc,
[(30, ''),
(35, '')])
def test_writeHeader(self):
"""
L{NewsBuilder._writeHeader} accepts a file-like object opened for
writing and a header string and writes out a news file header to it.
"""
output = StringIO()
self.builder._writeHeader(output, "Super Awesometastic 32.16")
self.assertEqual(
output.getvalue(),
"Super Awesometastic 32.16\n"
"=========================\n"
"\n")
def test_writeSection(self):
"""
L{NewsBuilder._writeSection} accepts a file-like object opened for
writing, a section name, and a list of ticket information (as returned
by L{NewsBuilder._findChanges}) and writes out a section header and all
of the given ticket information.
"""
output = StringIO()
self.builder._writeSection(
output, "Features",
[(3, "Great stuff."),
(17, "Very long line which goes on and on and on, seemingly "
"without end until suddenly without warning it does end.")])
self.assertEqual(
output.getvalue(),
"Features\n"
"--------\n"
" - Great stuff. (#3)\n"
" - Very long line which goes on and on and on, seemingly "
"without end\n"
" until suddenly without warning it does end. (#17)\n"
"\n")
def test_writeMisc(self):
"""
L{NewsBuilder._writeMisc} accepts a file-like object opened for
writing, a section name, and a list of ticket information (as returned
        by L{NewsBuilder._findChanges}) and writes out a section header and all
of the ticket numbers, but excludes any descriptions.
"""
output = StringIO()
self.builder._writeMisc(
output, "Other",
[(x, "") for x in range(2, 50, 3)])
self.assertEqual(
output.getvalue(),
"Other\n"
"-----\n"
" - #2, #5, #8, #11, #14, #17, #20, #23, #26, #29, #32, #35, "
"#38, #41,\n"
" #44, #47\n"
"\n")
def test_build(self):
"""
L{NewsBuilder.build} updates a NEWS file with new features based on the
I{<ticket>.feature} files found in the directory specified.
"""
self.builder.build(
self.project, self.project.child('NEWS'),
"Super Awesometastic 32.16")
results = self.project.child('NEWS').getContent()
self.assertEqual(
results,
'Super Awesometastic 32.16\n'
'=========================\n'
'\n'
'Features\n'
'--------\n'
' - We now support the web. (#5)\n'
' - The widget is more robust. (#12)\n'
' - A very long feature which takes many words to describe '
'with any\n'
' accuracy was introduced so that the line wrapping behavior '
'of the\n'
' news generating code could be verified. (#15)\n'
' - A simpler feature described on multiple lines was '
'added. (#16)\n'
'\n'
'Bugfixes\n'
'--------\n'
' - Broken stuff was fixed. (#23)\n'
'\n'
'Improved Documentation\n'
'----------------------\n'
' - foo.bar.Baz.quux (#40)\n'
' - writing Foo servers (#41)\n'
'\n'
'Deprecations and Removals\n'
'-------------------------\n'
' - Stupid stuff was deprecated. (#25)\n'
'\n'
'Other\n'
'-----\n'
' - #30, #35\n'
'\n\n' + self.existingText)
def test_emptyProjectCalledOut(self):
"""
If no changes exist for a project, I{NEWS} gains a new section for
that project that includes some helpful text about how there were no
interesting changes.
"""
project = FilePath(self.mktemp()).child("twisted")
project.makedirs()
self.createStructure(project, {'NEWS': self.existingText})
self.builder.build(
project, project.child('NEWS'),
"Super Awesometastic 32.16")
results = project.child('NEWS').getContent()
self.assertEqual(
results,
'Super Awesometastic 32.16\n'
'=========================\n'
'\n' +
self.builder._NO_CHANGES +
'\n\n' + self.existingText)
def test_preserveTicketHint(self):
"""
If a I{NEWS} file begins with the two magic lines which point readers
at the issue tracker, those lines are kept at the top of the new file.
"""
news = self.project.child('NEWS')
news.setContent(
'Ticket numbers in this file can be looked up by visiting\n'
'http://twistedmatrix.com/trac/ticket/<number>\n'
'\n'
'Blah blah other stuff.\n')
self.builder.build(self.project, news, "Super Awesometastic 32.16")
self.assertEqual(
news.getContent(),
'Ticket numbers in this file can be looked up by visiting\n'
'http://twistedmatrix.com/trac/ticket/<number>\n'
'\n'
'Super Awesometastic 32.16\n'
'=========================\n'
'\n'
'Features\n'
'--------\n'
' - We now support the web. (#5)\n'
' - The widget is more robust. (#12)\n'
' - A very long feature which takes many words to describe '
'with any\n'
' accuracy was introduced so that the line wrapping behavior '
'of the\n'
' news generating code could be verified. (#15)\n'
' - A simpler feature described on multiple lines was '
'added. (#16)\n'
'\n'
'Bugfixes\n'
'--------\n'
' - Broken stuff was fixed. (#23)\n'
'\n'
'Improved Documentation\n'
'----------------------\n'
' - foo.bar.Baz.quux (#40)\n'
' - writing Foo servers (#41)\n'
'\n'
'Deprecations and Removals\n'
'-------------------------\n'
' - Stupid stuff was deprecated. (#25)\n'
'\n'
'Other\n'
'-----\n'
' - #30, #35\n'
'\n\n'
'Blah blah other stuff.\n')
def test_emptySectionsOmitted(self):
"""
If there are no changes of a particular type (feature, bugfix, etc), no
section for that type is written by L{NewsBuilder.build}.
"""
for ticket in self.project.children():
if ticket.splitext()[1] in ('.feature', '.misc', '.doc'):
ticket.remove()
self.builder.build(
self.project, self.project.child('NEWS'),
'Some Thing 1.2')
self.assertEqual(
self.project.child('NEWS').getContent(),
'Some Thing 1.2\n'
'==============\n'
'\n'
'Bugfixes\n'
'--------\n'
' - Broken stuff was fixed. (#23)\n'
'\n'
'Deprecations and Removals\n'
'-------------------------\n'
' - Stupid stuff was deprecated. (#25)\n'
'\n\n'
'Here is stuff which was present previously.\n')
def test_duplicatesMerged(self):
"""
If two change files have the same contents, they are merged in the
generated news entry.
"""
def feature(s):
return self.project.child(s + '.feature')
feature('5').copyTo(feature('15'))
feature('5').copyTo(feature('16'))
self.builder.build(
self.project, self.project.child('NEWS'),
'Project Name 5.0')
self.assertEqual(
self.project.child('NEWS').getContent(),
'Project Name 5.0\n'
'================\n'
'\n'
'Features\n'
'--------\n'
' - We now support the web. (#5, #15, #16)\n'
' - The widget is more robust. (#12)\n'
'\n'
'Bugfixes\n'
'--------\n'
' - Broken stuff was fixed. (#23)\n'
'\n'
'Improved Documentation\n'
'----------------------\n'
' - foo.bar.Baz.quux (#40)\n'
' - writing Foo servers (#41)\n'
'\n'
'Deprecations and Removals\n'
'-------------------------\n'
' - Stupid stuff was deprecated. (#25)\n'
'\n'
'Other\n'
'-----\n'
' - #30, #35\n'
'\n\n'
'Here is stuff which was present previously.\n')
def createFakeTwistedProject(self):
"""
Create a fake-looking Twisted project to build from.
"""
project = FilePath(self.mktemp()).child("twisted")
project.makedirs()
self.createStructure(
project, {
'NEWS': 'Old boring stuff from the past.\n',
'_version.py': genVersion("twisted", 1, 2, 3),
'topfiles': {
'NEWS': 'Old core news.\n',
'3.feature': 'Third feature addition.\n',
'5.misc': ''},
'conch': {
'_version.py': genVersion("twisted.conch", 3, 4, 5),
'topfiles': {
'NEWS': 'Old conch news.\n',
'7.bugfix': 'Fixed that bug.\n'}}})
return project
def test_buildAll(self):
"""
L{NewsBuilder.buildAll} calls L{NewsBuilder.build} once for each
subproject, passing that subproject's I{topfiles} directory as C{path},
the I{NEWS} file in that directory as C{output}, and the subproject's
name as C{header}, and then again for each subproject with the
top-level I{NEWS} file for C{output}. Blacklisted subprojects are
skipped.
"""
builds = []
builder = NewsBuilder()
builder.build = lambda path, output, header: builds.append((
path, output, header))
builder._today = lambda: '2009-12-01'
project = self.createFakeTwistedProject()
self.svnCommit(project)
builder.buildAll(project)
coreTopfiles = project.child("topfiles")
coreNews = coreTopfiles.child("NEWS")
coreHeader = "Twisted Core 1.2.3 (2009-12-01)"
conchTopfiles = project.child("conch").child("topfiles")
conchNews = conchTopfiles.child("NEWS")
conchHeader = "Twisted Conch 3.4.5 (2009-12-01)"
aggregateNews = project.child("NEWS")
self.assertEqual(
builds,
[(conchTopfiles, conchNews, conchHeader),
(conchTopfiles, aggregateNews, conchHeader),
(coreTopfiles, coreNews, coreHeader),
(coreTopfiles, aggregateNews, coreHeader)])
def test_buildAllAggregate(self):
"""
L{NewsBuilder.buildAll} aggregates I{NEWS} information into the top
files, only deleting fragments once it's done.
"""
builder = NewsBuilder()
project = self.createFakeTwistedProject()
self.svnCommit(project)
builder.buildAll(project)
aggregateNews = project.child("NEWS")
aggregateContent = aggregateNews.getContent()
self.assertIn("Third feature addition", aggregateContent)
self.assertIn("Fixed that bug", aggregateContent)
self.assertIn("Old boring stuff from the past", aggregateContent)
def test_changeVersionInNews(self):
"""
        L{NewsBuilder._changeNewsVersion} replaces the header for an old
        version in a NEWS file with a header for the new version and release
        date.
"""
builder = NewsBuilder()
builder._today = lambda: '2009-12-01'
project = self.createFakeTwistedProject()
self.svnCommit(project)
builder.buildAll(project)
newVersion = Version('TEMPLATE', 7, 7, 14)
coreNews = project.child('topfiles').child('NEWS')
# twisted 1.2.3 is the old version.
builder._changeNewsVersion(
coreNews, "Core", Version("twisted", 1, 2, 3),
newVersion, '2010-01-01')
expectedCore = (
'Twisted Core 7.7.14 (2010-01-01)\n'
'================================\n'
'\n'
'Features\n'
'--------\n'
' - Third feature addition. (#3)\n'
'\n'
'Other\n'
'-----\n'
' - #5\n\n\n')
self.assertEqual(
expectedCore + 'Old core news.\n', coreNews.getContent())
def test_removeNEWSfragments(self):
"""
        L{NewsBuilder.buildAll} removes all the NEWS fragments after the build
process, using the C{svn} C{rm} command.
"""
builder = NewsBuilder()
project = self.createFakeTwistedProject()
self.svnCommit(project)
builder.buildAll(project)
self.assertEqual(5, len(project.children()))
output = runCommand(["svn", "status", project.path])
removed = [line for line in output.splitlines()
if line.startswith("D ")]
self.assertEqual(3, len(removed))
def test_checkSVN(self):
"""
L{NewsBuilder.buildAll} raises L{NotWorkingDirectory} when the given
path is not a SVN checkout.
"""
self.assertRaises(
NotWorkingDirectory, self.builder.buildAll, self.project)
class SphinxBuilderTests(TestCase):
"""
Tests for L{SphinxBuilder}.
@note: This test case depends on twisted.web, which violates the standard
Twisted practice of not having anything in twisted.python depend on
other Twisted packages and opens up the possibility of creating
circular dependencies. Do not use this as an example of how to
structure your dependencies.
@ivar builder: A plain L{SphinxBuilder}.
@ivar sphinxDir: A L{FilePath} representing a directory to be used for
containing a Sphinx project.
@ivar sourceDir: A L{FilePath} representing a directory to be used for
containing the source files for a Sphinx project.
"""
skip = sphinxSkip
confContent = """\
source_suffix = '.rst'
master_doc = 'index'
"""
confContent = textwrap.dedent(confContent)
indexContent = """\
==============
This is a Test
==============
This is only a test
-------------------
In case you hadn't figured it out yet, this is a test.
"""
indexContent = textwrap.dedent(indexContent)
def setUp(self):
"""
Set up a few instance variables that will be useful.
"""
self.builder = SphinxBuilder()
# set up a place for a fake sphinx project
self.sphinxDir = FilePath(self.mktemp())
self.sphinxDir.makedirs()
self.sourceDir = self.sphinxDir.child('source')
self.sourceDir.makedirs()
def createFakeSphinxProject(self):
"""
Create a fake Sphinx project for test purposes.
Creates a fake Sphinx project with the absolute minimum of source
files. This includes a single source file ('index.rst') and the
smallest 'conf.py' file possible in order to find that source file.
"""
self.sourceDir.child("conf.py").setContent(self.confContent)
self.sourceDir.child("index.rst").setContent(self.indexContent)
def verifyFileExists(self, fileDir, fileName):
"""
Helper which verifies that C{fileName} exists in C{fileDir}, has some
content, and that the content is parseable by L{parseXMLString} if the
file extension indicates that it should be html.
@param fileDir: A path to a directory.
@type fileDir: L{FilePath}
@param fileName: The last path segment of a file which may exist within
C{fileDir}.
@type fileName: L{str}
@raise: L{FailTest <twisted.trial.unittest.FailTest>} if
C{fileDir.child(fileName)}:
1. Does not exist.
2. Is empty.
3. In the case where it's a path to a C{.html} file, the
contents at least look enough like HTML to parse according
to microdom's generous criteria.
@return: C{None}
"""
# check that file exists
fpath = fileDir.child(fileName)
self.assertTrue(fpath.exists())
# check that the output files have some content
fcontents = fpath.getContent()
self.assertTrue(len(fcontents) > 0)
# check that the html files are at least html-ish
# this is not a terribly rigorous check
if fpath.path.endswith('.html'):
parseXMLString(fcontents)
def test_build(self):
"""
Creates and builds a fake Sphinx project using a L{SphinxBuilder}.
"""
self.createFakeSphinxProject()
self.builder.build(self.sphinxDir)
# assert some stuff
for each in ['doctrees', 'html']:
fpath = self.sphinxDir.child('build').child(each)
self.assertTrue(fpath.exists())
htmlDir = self.sphinxDir.child('build').child('html')
self.verifyFileExists(htmlDir, 'index.html')
self.verifyFileExists(htmlDir, 'genindex.html')
self.verifyFileExists(htmlDir, 'objects.inv')
self.verifyFileExists(htmlDir, 'search.html')
self.verifyFileExists(htmlDir, 'searchindex.js')
def test_failToBuild(self):
"""
Check that SphinxBuilder.build fails when run against a non-sphinx
directory.
"""
# note no fake sphinx project is created
self.assertRaises(CommandFailed,
self.builder.build,
self.sphinxDir)
class DistributionBuilderTestBase(BuilderTestsMixin, StructureAssertingMixin,
TestCase):
"""
Base for tests of L{DistributionBuilder}.
"""
skip = loreSkip
def setUp(self):
BuilderTestsMixin.setUp(self)
self.rootDir = FilePath(self.mktemp())
self.rootDir.createDirectory()
self.outputDir = FilePath(self.mktemp())
self.outputDir.createDirectory()
self.builder = DistributionBuilder(self.rootDir, self.outputDir)
class DistributionBuilderTest(DistributionBuilderTestBase):
def test_twistedDistribution(self):
"""
The Twisted tarball contains everything in the source checkout, with
built documentation.
"""
loreInput, loreOutput = self.getArbitraryLoreInputAndOutput("10.0.0")
manInput1 = self.getArbitraryManInput()
manOutput1 = self.getArbitraryManHTMLOutput("10.0.0", "../howto/")
manInput2 = self.getArbitraryManInput()
manOutput2 = self.getArbitraryManHTMLOutput("10.0.0", "../howto/")
coreIndexInput, coreIndexOutput = self.getArbitraryLoreInputAndOutput(
"10.0.0", prefix="howto/")
structure = {
"README": "Twisted",
"unrelated": "x",
"LICENSE": "copyright!",
"setup.py": "import toplevel",
"bin": {"web": {"websetroot": "SET ROOT"},
"twistd": "TWISTD"},
"twisted": {
"web": {
"__init__.py": "import WEB",
"topfiles": {"setup.py": "import WEBINSTALL",
"README": "WEB!"}},
"words": {"__init__.py": "import WORDS"},
"plugins": {"twisted_web.py": "import WEBPLUG",
"twisted_words.py": "import WORDPLUG"}},
"doc": {"web": {"howto": {"index.xhtml": loreInput},
"man": {"websetroot.1": manInput2}},
"core": {"howto": {"template.tpl": self.template},
"man": {"twistd.1": manInput1},
"index.xhtml": coreIndexInput}}}
outStructure = {
"README": "Twisted",
"unrelated": "x",
"LICENSE": "copyright!",
"setup.py": "import toplevel",
"bin": {"web": {"websetroot": "SET ROOT"},
"twistd": "TWISTD"},
"twisted": {
"web": {"__init__.py": "import WEB",
"topfiles": {"setup.py": "import WEBINSTALL",
"README": "WEB!"}},
"words": {"__init__.py": "import WORDS"},
"plugins": {"twisted_web.py": "import WEBPLUG",
"twisted_words.py": "import WORDPLUG"}},
"doc": {"web": {"howto": {"index.html": loreOutput},
"man": {"websetroot.1": manInput2,
"websetroot-man.html": manOutput2}},
"core": {"howto": {"template.tpl": self.template},
"man": {"twistd.1": manInput1,
"twistd-man.html": manOutput1},
"index.html": coreIndexOutput}}}
self.createStructure(self.rootDir, structure)
outputFile = self.builder.buildTwisted("10.0.0")
self.assertExtractedStructure(outputFile, outStructure)
def test_excluded(self):
"""
bin/admin and doc/historic are excluded from the Twisted tarball.
"""
structure = {
"bin": {"admin": {"blah": "ADMIN"},
"twistd": "TWISTD"},
"twisted": {
"web": {
"__init__.py": "import WEB",
"topfiles": {"setup.py": "import WEBINSTALL",
"README": "WEB!"}}},
"doc": {"historic": {"hello": "there"},
"other": "contents"}}
outStructure = {
"bin": {"twistd": "TWISTD"},
"twisted": {
"web": {
"__init__.py": "import WEB",
"topfiles": {"setup.py": "import WEBINSTALL",
"README": "WEB!"}}},
"doc": {"other": "contents"}}
self.createStructure(self.rootDir, structure)
outputFile = self.builder.buildTwisted("10.0.0")
self.assertExtractedStructure(outputFile, outStructure)
def test_subProjectLayout(self):
"""
The subproject tarball includes files like so:
1. twisted/<subproject>/topfiles defines the files that will be in the
top level in the tarball, except LICENSE, which comes from the real
top-level directory.
2. twisted/<subproject> is included, but without the topfiles entry
in that directory. No other twisted subpackages are included.
3. twisted/plugins/twisted_<subproject>.py is included, but nothing
else in plugins is.
"""
structure = {
"README": "HI!@",
"unrelated": "x",
"LICENSE": "copyright!",
"setup.py": "import toplevel",
"bin": {"web": {"websetroot": "SET ROOT"},
"words": {"im": "#!im"}},
"twisted": {
"web": {
"__init__.py": "import WEB",
"topfiles": {"setup.py": "import WEBINSTALL",
"README": "WEB!"}},
"words": {"__init__.py": "import WORDS"},
"plugins": {"twisted_web.py": "import WEBPLUG",
"twisted_words.py": "import WORDPLUG"}}}
outStructure = {
"README": "WEB!",
"LICENSE": "copyright!",
"setup.py": "import WEBINSTALL",
"bin": {"websetroot": "SET ROOT"},
"twisted": {"web": {"__init__.py": "import WEB"},
"plugins": {"twisted_web.py": "import WEBPLUG"}}}
self.createStructure(self.rootDir, structure)
outputFile = self.builder.buildSubProject("web", "0.3.0")
self.assertExtractedStructure(outputFile, outStructure)
def test_minimalSubProjectLayout(self):
"""
buildSubProject should work with minimal subprojects.
"""
structure = {
"LICENSE": "copyright!",
"bin": {},
"twisted": {
"web": {"__init__.py": "import WEB",
"topfiles": {"setup.py": "import WEBINSTALL"}},
"plugins": {}}}
outStructure = {
"setup.py": "import WEBINSTALL",
"LICENSE": "copyright!",
"twisted": {"web": {"__init__.py": "import WEB"}}}
self.createStructure(self.rootDir, structure)
outputFile = self.builder.buildSubProject("web", "0.3.0")
self.assertExtractedStructure(outputFile, outStructure)
def test_subProjectDocBuilding(self):
"""
When building a subproject release, documentation should be built with
lore.
"""
loreInput, loreOutput = self.getArbitraryLoreInputAndOutput("0.3.0")
manInput = self.getArbitraryManInput()
manOutput = self.getArbitraryManHTMLOutput("0.3.0", "../howto/")
structure = {
"LICENSE": "copyright!",
"twisted": {"web": {"__init__.py": "import WEB",
"topfiles": {"setup.py": "import WEBINST"}}},
"doc": {"web": {"howto": {"index.xhtml": loreInput},
"man": {"twistd.1": manInput}},
"core": {"howto": {"template.tpl": self.template}}}}
outStructure = {
"LICENSE": "copyright!",
"setup.py": "import WEBINST",
"twisted": {"web": {"__init__.py": "import WEB"}},
"doc": {"howto": {"index.html": loreOutput},
"man": {"twistd.1": manInput,
"twistd-man.html": manOutput}}}
self.createStructure(self.rootDir, structure)
outputFile = self.builder.buildSubProject("web", "0.3.0")
self.assertExtractedStructure(outputFile, outStructure)
def test_coreProjectLayout(self):
"""
The core tarball looks a lot like a subproject tarball, except it
doesn't include:
- Python packages from other subprojects
- plugins from other subprojects
- scripts from other subprojects
"""
indexInput, indexOutput = self.getArbitraryLoreInputAndOutput(
"8.0.0", prefix="howto/")
howtoInput, howtoOutput = self.getArbitraryLoreInputAndOutput("8.0.0")
specInput, specOutput = self.getArbitraryLoreInputAndOutput(
"8.0.0", prefix="../howto/")
tutorialInput, tutorialOutput = self.getArbitraryLoreInputAndOutput(
"8.0.0", prefix="../")
structure = {
"LICENSE": "copyright!",
"twisted": {"__init__.py": "twisted",
"python": {"__init__.py": "python",
"roots.py": "roots!"},
"conch": {"__init__.py": "conch",
"unrelated.py": "import conch"},
"plugin.py": "plugin",
"plugins": {"twisted_web.py": "webplug",
"twisted_whatever.py": "include!",
"cred.py": "include!"},
"topfiles": {"setup.py": "import CORE",
"README": "core readme"}},
"doc": {"core": {"howto": {"template.tpl": self.template,
"index.xhtml": howtoInput,
"tutorial": {
"index.xhtml": tutorialInput}},
"specifications": {"index.xhtml": specInput},
"examples": {"foo.py": "foo.py"},
"index.xhtml": indexInput},
"web": {"howto": {"index.xhtml": "webindex"}}},
"bin": {"twistd": "TWISTD",
"web": {"websetroot": "websetroot"}}}
outStructure = {
"LICENSE": "copyright!",
"setup.py": "import CORE",
"README": "core readme",
"twisted": {"__init__.py": "twisted",
"python": {"__init__.py": "python",
"roots.py": "roots!"},
"plugin.py": "plugin",
"plugins": {"twisted_whatever.py": "include!",
"cred.py": "include!"}},
"doc": {"howto": {"template.tpl": self.template,
"index.html": howtoOutput,
"tutorial": {"index.html": tutorialOutput}},
"specifications": {"index.html": specOutput},
"examples": {"foo.py": "foo.py"},
"index.html": indexOutput},
"bin": {"twistd": "TWISTD"}}
self.createStructure(self.rootDir, structure)
outputFile = self.builder.buildCore("8.0.0")
self.assertExtractedStructure(outputFile, outStructure)
def test_apiBaseURL(self):
"""
DistributionBuilder builds documentation with the specified
API base URL.
"""
apiBaseURL = "http://%s"
builder = DistributionBuilder(self.rootDir, self.outputDir,
apiBaseURL=apiBaseURL)
loreInput, loreOutput = self.getArbitraryLoreInputAndOutput(
"0.3.0", apiBaseURL=apiBaseURL)
structure = {
"LICENSE": "copyright!",
"twisted": {"web": {"__init__.py": "import WEB",
"topfiles": {"setup.py": "import WEBINST"}}},
"doc": {"web": {"howto": {"index.xhtml": loreInput}},
"core": {"howto": {"template.tpl": self.template}}}}
outStructure = {
"LICENSE": "copyright!",
"setup.py": "import WEBINST",
"twisted": {"web": {"__init__.py": "import WEB"}},
"doc": {"howto": {"index.html": loreOutput}}}
self.createStructure(self.rootDir, structure)
outputFile = builder.buildSubProject("web", "0.3.0")
self.assertExtractedStructure(outputFile, outStructure)
class BuildAllTarballsTest(DistributionBuilderTestBase):
"""
Tests for L{DistributionBuilder.buildAllTarballs}.
"""
skip = svnSkip
def test_buildAllTarballs(self):
"""
L{buildAllTarballs} builds tarballs for Twisted and all of its
subprojects based on an SVN checkout; the resulting tarballs contain
no SVN metadata. This involves building documentation, which it will
build with the correct API documentation reference base URL.
"""
repositoryPath = self.mktemp()
repository = FilePath(repositoryPath)
checkoutPath = self.mktemp()
checkout = FilePath(checkoutPath)
self.outputDir.remove()
runCommand(["svnadmin", "create", repositoryPath])
runCommand(["svn", "checkout", "file://" + repository.path,
checkout.path])
coreIndexInput, coreIndexOutput = self.getArbitraryLoreInputAndOutput(
"1.2.0", prefix="howto/",
apiBaseURL="http://twistedmatrix.com/documents/1.2.0/api/%s.html")
structure = {
"README": "Twisted",
"unrelated": "x",
"LICENSE": "copyright!",
"setup.py": "import toplevel",
"bin": {"words": {"im": "import im"},
"twistd": "TWISTD"},
"twisted": {
"topfiles": {"setup.py": "import TOPINSTALL",
"README": "CORE!"},
"_version.py": genVersion("twisted", 1, 2, 0),
"words": {"__init__.py": "import WORDS",
"_version.py": genVersion("twisted.words", 1, 2, 0),
"topfiles": {"setup.py": "import WORDSINSTALL",
"README": "WORDS!"}},
"plugins": {"twisted_web.py": "import WEBPLUG",
"twisted_words.py": "import WORDPLUG",
"twisted_yay.py": "import YAY"}},
"doc": {"core": {"howto": {"template.tpl": self.template},
"index.xhtml": coreIndexInput}}}
twistedStructure = {
"README": "Twisted",
"unrelated": "x",
"LICENSE": "copyright!",
"setup.py": "import toplevel",
"bin": {"twistd": "TWISTD",
"words": {"im": "import im"}},
"twisted": {
"topfiles": {"setup.py": "import TOPINSTALL",
"README": "CORE!"},
"_version.py": genVersion("twisted", 1, 2, 0),
"words": {"__init__.py": "import WORDS",
"_version.py": genVersion("twisted.words", 1, 2, 0),
"topfiles": {"setup.py": "import WORDSINSTALL",
"README": "WORDS!"}},
"plugins": {"twisted_web.py": "import WEBPLUG",
"twisted_words.py": "import WORDPLUG",
"twisted_yay.py": "import YAY"}},
"doc": {"core": {"howto": {"template.tpl": self.template},
"index.html": coreIndexOutput}}}
coreStructure = {
"setup.py": "import TOPINSTALL",
"README": "CORE!",
"LICENSE": "copyright!",
"bin": {"twistd": "TWISTD"},
"twisted": {
"_version.py": genVersion("twisted", 1, 2, 0),
"plugins": {"twisted_yay.py": "import YAY"}},
"doc": {"howto": {"template.tpl": self.template},
"index.html": coreIndexOutput}}
wordsStructure = {
"README": "WORDS!",
"LICENSE": "copyright!",
"setup.py": "import WORDSINSTALL",
"bin": {"im": "import im"},
"twisted": {
"words": {"__init__.py": "import WORDS",
"_version.py": genVersion("twisted.words", 1, 2, 0)},
"plugins": {"twisted_words.py": "import WORDPLUG"}}}
self.createStructure(checkout, structure)
childs = [x.path for x in checkout.children()]
runCommand(["svn", "add"] + childs)
runCommand(["svn", "commit", checkout.path, "-m", "yay"])
buildAllTarballs(checkout, self.outputDir)
self.assertEqual(
set(self.outputDir.children()),
set([self.outputDir.child("Twisted-1.2.0.tar.bz2"),
self.outputDir.child("TwistedCore-1.2.0.tar.bz2"),
self.outputDir.child("TwistedWords-1.2.0.tar.bz2")]))
self.assertExtractedStructure(
self.outputDir.child("Twisted-1.2.0.tar.bz2"),
twistedStructure)
self.assertExtractedStructure(
self.outputDir.child("TwistedCore-1.2.0.tar.bz2"),
coreStructure)
self.assertExtractedStructure(
self.outputDir.child("TwistedWords-1.2.0.tar.bz2"),
wordsStructure)
def test_buildAllTarballsEnsuresCleanCheckout(self):
"""
L{UncleanWorkingDirectory} is raised by L{buildAllTarballs} when the
SVN checkout provided has uncommitted changes.
"""
repositoryPath = self.mktemp()
repository = FilePath(repositoryPath)
checkoutPath = self.mktemp()
checkout = FilePath(checkoutPath)
runCommand(["svnadmin", "create", repositoryPath])
runCommand(["svn", "checkout", "file://" + repository.path,
checkout.path])
checkout.child("foo").setContent("whatever")
self.assertRaises(UncleanWorkingDirectory,
buildAllTarballs, checkout, FilePath(self.mktemp()))
def test_buildAllTarballsEnsuresExistingCheckout(self):
"""
L{NotWorkingDirectory} is raised by L{buildAllTarballs} when the
checkout passed does not exist or is not an SVN checkout.
"""
checkout = FilePath(self.mktemp())
self.assertRaises(NotWorkingDirectory,
buildAllTarballs,
checkout, FilePath(self.mktemp()))
checkout.createDirectory()
self.assertRaises(NotWorkingDirectory,
buildAllTarballs,
checkout, FilePath(self.mktemp()))
class ScriptTests(BuilderTestsMixin, StructureAssertingMixin, TestCase):
"""
Tests for the release script functionality.
"""
def _testVersionChanging(self, prerelease, patch):
"""
Check that L{ChangeVersionsScript.main} calls the version-changing
function with the appropriate version data and filesystem path.
"""
versionUpdates = []
def myVersionChanger(sourceTree, prerelease, patch):
versionUpdates.append((sourceTree, prerelease, patch))
versionChanger = ChangeVersionsScript()
versionChanger.changeAllProjectVersions = myVersionChanger
args = []
if prerelease:
args.append("--prerelease")
if patch:
args.append("--patch")
versionChanger.main(args)
self.assertEqual(len(versionUpdates), 1)
self.assertEqual(versionUpdates[0][0], FilePath("."))
self.assertEqual(versionUpdates[0][1], prerelease)
self.assertEqual(versionUpdates[0][2], patch)
def test_changeVersions(self):
"""
L{ChangeVersionsScript.main} changes version numbers for all Twisted
projects.
"""
self._testVersionChanging(False, False)
def test_changeVersionsWithPrerelease(self):
"""
A prerelease can be created with L{changeVersionsScript}.
"""
self._testVersionChanging(True, False)
def test_changeVersionsWithPatch(self):
"""
A patch release can be created with L{changeVersionsScript}.
"""
self._testVersionChanging(False, True)
def test_defaultChangeVersionsVersionChanger(self):
"""
The default implementation of C{changeAllProjectVersions} is
L{changeAllProjectVersions}.
"""
versionChanger = ChangeVersionsScript()
self.assertEqual(versionChanger.changeAllProjectVersions,
changeAllProjectVersions)
def test_badNumberOfArgumentsToChangeVersionsScript(self):
"""
L{changeVersionsScript} raises SystemExit when the wrong arguments are
passed.
"""
versionChanger = ChangeVersionsScript()
self.assertRaises(SystemExit, versionChanger.main, ["12.3.0"])
def test_tooManyDotsToChangeVersionsScript(self):
"""
L{changeVersionsScript} raises SystemExit when there are the wrong
number of segments in the version number passed.
"""
versionChanger = ChangeVersionsScript()
self.assertRaises(SystemExit, versionChanger.main,
["3.2.1.0"])
def test_nonIntPartsToChangeVersionsScript(self):
"""
L{changeVersionsScript} raises SystemExit when the version number isn't
made out of numbers.
"""
versionChanger = ChangeVersionsScript()
self.assertRaises(SystemExit, versionChanger.main,
["my united.states.of prewhatever"])
def test_buildTarballsScript(self):
"""
L{BuildTarballsScript.main} invokes L{buildAllTarballs} with
2 or 3 L{FilePath} instances representing the paths passed to it.
"""
builds = []
def myBuilder(checkout, destination, template=None):
builds.append((checkout, destination, template))
tarballBuilder = BuildTarballsScript()
tarballBuilder.buildAllTarballs = myBuilder
tarballBuilder.main(["checkoutDir", "destinationDir"])
self.assertEqual(
builds,
[(FilePath("checkoutDir"), FilePath("destinationDir"), None)])
builds = []
tarballBuilder.main(["checkoutDir", "destinationDir", "templatePath"])
self.assertEqual(
builds,
[(FilePath("checkoutDir"), FilePath("destinationDir"),
FilePath("templatePath"))])
def test_defaultBuildTarballsScriptBuilder(self):
"""
The default implementation of L{BuildTarballsScript.buildAllTarballs}
is L{buildAllTarballs}.
"""
tarballBuilder = BuildTarballsScript()
self.assertEqual(tarballBuilder.buildAllTarballs, buildAllTarballs)
def test_badNumberOfArgumentsToBuildTarballs(self):
"""
L{BuildTarballsScript.main} raises SystemExit when the wrong number of
arguments are passed.
"""
tarballBuilder = BuildTarballsScript()
self.assertRaises(SystemExit, tarballBuilder.main, [])
self.assertRaises(SystemExit, tarballBuilder.main,
["a", "b", "c", "d"])
def test_badNumberOfArgumentsToBuildNews(self):
"""
L{NewsBuilder.main} raises L{SystemExit} when other than 1 argument is
passed to it.
"""
newsBuilder = NewsBuilder()
self.assertRaises(SystemExit, newsBuilder.main, [])
self.assertRaises(SystemExit, newsBuilder.main, ["hello", "world"])
def test_buildNews(self):
"""
L{NewsBuilder.main} calls L{NewsBuilder.buildAll} with a L{FilePath}
instance constructed from the path passed to it.
"""
builds = []
newsBuilder = NewsBuilder()
newsBuilder.buildAll = builds.append
newsBuilder.main(["/foo/bar/baz"])
self.assertEqual(builds, [FilePath("/foo/bar/baz")])
|
vertcoin/electrum-vtc
|
refs/heads/master
|
gui/vtc/history_list.py
|
4
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import webbrowser
from util import *
from electrum_vtc.i18n import _
from electrum_vtc.util import block_explorer_URL, format_satoshis, format_time
from electrum_vtc.plugins import run_hook
from electrum_vtc.util import timestamp_to_datetime, profiler
TX_ICONS = [
"warning.png",
"warning.png",
"warning.png",
"unconfirmed.png",
"unconfirmed.png",
"clock1.png",
"clock2.png",
"clock3.png",
"clock4.png",
"clock5.png",
"confirmed.png",
]
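# Expository note (added, not in the original source): TX_ICONS is indexed by
# the integer status that wallet.get_tx_status() returns (see on_update below),
# so e.g. the lowest statuses map to warning.png and the final one to
# confirmed.png.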
class HistoryList(MyTreeWidget):
filter_columns = [2, 3, 4] # Date, Description, Amount
def __init__(self, parent=None):
MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)
self.refresh_headers()
self.setColumnHidden(1, True)
def refresh_headers(self):
headers = ['', '', _('Date'), _('Description') , _('Amount'), _('Balance')]
fx = self.parent.fx
if fx and fx.show_history():
headers.extend(['%s '%fx.ccy + _('Amount'), '%s '%fx.ccy + _('Balance')])
self.update_headers(headers)
def get_domain(self):
'''Replaced in address_dialog.py'''
return self.wallet.get_addresses()
@profiler
def on_update(self):
self.wallet = self.parent.wallet
h = self.wallet.get_history(self.get_domain())
item = self.currentItem()
current_tx = item.data(0, Qt.UserRole).toString() if item else None
self.clear()
fx = self.parent.fx
if fx: fx.history_used_spot = False
for h_item in h:
tx_hash, height, conf, timestamp, value, balance = h_item
status, status_str = self.wallet.get_tx_status(tx_hash, height, conf, timestamp)
has_invoice = self.wallet.invoices.paid.get(tx_hash)
icon = QIcon(":icons/" + TX_ICONS[status])
v_str = self.parent.format_amount(value, True, whitespaces=True)
balance_str = self.parent.format_amount(balance, whitespaces=True)
label = self.wallet.get_label(tx_hash)
entry = ['', tx_hash, status_str, label, v_str, balance_str]
if fx and fx.show_history():
date = timestamp_to_datetime(time.time() if conf <= 0 else timestamp)
for amount in [value, balance]:
text = fx.historical_value_str(amount, date)
entry.append(text)
item = QTreeWidgetItem(entry)
item.setIcon(0, icon)
item.setToolTip(0, str(conf) + " confirmation" + ("s" if conf != 1 else ""))
if has_invoice:
item.setIcon(3, QIcon(":icons/seal"))
for i in range(len(entry)):
if i>3:
item.setTextAlignment(i, Qt.AlignRight)
if i!=2:
item.setFont(i, QFont(MONOSPACE_FONT))
item.setTextAlignment(i, Qt.AlignVCenter)
if value < 0:
item.setForeground(3, QBrush(QColor("#BC1E1E")))
item.setForeground(4, QBrush(QColor("#BC1E1E")))
if tx_hash:
item.setData(0, Qt.UserRole, tx_hash)
self.insertTopLevelItem(0, item)
if current_tx == tx_hash:
self.setCurrentItem(item)
def on_doubleclick(self, item, column):
if self.permit_edit(item, column):
super(HistoryList, self).on_doubleclick(item, column)
else:
tx_hash = str(item.data(0, Qt.UserRole).toString())
tx = self.wallet.transactions.get(tx_hash)
self.parent.show_transaction(tx)
def update_labels(self):
root = self.invisibleRootItem()
child_count = root.childCount()
for i in range(child_count):
item = root.child(i)
txid = str(item.data(0, Qt.UserRole).toString())
label = self.wallet.get_label(txid)
item.setText(3, label)
def update_item(self, tx_hash, height, conf, timestamp):
status, status_str = self.wallet.get_tx_status(tx_hash, height, conf, timestamp)
icon = QIcon(":icons/" + TX_ICONS[status])
items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)
if items:
item = items[0]
item.setIcon(0, icon)
item.setText(2, status_str)
def create_menu(self, position):
self.selectedIndexes()
item = self.currentItem()
if not item:
return
column = self.currentColumn()
tx_hash = str(item.data(0, Qt.UserRole).toString())
if not tx_hash:
return
        if column == 0:
column_title = "ID"
column_data = tx_hash
else:
column_title = self.headerItem().text(column)
column_data = item.text(column)
tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)
height, conf, timestamp = self.wallet.get_tx_height(tx_hash)
tx = self.wallet.transactions.get(tx_hash)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
is_unconfirmed = height <= 0
pr_key = self.wallet.invoices.paid.get(tx_hash)
menu = QMenu()
menu.addAction(_("Copy %s")%column_title, lambda: self.parent.app.clipboard().setText(column_data))
if column in self.editable_columns:
menu.addAction(_("Edit %s")%column_title, lambda: self.editItem(item, column))
menu.addAction(_("Details"), lambda: self.parent.show_transaction(tx))
if is_unconfirmed and tx:
rbf = is_mine and not tx.is_final()
if rbf:
menu.addAction(_("Increase fee"), lambda: self.parent.bump_fee_dialog(tx))
else:
child_tx = self.wallet.cpfp(tx, 0)
if child_tx:
menu.addAction(_("Child pays for parent"), lambda: self.parent.cpfp(tx, child_tx))
if pr_key:
menu.addAction(QIcon(":icons/seal"), _("View invoice"), lambda: self.parent.show_invoice(pr_key))
if tx_URL:
menu.addAction(_("View on block explorer"), lambda: webbrowser.open(tx_URL))
menu.exec_(self.viewport().mapToGlobal(position))
|
mpoindexter/kafka
|
refs/heads/trunk
|
tests/kafkatest/services/verifiable_producer.py
|
21
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.services.background_thread import BackgroundThreadService
import json
class VerifiableProducer(BackgroundThreadService):
logs = {
"producer_log": {
"path": "/mnt/producer.log",
"collect_default": False}
}
def __init__(self, context, num_nodes, kafka, topic, max_messages=-1, throughput=100000):
super(VerifiableProducer, self).__init__(context, num_nodes)
self.kafka = kafka
self.topic = topic
self.max_messages = max_messages
self.throughput = throughput
self.acked_values = []
self.not_acked_values = []
def _worker(self, idx, node):
cmd = self.start_cmd
self.logger.debug("VerifiableProducer %d command: %s" % (idx, cmd))
for line in node.account.ssh_capture(cmd):
line = line.strip()
data = self.try_parse_json(line)
if data is not None:
with self.lock:
if data["name"] == "producer_send_error":
data["node"] = idx
self.not_acked_values.append(int(data["value"]))
elif data["name"] == "producer_send_success":
self.acked_values.append(int(data["value"]))
@property
def start_cmd(self):
cmd = "/opt/kafka/bin/kafka-verifiable-producer.sh" \
" --topic %s --broker-list %s" % (self.topic, self.kafka.bootstrap_servers())
if self.max_messages > 0:
cmd += " --max-messages %s" % str(self.max_messages)
if self.throughput > 0:
cmd += " --throughput %s" % str(self.throughput)
cmd += " 2>> /mnt/producer.log | tee -a /mnt/producer.log &"
return cmd
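    # Illustrative expansion (topic and broker values assumed, not from the
    # source): with topic="test", bootstrap servers "worker1:9092" and the
    # defaults above, start_cmd is roughly:
    #   /opt/kafka/bin/kafka-verifiable-producer.sh --topic test \
    #     --broker-list worker1:9092 --throughput 100000 \
    #     2>> /mnt/producer.log | tee -a /mnt/producer.log &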
@property
def acked(self):
with self.lock:
return self.acked_values
@property
def not_acked(self):
with self.lock:
return self.not_acked_values
@property
def num_acked(self):
with self.lock:
return len(self.acked_values)
@property
def num_not_acked(self):
with self.lock:
return len(self.not_acked_values)
def stop_node(self, node):
node.account.kill_process("VerifiableProducer", allow_fail=False)
# block until the corresponding thread exits
if len(self.worker_threads) >= self.idx(node):
# Need to guard this because stop is preemptively called before the worker threads are added and started
self.worker_threads[self.idx(node) - 1].join()
def clean_node(self, node):
node.account.ssh("rm -rf /mnt/producer.log", allow_fail=False)
def try_parse_json(self, string):
"""Try to parse a string as json. Return None if not parseable."""
try:
record = json.loads(string)
return record
except ValueError:
self.logger.debug("Could not parse as json: %s" % str(string))
return None
|
ARamsey118/Reverse-Javadoc
|
refs/heads/master
|
ClassFinder.py
|
1
|
#!/usr/bin/python3
import ReverseDoc
import os
from bs4 import BeautifulSoup
import urllib.request
import urllib.error
class Java():
"""
Holds a java class with its location.
"""
def __init__(self):
self.name = ""
self.location = ""
def __str__(self):
return self.location
def findClasses(soup):
"""
    Used to locate all classes in the javadoc. Also keeps track of each class's location to determine packages.
:param soup: HTML of the overview-tree page.
:return: list containing Java class objects
"""
classes = soup.find("h2", {"title": "Class Hierarchy"}) #gets the tag for the class list
java_class_list = list()
if classes:
classes = classes.findNext("ul") #move down to the class list
class_list = classes.find_all("a") #list of classes
for java_class in class_list:
new_class = Java()
new_class.name = str(java_class.find("span", {"class": "typeNameLink"}).text)
new_class.location = str(java_class.get("href"))
java_class_list.append(new_class)
return java_class_list
def findInterfaces(soup):
"""
    Just like findClasses, but for interfaces.
:param soup: HTML of the overview-tree page.
:return: list containing java class objects.
"""
#TODO combine with findClasses
interfaces = soup.find("h2", {"title": "Interface Hierarchy"})
interface_list = list()
if interfaces:
interfaces = interfaces.findNext("ul")
temp_list = interfaces.find_all("li")
for temp_class in temp_list:
new_class = Java()
new_class.name = str(temp_class.find("span", {"class": "typeNameLink"}).text)
new_class.location = str(temp_class.find("a").get("href"))
interface_list.append(new_class)
return interface_list
def main():
htmlfile = input("Enter url to main doc page: ")
output = input("Enter complete location to output src files: ")
# htmlfile = "http://www.cs.rit.edu/~csci142/Projects/01/doc/"
# htmlfile = "http://www.cs.rit.edu/~csci142/Labs/09/Doc/"
# output = "/home/andrew/Documents/AJ-College/Spring2015/CS142/9Mogwai/Lab9"
# htmlfile = "http://www.cs.rit.edu/~csci142/Labs/06/Doc/"
# output = "/home/andrew/java"
# output = "/home/andrew/Documents/AJ-College/Spring2015/CS142/6Graduation/Lab6"
# output = "/home/andrew/school/CS142/4BankAccount/Lab4"
# output = "/home/andrew/school/CS142/1Perp/Project1"
if htmlfile[-1] != "/": #add slashes as appropriate
htmlfile += "/"
if output[-1] != "/":
output += "/"
output += "src/" # put the output in a directory called src
htmltext = urllib.request.urlopen(htmlfile + "overview-tree.html").read()
soup = BeautifulSoup(htmltext)
class_list = findClasses(soup)
interface_list = findInterfaces(soup)
#TODO make this a function and pass it interface or class as appropriate
for java_class in class_list:
new_class = (
ReverseDoc.ReverseDoc(urllib.request.urlopen(htmlfile + java_class.location).read(), htmlfile), "except")
path = os.path.join(output, java_class.location.replace(".html", "") + ".java")
dirpath = path.rsplit("/", 1)[0] + "/"
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with open(path, "w") as f:
#TODO see if the decoding or printing can be done at creation to remove if else
if new_class[1] == "try":
f.write(new_class[0].decode("utf-8"))
else:
f.write(new_class[0].__repr__(False)) #telling it to print as a class
for interface in interface_list:
new_interface = (
ReverseDoc.ReverseDoc(urllib.request.urlopen(htmlfile + interface.location).read(), htmlfile), "except")
path = os.path.join(output, interface.location.replace(".html", "") + ".java")
dirpath = path.rsplit("/", 1)[0] + "/"
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with open(path, "w") as f:
if new_interface[1] == "try":
f.write(new_interface[0].decode("utf-8"))
else:
f.write(new_interface[0].__repr__(True)) #telling it to print as an interface
if __name__ == '__main__':
main()
|
phvu/DDF
|
refs/heads/master
|
python/setup.py
|
3
|
from setuptools import setup, find_packages
from setuptools.command.install import install
def __read_version(file_path):
import json
from collections import defaultdict
return defaultdict(str, json.loads(open(file_path).read()))
_version = __read_version('./ddf/version.json')['version']
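# Illustrative ddf/version.json contents (assumed, not from this repo):
#   {"version": "1.2.3"}
# The defaultdict(str, ...) wrapper makes a missing "version" key resolve to ""
# instead of raising KeyError.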
setup(
cmdclass={'install': install},
name='ddf',
version=_version,
keywords='DDF',
author='ADATAO Inc.',
author_email='dev@adatao.com',
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
scripts=[],
url='http://ddf.io/',
license='LICENSE',
description='Distributed Data Frame',
long_description=open('README.md').read(),
install_requires=[
"py4j",
"pandas",
"tabulate"
],
entry_points="""
# -*- Entry points: -*-
"""
)
|
terryjbates/cookiecutter
|
refs/heads/master
|
tests/test_read_user_choice.py
|
27
|
# -*- coding: utf-8 -*-
import click
import pytest
from cookiecutter.prompt import read_user_choice
OPTIONS = ['hello', 'world', 'foo', 'bar']
EXPECTED_PROMPT = """Select varname:
1 - hello
2 - world
3 - foo
4 - bar
Choose from 1, 2, 3, 4"""
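# Note (expository, not in the original test): enumerate(OPTIONS, 1) yields
# 1-based pairs such as (1, 'hello') ... (4, 'bar'), so each parametrized run
# simulates the user typing one valid menu number.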
@pytest.mark.parametrize('user_choice, expected_value', enumerate(OPTIONS, 1))
def test_click_invocation(mocker, user_choice, expected_value):
choice = mocker.patch('click.Choice')
choice.return_value = click.Choice(OPTIONS)
prompt = mocker.patch('click.prompt')
prompt.return_value = '{}'.format(user_choice)
assert read_user_choice('varname', OPTIONS) == expected_value
prompt.assert_called_once_with(
EXPECTED_PROMPT,
type=click.Choice(OPTIONS),
default='1'
)
def test_raise_if_options_is_not_a_non_empty_list():
with pytest.raises(TypeError):
read_user_choice('foo', 'NOT A LIST')
with pytest.raises(ValueError):
read_user_choice('foo', [])
|
vmarkovtsev/django
|
refs/heads/master
|
django/contrib/contenttypes/admin.py
|
191
|
from __future__ import unicode_literals
from functools import partial
from django.contrib.admin.checks import InlineModelAdminChecks
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.forms import (
BaseGenericInlineFormSet, generic_inlineformset_factory,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.forms import ALL_FIELDS
from django.forms.models import modelform_defines_fields
class GenericInlineModelAdminChecks(InlineModelAdminChecks):
def _check_exclude_of_parent_model(self, obj, parent_model):
# There's no FK to exclude, so no exclusion checks are required.
return []
def _check_relation(self, obj, parent_model):
# There's no FK, but we do need to confirm that the ct_field and ct_fk_field are valid,
# and that they are part of a GenericForeignKey.
gfks = [
f for f in obj.model._meta.virtual_fields
if isinstance(f, GenericForeignKey)
]
if len(gfks) == 0:
return [
checks.Error(
"'%s.%s' has no GenericForeignKey." % (
obj.model._meta.app_label, obj.model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E301'
)
]
else:
# Check that the ct_field and ct_fk_fields exist
try:
obj.model._meta.get_field(obj.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"'ct_field' references '%s', which is not a field on '%s.%s'." % (
obj.ct_field, obj.model._meta.app_label, obj.model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E302'
)
]
try:
obj.model._meta.get_field(obj.ct_fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"'ct_fk_field' references '%s', which is not a field on '%s.%s'." % (
obj.ct_fk_field, obj.model._meta.app_label, obj.model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E303'
)
]
# There's one or more GenericForeignKeys; make sure that one of them
# uses the right ct_field and ct_fk_field.
for gfk in gfks:
if gfk.ct_field == obj.ct_field and gfk.fk_field == obj.ct_fk_field:
return []
return [
checks.Error(
"'%s.%s' has no GenericForeignKey using content type field '%s' and object ID field '%s'." % (
obj.model._meta.app_label, obj.model._meta.object_name, obj.ct_field, obj.ct_fk_field
),
hint=None,
obj=obj.__class__,
id='admin.E304'
)
]
class GenericInlineModelAdmin(InlineModelAdmin):
ct_field = "content_type"
ct_fk_field = "object_id"
formset = BaseGenericInlineFormSet
checks_class = GenericInlineModelAdminChecks
def get_formset(self, request, obj=None, **kwargs):
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# GenericInlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"ct_field": self.ct_field,
"fk_field": self.ct_fk_field,
"form": self.form,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"formset": self.formset,
"extra": self.get_extra(request, obj),
"can_delete": can_delete,
"can_order": False,
"fields": fields,
"min_num": self.get_min_num(request, obj),
"max_num": self.get_max_num(request, obj),
"exclude": exclude
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = ALL_FIELDS
return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
|
arborh/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/lib/source_remote.py
|
6
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Communicating tracebacks and source code with debug server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import socket
import grpc
from tensorflow.core.debug import debug_service_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.debug.lib import common
from tensorflow.python.debug.lib import debug_service_pb2_grpc
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.platform import gfile
from tensorflow.python.profiler import tfprof_logger
def _load_debugged_source_file(file_path, source_file_proto):
file_stat = gfile.Stat(file_path)
source_file_proto.host = socket.gethostname()
source_file_proto.file_path = file_path
source_file_proto.last_modified = file_stat.mtime_nsec
source_file_proto.bytes = file_stat.length
try:
with gfile.Open(file_path, "r") as f:
source_file_proto.lines.extend(f.read().splitlines())
except IOError:
pass
def _string_to_id(string, string_to_id):
if string not in string_to_id:
string_to_id[string] = len(string_to_id)
return string_to_id[string]
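# Expository example (added, not in the original source): the helper interns
# strings to stable integer ids, e.g. with table = {None: 0},
# _string_to_id("a.py", table) returns 1, and repeating the call returns 1.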
def _format_origin_stack(origin_stack, call_traceback_proto):
"""Format a traceback stack for a `CallTraceback` proto.
Args:
origin_stack: The stack list as returned by `traceback.extract_stack()`.
call_traceback_proto: A `CallTraceback` proto whose fields are to be
populated.
"""
string_to_id = {}
string_to_id[None] = 0
for frame in origin_stack:
file_path, lineno, func_name, line_text = frame
call_traceback_proto.origin_stack.traces.add(
file_id=_string_to_id(file_path, string_to_id),
lineno=lineno,
function_id=_string_to_id(func_name, string_to_id),
line_id=_string_to_id(line_text, string_to_id))
id_to_string = call_traceback_proto.origin_id_to_string
for key, value in string_to_id.items():
id_to_string[value] = key if key is not None else ""
def _source_file_paths_outside_tensorflow_py_library(code_defs, id_to_string):
"""Extract source file paths outside TensorFlow Python library.
Args:
code_defs: An iterable of `CodeDef` protos, i.e., an iterable of stack
traces.
id_to_string: A proto map from integer ids to strings.
Returns:
An iterable of source file paths outside the TensorFlow Python library.
"""
file_ids = set()
for code_def in code_defs:
for trace in code_def.traces:
file_ids.add(trace.file_id)
non_tf_files = (id_to_string[file_id] for file_id in file_ids)
non_tf_files = (
f for f in non_tf_files
if not source_utils.guess_is_tensorflow_py_library(f) and gfile.Exists(f))
return non_tf_files
def _send_call_tracebacks(destinations,
origin_stack,
is_eager_execution=False,
call_key=None,
graph=None,
send_source=True):
"""Send the tracebacks of a TensorFlow execution call.
To gRPC debug server(s). This applies to graph execution (`tf.Session.run()`)
calls and eager execution calls.
If `send_source`, also sends the underlying source files outside the
TensorFlow library.
Args:
destinations: gRPC destination addresses, a `str` or a `list` of `str`s,
e.g., "localhost:4242". If a `list`, gRPC requests containing the same
`CallTraceback` proto payload will be sent to all the destinations.
origin_stack: The traceback stack for the origin of the execution call. For
graph execution, this is the traceback of the `tf.Session.run()`
invocation. For eager execution, this is the traceback of the Python
      line that executes the eager operation.
is_eager_execution: (`bool`) whether an eager execution call (i.e., not a
`tf.Session.run` or derived methods) is being sent.
call_key: The key of the execution call, as a string. For graph execution,
this is a string describing the feeds, fetches (and targets) names of the
`tf.Session.run` call. For eager execution, this is ignored.
graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`),
which contains op tracebacks, if applicable.
send_source: Whether the source files involved in the op tracebacks but
outside the TensorFlow library are to be sent.
"""
if not isinstance(destinations, list):
destinations = [destinations]
# Strip grpc:// prefix, if any is present.
destinations = [
dest[len(common.GRPC_URL_PREFIX):]
if dest.startswith(common.GRPC_URL_PREFIX) else dest
for dest in destinations]
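  # e.g. (illustrative): "grpc://localhost:4242" is stripped to
  # "localhost:4242" above, while a bare "localhost:4242" passes through
  # unchanged.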
call_type = (debug_service_pb2.CallTraceback.EAGER_EXECUTION
if is_eager_execution
else debug_service_pb2.CallTraceback.GRAPH_EXECUTION)
graph_traceback = tfprof_logger.merge_default_with_oplog(
graph, add_trainable_var=False) if graph else None
call_traceback = debug_service_pb2.CallTraceback(
call_type=call_type, call_key=call_key, graph_traceback=graph_traceback,
graph_version=graph.version if graph else None)
_format_origin_stack(origin_stack, call_traceback)
if send_source:
source_file_paths = set()
source_file_paths.update(_source_file_paths_outside_tensorflow_py_library(
(log_entry.code_def for log_entry
in call_traceback.graph_traceback.log_entries),
call_traceback.graph_traceback.id_to_string))
source_file_paths.update(_source_file_paths_outside_tensorflow_py_library(
[call_traceback.origin_stack], call_traceback.origin_id_to_string))
debugged_source_files = []
for file_path in source_file_paths:
source_files = debug_pb2.DebuggedSourceFiles()
_load_debugged_source_file(
file_path, source_files.source_files.add())
debugged_source_files.append(source_files)
for destination in destinations:
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
channel = grpc.insecure_channel(destination, options=no_max_message_sizes)
stub = debug_service_pb2_grpc.EventListenerStub(channel)
stub.SendTracebacks(call_traceback)
if send_source:
for source_files in debugged_source_files:
stub.SendSourceFiles(source_files)
def send_graph_tracebacks(destinations,
run_key,
origin_stack,
graph,
send_source=True):
"""Send the tracebacks of a graph execution call to debug server(s).
Args:
destinations: gRPC destination addresses, a `str` or a `list` of `str`s,
e.g., "localhost:4242". If a `list`, gRPC requests containing the same
`CallTraceback` proto payload will be sent to all the destinations.
run_key: A string describing the feeds, fetches (and targets) names of the
`tf.Session.run` call.
origin_stack: The traceback of the `tf.Session.run()` invocation.
graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`),
which contains op tracebacks.
send_source: Whether the source files involved in the op tracebacks but
outside the TensorFlow library are to be sent.
"""
_send_call_tracebacks(
destinations, origin_stack, is_eager_execution=False, call_key=run_key,
graph=graph, send_source=send_source)
def send_eager_tracebacks(destinations,
origin_stack,
send_source=True):
"""Send the tracebacks of an eager execution call to debug server(s).
Args:
destinations: gRPC destination addresses, a `str` or a `list` of `str`s,
e.g., "localhost:4242". If a `list`, gRPC requests containing the same
      `CallTraceback` proto payload will be sent to all the destinations.
    origin_stack: The traceback of the eager operation invocation.
send_source: Whether the source files involved in the op tracebacks but
outside the TensorFlow library are to be sent.
"""
_send_call_tracebacks(
destinations, origin_stack, is_eager_execution=True,
send_source=send_source)
|
google/layered-scene-inference
|
refs/heads/master
|
lsi/geometry/homography.py
|
1
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow utils for image transformations via homographies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lsi.geometry import sampling
from lsi.nnutils import helpers as nn_helpers
import tensorflow as tf
def inv_homography(k_s, k_t, rot, t, n_hat, a):
"""Computes inverse homography matrix.
Args:
k_s: intrinsics for source cameras, are [...] X 3 X 3 matrices
k_t: intrinsics for target cameras, are [...] X 3 X 3 matrices
rot: relative rotation, are [...] X 3 X 3 matrices
t: [...] X 3 X 1, translations from source to target camera
n_hat: [...] X 1 X 3, plane normal w.r.t source camera frame
a: [...] X 1 X 1, plane equation displacement
Returns:
homography: [...] X 3 X 3 inverse homography matrices
"""
with tf.name_scope('inv_homography'):
rot_t = nn_helpers.transpose(rot)
k_t_inv = tf.matrix_inverse(k_t, name='k_t_inv')
denom = a - tf.matmul(tf.matmul(n_hat, rot_t), t)
numerator = tf.matmul(tf.matmul(tf.matmul(rot_t, t), n_hat), rot_t)
inv_hom = tf.matmul(
tf.matmul(k_s, rot_t + nn_helpers.divide_safe(numerator, denom)),
k_t_inv,
name='inv_hom')
return inv_hom
def inv_homography_dmat(k_t, rot, t, n_hat, a):
"""Computes M where M*(u,v,1) = d_t.
Args:
k_t: intrinsics for target cameras, are [...] X 3 X 3 matrices
rot: relative rotation, are [...] X 3 X 3 matrices
t: [...] X 3 X 1, translations from source to target camera
n_hat: [...] X 1 X 3, plane normal w.r.t source camera frame
a: [...] X 1 X 1, plane equation displacement
Returns:
d_mat: [...] X 1 X 3 matrices
"""
with tf.name_scope('inv_homography'):
rot_t = nn_helpers.transpose(rot)
k_t_inv = tf.matrix_inverse(k_t, name='k_t_inv')
denom = a - tf.matmul(tf.matmul(n_hat, rot_t), t)
d_mat = nn_helpers.divide_safe(
-1 * tf.matmul(tf.matmul(n_hat, rot_t), k_t_inv), denom, name='dmat')
return d_mat
def normalize_homogeneous(pts_coords):
"""Converts homogeneous coordinates to regular coordinates.
Args:
pts_coords : [...] X n_dims_coords+1; Homogeneous coordinates.
Returns:
pts_coords_uv_norm : [...] X n_dims_coords;
normal coordinates after dividing by the last entry.
"""
with tf.name_scope('normalize_homogeneous'):
pts_size = pts_coords.get_shape().as_list()
n_dims = len(pts_size)
n_dims_coords = pts_size[-1] - 1
pts_coords_uv, pts_coords_norm = tf.split(
pts_coords, [n_dims_coords, 1], axis=n_dims - 1)
return nn_helpers.divide_safe(pts_coords_uv, pts_coords_norm)
def transform_plane_imgs(imgs, pixel_coords_trg, k_s, k_t, rot, t, n_hat, a):
"""Transforms input imgs via homographies for corresponding planes.
Args:
imgs: are [...] X H_s X W_s X C
pixel_coords_trg: [...] X H_t X W_t X 3; pixel (u,v,1) coordinates.
k_s: intrinsics for source cameras, are [...] X 3 X 3 matrices
k_t: intrinsics for target cameras, are [...] X 3 X 3 matrices
rot: relative rotation, are [...] X 3 X 3 matrices
t: [...] X 3 X 1, translations from source to target camera
n_hat: [...] X 1 X 3, plane normal w.r.t source camera frame
a: [...] X 1 X 1, plane equation displacement
Returns:
[...] X H_t X W_t X C images after bilinear sampling from input.
Coordinates outside the image are sampled as 0.
"""
with tf.name_scope('transform_plane_imgs'):
hom_t2s_planes = inv_homography(k_s, k_t, rot, t, n_hat, a)
pixel_coords_t2s = nn_helpers.transform_pts(pixel_coords_trg,
hom_t2s_planes)
pixel_coords_t2s = normalize_homogeneous(pixel_coords_t2s)
imgs_s2t = sampling.bilinear_wrapper(imgs, pixel_coords_t2s)
return imgs_s2t
def transform_plane_eqns(rot, t, n_hat, a):
"""Transforms plane euqations according to frame transformation.
Args:
rot: relative rotation, are [...] X 3 X 3 matrices
t: [...] X 3 X 1, translations from source to target camera
n_hat: [...] X 1 X 3, plane normal w.r.t source camera frame
a: [...] X 1 X 1, plane equation displacement
Returns:
n_hat_t: [...] X 1 X 3, plane normal w.r.t target camera frame
    a_t: [...] X 1 X 1, plane equation displacement
"""
with tf.name_scope('transform_plane_eqns'):
rot_t = nn_helpers.transpose(rot)
n_hat_t = tf.matmul(n_hat, rot_t)
a_t = a - tf.matmul(n_hat, tf.matmul(rot_t, t))
return n_hat_t, a_t
def trg_disp_maps(pixel_coords_trg, k_t, rot, t, n_hat, a):
"""Computes pixelwise inverse depth for target pixels via plane equations.
Args:
pixel_coords_trg: [...] X H_t X W_t X 3; pixel (u,v,1) coordinates.
k_t: intrinsics for target cameras, are [...] X 3 X 3 matrices
rot: relative rotation, are [...] X 3 X 3 matrices
t: [...] X 3 X 1, translations from source to target camera
n_hat: [...] X 1 X 3, plane normal w.r.t source camera frame
a: [...] X 1 X 1, plane equation displacement
Returns:
[...] X H_t X W_t X 1 images corresponding to inverse depth at each pixel
"""
with tf.name_scope('trg_disp_maps'):
dmats_t = inv_homography_dmat(k_t, rot, t, n_hat, a) # size: [...] X 1 X 3
disp_t = tf.reduce_sum(
tf.expand_dims(dmats_t, -2) * pixel_coords_trg, axis=-1, keep_dims=True)
return disp_t
|
vincepandolfo/django
|
refs/heads/master
|
django/views/generic/detail.py
|
65
|
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
class SingleObjectMixin(ContextMixin):
"""
Provides the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
context_object_name = None
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
query_pk_and_slug = False
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg)
slug = self.kwargs.get(self.slug_url_kwarg)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
if slug is not None and (pk is None or self.query_pk_and_slug):
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
if pk is None and slug is None:
raise AttributeError("Generic detail view %s must be called with "
"either an object pk or a slug."
% self.__class__.__name__)
try:
# Get the single item from the filtered queryset
obj = queryset.get()
except queryset.model.DoesNotExist:
raise Http404(_("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
def get_queryset(self):
"""
Return the `QuerySet` that will be used to look up the object.
Note that this method is called by the default implementation of
`get_object` and may not be called if `get_object` is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
return self.queryset.all()
def get_slug_field(self):
"""
Get the name of a slug field to be used to look up by slug.
"""
return self.slug_field
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
if self.context_object_name:
return self.context_object_name
elif isinstance(obj, models.Model):
if obj._deferred:
obj = obj._meta.proxy_for_model
return obj._meta.model_name
else:
return None
def get_context_data(self, **kwargs):
"""
Insert the single object into the context dict.
"""
context = {}
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
context.update(kwargs)
return super(SingleObjectMixin, self).get_context_data(**context)
class BaseDetailView(SingleObjectMixin, View):
"""
A base view for displaying a single object
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_field = None
template_name_suffix = '_detail'
def get_template_names(self):
"""
Return a list of template names to be used for the request. May not be
called if render_to_response is overridden. Returns the following list:
* the value of ``template_name`` on the view (if provided)
* the contents of the ``template_name_field`` field on the
object instance that the view is operating upon (if available)
* ``<app_label>/<model_name><template_name_suffix>.html``
"""
try:
names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If self.template_name_field is set, grab the value of the field
# of that name from the object; this is the most specific template
# name, if given.
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
# The least-specific option is the default <app>/<model>_detail.html;
# only use this if the object in question is a model.
if isinstance(self.object, models.Model):
object_meta = self.object._meta
if self.object._deferred:
object_meta = self.object._meta.proxy_for_model._meta
names.append("%s/%s%s.html" % (
object_meta.app_label,
object_meta.model_name,
self.template_name_suffix
))
elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
names.append("%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.model_name,
self.template_name_suffix
))
# If we still haven't managed to find any template names, we should
# re-raise the ImproperlyConfigured to alert the user.
if not names:
raise
return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
"""
Render a "detail" view of an object.
By default this is a model instance looked up from `self.queryset`, but the
view will support display of *any* object by overriding `self.get_object()`.
"""
|
fuhongliang/erpnext
|
refs/heads/develop
|
erpnext/patches/v4_0/update_tax_amount_after_discount.py
|
120
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("accounts", "doctype", "sales_taxes_and_charges")
docs_with_discount_amount = frappe._dict()
for dt in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
records = frappe.db.sql_list("""select name from `tab%s`
where ifnull(discount_amount, 0) > 0 and docstatus=1""" % dt)
docs_with_discount_amount[dt] = records
for dt, discounted_records in docs_with_discount_amount.items():
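		# The extra '' appended to the parameters fills the (len(records) + 1)
		# '%s' placeholders, keeping `parent not in (...)` valid SQL even when a
		# doctype has no discounted records; discounted parents are skipped so
		# their tax_amount_after_discount_amount stays as computed.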
frappe.db.sql("""update `tabSales Taxes and Charges`
set tax_amount_after_discount_amount = tax_amount
where parenttype = %s and parent not in (%s)""" %
('%s', ', '.join(['%s']*(len(discounted_records)+1))),
tuple([dt, ''] + discounted_records))
|
klebercode/canaa
|
refs/heads/master
|
canaa/current_path.py
|
1
|
from django.conf.global_settings import LANGUAGES
class CurrentPathMiddleware:
def process_request(self, request):
l_path = request.path.split('/')
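        # e.g. '/en/about/' splits into ['', 'en', 'about']; index 1 holds the
        # language code when the URL carries a language prefix.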
request.session['no_lang_path'] = request.path
codes = []
for code, name in LANGUAGES:
codes.append(code)
if l_path[1] in codes:
del l_path[1]
no_lang_path = '/'.join(l_path)
request.session['no_lang_path'] = no_lang_path
|
0x90sled/catapult
|
refs/heads/master
|
third_party/gsutil/gslib/project_id.py
|
38
|
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper module for Google Cloud Storage project IDs."""
from __future__ import absolute_import
import boto
from gslib.cloud_api import ProjectIdException
GOOG_PROJ_ID_HDR = 'x-goog-project-id'
def PopulateProjectId(project_id=None):
"""Fills in a project_id from the boto config file if one is not provided."""
if not project_id:
default_id = boto.config.get_value('GSUtil', 'default_project_id')
if not default_id:
raise ProjectIdException('MissingProjectId')
return default_id
return project_id
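# The boto configuration stanza this reads looks like (example project id):
#   [GSUtil]
#   default_project_id = my-project-123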
|
OscarPDR/projects_morelab
|
refs/heads/master
|
projects/urls.py
|
1
|
from django.conf.urls import patterns, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = patterns('',
url(r'^$', 'projects.views.project_index', name='project_index'),
url(r'^add/$', 'projects.views.add_project', name='add_project'),
url(r'^info/(\S+)$', 'projects.views.project_info', name='project_info'),
url(r'^email/(\S+)$', 'projects.views.email_project', name='email_project'),
url(r'^edit/(\S+)$', 'projects.views.edit_project', name='edit_project'),
url(r'^delete/(\S+)$', 'projects.views.delete_project', name='delete_project'),
url(r'^delete_employee/(?P<employee_slug>\S+)/from/(?P<project_slug>\S+)$', 'projects.views.delete_employee_from_project', name='delete_employee_from_project'),
url(r'^delete_organization/(?P<organization_slug>\S+)/from/(?P<project_slug>\S+)$', 'projects.views.delete_organization_from_project', name='delete_organization_from_project'),
)
urlpatterns += staticfiles_urlpatterns()
|
ujvl/ray-ng
|
refs/heads/master
|
doc/examples/lbfgs/driver.py
|
2
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import scipy.optimize
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import ray
import ray.experimental.tf_utils
class LinearModel(object):
"""Simple class for a one layer neural network.
Note that this code does not initialize the network weights. Instead
weights are set via self.variables.set_weights.
Example:
net = LinearModel([10, 10])
weights = [np.random.normal(size=[10, 10]),
np.random.normal(size=[10])]
variable_names = [v.name for v in net.variables]
net.variables.set_weights(dict(zip(variable_names, weights)))
Attributes:
x (tf.placeholder): Input vector.
w (tf.Variable): Weight matrix.
b (tf.Variable): Bias vector.
y_ (tf.placeholder): Input result vector.
cross_entropy (tf.Operation): Final layer of network.
cross_entropy_grads (tf.Operation): Gradient computation.
sess (tf.Session): Session used for training.
variables (TensorFlowVariables): Extracted variables and methods to
manipulate them.
"""
def __init__(self, shape):
"""Creates a LinearModel object."""
x = tf.placeholder(tf.float32, [None, shape[0]])
w = tf.Variable(tf.zeros(shape))
b = tf.Variable(tf.zeros(shape[1]))
self.x = x
self.w = w
self.b = b
y = tf.nn.softmax(tf.matmul(x, w) + b)
y_ = tf.placeholder(tf.float32, [None, shape[1]])
self.y_ = y_
cross_entropy = tf.reduce_mean(
-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
self.cross_entropy = cross_entropy
self.cross_entropy_grads = tf.gradients(cross_entropy, [w, b])
self.sess = tf.Session()
# In order to get and set the weights, we pass in the loss function to
# Ray's TensorFlowVariables to automatically create methods to modify
# the weights.
self.variables = ray.experimental.tf_utils.TensorFlowVariables(
cross_entropy, self.sess)
def loss(self, xs, ys):
"""Computes the loss of the network."""
return float(
self.sess.run(
self.cross_entropy, feed_dict={
self.x: xs,
self.y_: ys
}))
def grad(self, xs, ys):
"""Computes the gradients of the network."""
return self.sess.run(
self.cross_entropy_grads, feed_dict={
self.x: xs,
self.y_: ys
})
@ray.remote
class NetActor(object):
def __init__(self, xs, ys):
os.environ["CUDA_VISIBLE_DEVICES"] = ""
with tf.device("/cpu:0"):
self.net = LinearModel([784, 10])
self.xs = xs
self.ys = ys
# Compute the loss on a batch of data.
def loss(self, theta):
net = self.net
net.variables.set_flat(theta)
return net.loss(self.xs, self.ys)
# Compute the gradient of the loss on a batch of data.
def grad(self, theta):
net = self.net
net.variables.set_flat(theta)
gradients = net.grad(self.xs, self.ys)
return np.concatenate([g.flatten() for g in gradients])
def get_flat_size(self):
return self.net.variables.get_flat_size()
# Compute the loss on the entire dataset.
def full_loss(theta):
theta_id = ray.put(theta)
loss_ids = [actor.loss.remote(theta_id) for actor in actors]
return sum(ray.get(loss_ids))
# Compute the gradient of the loss on the entire dataset.
def full_grad(theta):
theta_id = ray.put(theta)
grad_ids = [actor.grad.remote(theta_id) for actor in actors]
# The float64 conversion is necessary for use with fmin_l_bfgs_b.
return sum(ray.get(grad_ids)).astype("float64")
if __name__ == "__main__":
ray.init()
# From the perspective of scipy.optimize.fmin_l_bfgs_b, full_loss is simply
# a function which takes some parameters theta, and computes a loss.
# Similarly, full_grad is a function which takes some parameters theta, and
# computes the gradient of the loss. Internally, these functions use Ray to
# distribute the computation of the loss and the gradient over the data
    # held by the remote actors, which may be spread across a cluster.
    # However, these details are hidden from scipy.optimize.fmin_l_bfgs_b,
    # which simply uses them to run the L-BFGS algorithm.
# Load the mnist data and turn the data into remote objects.
print("Downloading the MNIST dataset. This may take a minute.")
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
num_batches = 10
batch_size = mnist.train.num_examples // num_batches
batches = [mnist.train.next_batch(batch_size) for _ in range(num_batches)]
print("Putting MNIST in the object store.")
actors = [NetActor.remote(xs, ys) for (xs, ys) in batches]
# Initialize the weights for the network to the vector of all zeros.
dim = ray.get(actors[0].get_flat_size.remote())
theta_init = 1e-2 * np.random.normal(size=dim)
# Use L-BFGS to minimize the loss function.
print("Running L-BFGS.")
result = scipy.optimize.fmin_l_bfgs_b(
full_loss, theta_init, maxiter=10, fprime=full_grad, disp=True)
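    # fmin_l_bfgs_b returns a (theta_opt, final_loss, info_dict) tuple; disp=True
    # also prints convergence information.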
|
zqzhang/crosswalk-test-suite
|
refs/heads/master
|
misc/webdriver-w3c-tests/modal/alerts_test.py
|
12
|
# -*- mode: python; fill-column: 100; comment-column: 100; -*-
import os
import sys
import unittest
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir)))
import base_test
from client.exceptions import ElementNotVisibleException, NoSuchAlertException
from client.wait import WebDriverWait
class AlertsTest(base_test.WebDriverBaseTest):
def setUp(self):
self.wait = WebDriverWait(
self.driver,
5,
ignored_exceptions=[NoSuchAlertException])
self.driver.get(self.webserver.where_is('modal/res/alerts.html'))
def tearDown(self):
try:
self.driver.switch_to_alert().dismiss()
except NoSuchAlertException:
pass
# Alerts
def test_should_allow_user_to_accept_an_alert(self):
self.driver.find_element_by_id('alert').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.accept()
self.driver.get_current_url()
def test_should_allow_user_to_accept_an_alert_with_no_text(self):
self.driver.find_element_by_id('empty-alert').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.accept()
self.driver.get_current_url()
def test_should_allow_user_to_dismiss_an_alert(self):
self.driver.find_element_by_id('alert').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.dismiss()
self.driver.get_current_url()
def test_should_allow_user_to_get_text_of_an_alert(self):
self.driver.find_element_by_id('alert').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals('cheese', value)
def test_setting_the_value_of_an_alert_throws(self):
self.driver.find_element_by_id('alert').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
with self.assertRaises(ElementNotVisibleException):
alert.send_keys('cheese')
alert.accept()
def test_alert_should_not_allow_additional_commands_if_dismissed(self):
self.driver.find_element_by_id('alert').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.accept()
with self.assertRaises(NoSuchAlertException):
alert.get_text()
# Prompts
def test_should_allow_user_to_accept_a_prompt(self):
self.driver.find_element_by_id('prompt').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.accept()
self.wait.until(
lambda x: x.find_element_by_id('text').get_text() == '')
def test_should_allow_user_to_dismiss_a_prompt(self):
self.driver.find_element_by_id('prompt').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.dismiss()
self.wait.until(
lambda x: x.find_element_by_id('text').get_text() == 'null')
def test_should_allow_user_to_set_the_value_of_a_prompt(self):
self.driver.find_element_by_id('prompt').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.send_keys('cheese')
alert.accept()
self.wait.until(
lambda x: x.find_element_by_id('text').get_text() == 'cheese')
def test_should_allow_user_to_get_text_of_a_prompt(self):
self.driver.find_element_by_id('prompt').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals('Enter something', value)
def test_prompt_should_not_allow_additional_commands_if_dismissed(self):
self.driver.find_element_by_id('prompt').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.accept()
with self.assertRaises(NoSuchAlertException):
alert.get_text()
def test_prompt_should_use_default_value_if_no_keys_sent(self):
self.driver.find_element_by_id('prompt-with-default').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.accept()
self.wait.until(
lambda x: x.find_element_by_id('text').get_text() == 'This is a default value')
def test_prompt_should_have_null_value_if_dismissed(self):
self.driver.find_element_by_id('prompt-with-default').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.dismiss()
self.wait.until(
lambda x: x.find_element_by_id('text').get_text() == 'null')
# Confirmations
def test_should_allow_user_to_accept_a_confirm(self):
self.driver.find_element_by_id('confirm').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.accept()
self.wait.until(
lambda x: x.find_element_by_id('text').get_text() == 'true')
def test_should_allow_user_to_dismiss_a_confirm(self):
self.driver.find_element_by_id('confirm').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.dismiss()
self.wait.until(
lambda x: x.find_element_by_id('text').get_text() == 'false')
def test_setting_the_value_of_a_confirm_throws(self):
self.driver.find_element_by_id('confirm').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
with self.assertRaises(ElementNotVisibleException):
alert.send_keys('cheese')
alert.accept()
def test_should_allow_user_to_get_text_of_a_confirm(self):
self.driver.find_element_by_id('confirm').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals('cheese', value)
def test_confirm_should_not_allow_additional_commands_if_dismissed(self):
self.driver.find_element_by_id('confirm').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
alert.accept()
with self.assertRaises(NoSuchAlertException):
alert.get_text()
def test_switch_to_missing_alert_fails(self):
with self.assertRaises(NoSuchAlertException):
self.driver.switch_to_alert()
if __name__ == '__main__':
unittest.main()
|
karanisverma/feature_langpop
|
refs/heads/master
|
librarian/migrations/main/04_add_content_license_and_source_columns.py
|
2
|
SQL = """
alter table zipballs add column keep_formatting boolean not null default 0;
alter table zipballs add column is_core boolean not null default 0;
alter table zipballs add column is_partner boolean not null default 0;
alter table zipballs add column is_sponsored boolean not null default 0;
alter table zipballs add column partner varchar;
alter table zipballs add column license varchar;
"""
def up(db, config):
db.executescript(SQL)
|
SHDShim/pytheos
|
refs/heads/master
|
examples/6_p_scale_test_Yokoo_Pt.py
|
1
|
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('cat', '0Source_Citation.txt')
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
# %matplotlib notebook # for interactive
# For high dpi displays.
# In[3]:
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
# # 0. General note
# This example compares pressure calculated from `pytheos` and original publication for the platinum scale by Yokoo 2009.
# # 1. Global setup
# In[4]:
import matplotlib.pyplot as plt
import numpy as np
from uncertainties import unumpy as unp
import pytheos as eos
# # 3. Compare
# In[5]:
eta = np.linspace(1., 0.60, 21)
print(eta)
# In[6]:
yokoo_pt = eos.platinum.Yokoo2009()
# In[7]:
yokoo_pt.print_equations()
# In[9]:
yokoo_pt.print_parameters()
# In[10]:
v0 = 60.37930856339099
# In[11]:
yokoo_pt.three_r
# In[12]:
v = v0 * (eta)
temp = 3000.
# In[13]:
p = yokoo_pt.cal_p(v, temp * np.ones_like(v))
# <img src='./tables/Yokoo_Pt.png'>
# In[14]:
print('for T = ', temp)
for eta_i, p_i in zip(eta, p):
print("{0: .3f} {1: .2f}".format(eta_i, p_i))
# It is alarming that even the 300 K isotherm does not match the table value. The difference is 1%.
# In[15]:
v = yokoo_pt.cal_v(p, temp * np.ones_like(p), min_strain=0.6)
print(1.-(v/v0))
|
boyombo/django-stations
|
refs/heads/master
|
stations/drugshare/__init__.py
|
12133432
| |
erdavila/M-Tree
|
refs/heads/master
|
py/mtree/tests/__init__.py
|
12133432
| |
dotKom/onlineweb4
|
refs/heads/develop
|
apps/splash/migrations/0008_auto_20160812_2053.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-12 18:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('splash', '0007_auto_20160812_2053'),
]
operations = [
migrations.RemoveField(
model_name='splashevent',
name='created',
),
migrations.RemoveField(
model_name='splashevent',
name='modified',
),
migrations.RemoveField(
model_name='splashevent',
name='splash_year',
),
migrations.DeleteModel(
name='SplashYear',
),
]
|
seanwestfall/django
|
refs/heads/master
|
tests/urlpatterns_reverse/erroneous_views_module.py
|
319
|
import non_existent # NOQA
def erroneous_view(request):
pass
|
ElricleNecro/LibThese
|
refs/heads/master
|
LibThese/Pretty/W0_py.py
|
1
|
import abc
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import widgets as w
from astropy import constants as const
from LibThese.Models.King import DimKing as King
def CalcMtot(w0, rc, sig_v):
test = King(w0, rc, sig_v, G=const.G.value)
test.DimSolve2()
test.DimXAxis()
test.CalcRho()
test.CalcMtot()
return test.Mtot
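# CalcMtot works on scalar parameters; np.vectorize wraps it so that meshgrid
# inputs (as used below) are evaluated elementwise.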
pCalcMtot = np.vectorize(CalcMtot)
def W0MtotCube(w0: np.ndarray, rc_range: tuple, sigv_range: tuple, nb=25):
res = list()
for w in w0:
rc, sv = np.meshgrid(\
np.linspace(rc_range[0], rc_range[1], nb), \
np.linspace(sigv_range[0], sigv_range[1], nb) \
)
        res.append([pCalcMtot(w, rc, sv), rc, sv])
return res
class WithCursor(object, metaclass=abc.ABCMeta):
def __init__(self, ax=None, \
slname='slider', slmin=0, slmax=10, slinit=5, \
axcolor='lightgoldenrodyellow'):
        if ax is None:
            self.fig = plt.figure()
            self.fig.subplots_adjust(left=0.25, bottom=0.25)
            self.ax = self.fig.add_subplot(111)
        else:
            self.ax = ax
            self.fig = ax.figure
self.slname = slname
self.slmin = slmin
self.slmax = slmax
self.slinit = slinit
self._Create_SAxes(axcolor)
self._Create_Slider()
self.slider.on_changed(self.update)
def _Create_SAxes(self, axcolor):
self._slax = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
def _Create_Slider(self):
self.slider = w.Slider(self._slax, \
self.slname, self.slmin, self.slmax, \
valinit=self.slinit)
@abc.abstractmethod
def update(self, val):
raise NotImplementedError
class W0(WithCursor):
"""
W0.rcr ::
W0.sig_vr ::
"""
def __init__(self, w0r, rcr, sig_vr, **kwargs):
self._rcr = rcr
self._sig_vr = sig_vr
self._w0r = w0r
kwargs["slmin"] = self._w0r[0]
kwargs["slmax"] = self._w0r[1]
kwargs["slinit"] = self._w0r[0]
kwargs["slname"] = r"$W_0$"
super(W0, self).__init__(**kwargs)
self.Populate()
def Populate(self):
self.rc, self.sig_v = np.meshgrid(np.linspace(self.rcr[0], self.rcr[1], self.rcr[2]), \
np.linspace(self.sig_vr[0], self.sig_vr[1], self.sig_vr[2]))
def update(self, val):
        mtot = pCalcMtot(self.slider.val, self.rc, self.sig_v)
self.ax.pcolor(self.rc, self.sig_v, mtot.T)
self.ax.figure.canvas.draw()
@property
def rcr(self):
return self._rcr
@rcr.setter
def rcr(self, val):
self._rcr = val
self.Populate()
@property
def sig_vr(self):
return self._sig_vr
@sig_vr.setter
def sig_vr(self, val):
self._sig_vr = val
self.Populate()
class W0Cube(WithCursor):
def __init__(self, cube, **kwargs):
self.cube = cube
        super(W0Cube, self).__init__(**kwargs)
    def update(self, val):
        idx = int(round(val))  # slider values are floats; pick the nearest cube slice
        self.ax.pcolor(self.cube[idx][1], self.cube[idx][2], self.cube[idx][0].T)
        self.ax.figure.canvas.draw()
if __name__ == '__main__':
nb = 50
rc, sig_v = np.meshgrid( \
np.linspace(1*const.pc.value, 10.*const.pc.value, nb), \
np.linspace(290, 2900, nb)
)
    mtot = pCalcMtot(12.5, rc, sig_v)
|
inn1983/xbmca10-empatzero
|
refs/heads/master
|
tools/EventClients/examples/python/example_notification.py
|
228
|
#!/usr/bin/python
# This is a simple example showing how you can show a notification
# window with a custom icon inside XBMC. It could be used by mail
# monitoring apps, calendar apps, etc.
import sys
sys.path.append("../../lib/python")
from xbmcclient import *
from socket import *
def main():
import time
import sys
host = "localhost"
port = 9777
addr = (host, port)
sock = socket(AF_INET,SOCK_DGRAM)
packet = PacketHELO("Email Notifier", ICON_NONE)
packet.send(sock, addr)
# wait for 5 seconds
    time.sleep(5)
packet = PacketNOTIFICATION("New Mail!", # caption
"RE: Check this out", # message
ICON_PNG, # optional icon type
"../../icons/mail.png") # icon file (local)
packet.send(sock, addr)
packet = PacketBYE()
packet.send(sock, addr)
if __name__=="__main__":
main()
|
awacha/cct
|
refs/heads/master
|
cct/processing/mainwindow/exporttool/__init__.py
|
1
|
from .exporttool import ExportTool
|
anant-dev/django
|
refs/heads/master
|
django/contrib/admin/options.py
|
92
|
import copy
import operator
from collections import OrderedDict
from functools import partial, reduce, update_wrapper
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, flatten_fieldsets, get_deleted_objects,
lookup_needs_distinct, model_format_dict, quote, unquote,
)
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist, FieldError, PermissionDenied, ValidationError,
)
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, escapejs
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import string_concat, ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return 'radiolist' if radio_style == VERTICAL else 'radiolist inline'
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}
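# A minimal override sketch (hypothetical ModelAdmin and widget, for
# illustration only):
#   class ArticleAdmin(ModelAdmin):
#       formfield_overrides = {
#           models.TextField: {'widget': MyRichTextWidget},
#       }
# Per-admin entries are merged over these defaults in BaseModelAdmin.__init__.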
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
def check(self, **kwargs):
return self.checks_class().check(self, **kwargs)
def __init__(self):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
request = kwargs.pop("request", None)
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(request),
can_delete_related=related_modeladmin.has_delete_permission(request),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset, let the field decide
(returns None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(*ordering)
return None
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field,
self.admin_site, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = _('None') if db_field.blank else None
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field,
self.admin_site, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(
db_field.verbose_name,
db_field.name in self.filter_vertical
)
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
form_field = db_field.formfield(**kwargs)
if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
help_text = form_field.help_text
form_field.help_text = string_concat(help_text, ' ', msg) if help_text else msg
return form_field
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': get_content_type_for_model(obj).pk,
'object_id': obj.pk
})
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
return self.fields
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {'fields': self.get_fields(request, obj)})]
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(l):
l = l()
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
break
# It is allowed to filter on values that would be found from local
# model anyways. For example, if you filter on employee__department__id,
# then the id value would be found already from employee__department_id.
if not prev_field or (prev_field.concrete and
field not in prev_field.get_path_info()[-1].target_fields):
relation_parts.append(part)
if not getattr(field, 'get_path_info', None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.get_path_info()[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
clean_lookup = LOOKUP_SEP.join(relation_parts)
valid_lookups = [self.date_hierarchy]
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
valid_lookups.append(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.append(filter_item[0])
else:
valid_lookups.append(filter_item)
return clean_lookup in valid_lookups
def to_field_allowed(self, request, to_field):
"""
Returns True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
opts = self.model._meta
try:
field = opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f for f in opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
if (any(issubclass(model, related_model) for model in registered_models) and
related_object.field.remote_field.get_related_field() == field):
return True
return False
def has_add_permission(self, request):
"""
Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to delete the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_module_permission(self, request):
"""
Returns True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
# Actions
actions = []
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super(ModelAdmin, self).__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = [
url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
url(r'^(.+)/change/$', wrap(self.change_view), name='%s_%s_change' % info),
# For backwards compatibility (was the change url before 1.9)
url(r'^(.+)/$', wrap(RedirectView.as_view(
pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)
))),
]
return urlpatterns
def urls(self):
return self.get_urls()
urls = property(urls)
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'core.js',
'admin/RelatedObjectLookups.js',
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'actions%s.js' % extra,
'urlify.js',
'prepopulate%s.js' % extra,
'vendor/xregexp/xregexp.min.js',
]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_model_perms(self, request):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_form(request, obj, fields=None)
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = OrderedDict(
(f, None) for f in readonly_fields
if f in self.form.declared_fields
)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
"form": form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
% (e, self.__class__.__name__))
def get_changelist(self, request, **kwargs):
"""
Returns the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_object(self, request, object_id, from_field=None):
"""
Returns an instance matching the field and value provided, the primary
key is used if no field is provided. Returns ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if (defaults.get('fields') is None
and not modelform_defines_fields(defaults.get('form'))):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(self.model,
self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yields formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=object_repr,
action_flag=DELETION,
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is explicitly set to None that means that we don't
# want *any* actions enabled on this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return OrderedDict()
actions = []
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
description = getattr(func, 'short_description', name.replace('_', ' '))
actions.append((func, name, description))
# Then gather them from the model admin and all parent classes,
# starting with self and working back up.
for klass in self.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
# Avoid trying to iterate over None
if not class_actions:
continue
actions.extend(self.get_action(action) for action in class_actions)
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into an OrderedDict keyed by name.
actions = OrderedDict(
(name, (func, name, desc))
for func, name, desc in actions
)
return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
or the name of a method on the ModelAdmin. Return is a tuple of
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Returns a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Returns a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Returns a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
def get_preserved_filters(self, request):
"""
Returns the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a change message from a changed object.
"""
change_message = []
if add:
change_message.append(_('Added.'))
elif form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': force_text(added_object._meta.verbose_name),
'object': force_text(added_object)})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': force_text(changed_object._meta.verbose_name),
'object': force_text(changed_object)})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': force_text(deleted_object._meta.verbose_name),
'object': force_text(deleted_object)})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message, level=messages.INFO, extra_tags='',
fail_silently=False):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ', '.join('`%s`' % l for l in levels)
raise ValueError('Bad message level string: `%s`. '
'Possible values are: %s' % (level, levels_repr))
messages.add_message(request, level, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
view_on_site_url = self.get_view_on_site_url(obj)
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': view_on_site_url is not None,
'absolute_url': view_on_site_url,
'form_url': form_url,
'opts': opts,
'content_type_id': get_content_type_for_model(self.model).pk,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'to_field_var': TO_FIELD_VAR,
'is_popup_var': IS_POPUP_VAR,
'app_label': app_label,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(request, form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'value': value,
'obj': obj,
})
elif "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
if post_url_continue is None:
post_url_continue = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(quote(pk_value),),
current_app=self.admin_site.name)
post_url_continue = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts},
post_url_continue
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else obj._meta.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = request.resolver_match.args[0]
new_value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'change',
'value': escape(value),
'obj': escapejs(obj),
'new_value': escape(new_value),
})
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
if "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
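        # Example (hypothetical POST): with action forms at the top and
        # bottom of the page, data.getlist('action') may be
        # ['delete_selected', 'delete_selected'] and 'index' selects which
        # button was actually pushed.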
try:
data.update({'action': data.getlist('action')[action_index]})
        except IndexError:
            # If we didn't get an action from the chosen form, that's
            # invalid POST data; with no 'action' key, the form validation
            # check below will fail, so there's nothing more to do here.
            pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func = self.get_actions(request)[action][0]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determines the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'delete',
'value': escape(obj_id),
})
self.message_user(request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': force_text(opts.verbose_name),
'obj': force_text(obj_display),
}, messages.SUCCESS)
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
)
return TemplateResponse(request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html"
], context)
def get_inline_formsets(self, request, formsets, inline_instances,
obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data.
Unless overridden, this populates from the GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
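    # Illustrative example (hypothetical field names): for a model with a
    # ManyToManyField 'tags', GET '?name=Foo&tags=1,2' yields
    # {'name': 'Foo', 'tags': ['1', '2']}.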
@csrf_protect_m
@transaction.atomic
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
object_id = None
obj = None
ModelForm = self.get_form(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=not add)
else:
form_validated = False
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(request, form, formsets, add)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
context = dict(self.admin_site.each_context(request),
title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
adminform=adminForm,
object_id=object_id,
original=obj,
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
media=media,
inline_admin_formsets=inline_formsets,
errors=helpers.AdminErrorList(form, formsets),
preserved_filters=self.get_preserved_filters(request),
)
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
context['show_save'] = False
context['show_save_and_continue'] = False
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_filter = self.get_list_filter(request)
search_fields = self.get_search_fields(request)
list_select_related = self.get_list_select_related(request)
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
if actions:
# Add the action checkboxes if there are any actions available.
list_display = ['action_checkbox'] + list(list_display)
ChangeList = self.get_changelist(request)
try:
cl = ChangeList(request, self.model, list_display,
list_display_links, list_filter, self.date_hierarchy,
search_fields, list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET.keys():
return SimpleTemplateResponse('admin/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if (request.method == "POST" and cl.list_editable and
'_save' in request.POST and not action_failed):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_text(opts.verbose_name)
else:
name = force_text(opts.verbose_name_plural)
msg = ungettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = dict(
self.admin_site.each_context(request),
module_name=force_text(opts.verbose_name_plural),
selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
selection_note_all=selection_note_all % {'total_count': cl.result_count},
title=cl.title,
is_popup=cl.is_popup,
to_field=cl.to_field,
cl=cl,
media=media,
has_add_permission=self.has_add_permission(request),
opts=cl.opts,
action_form=action_form,
actions_on_top=self.actions_on_top,
actions_on_bottom=self.actions_on_bottom,
actions_selection_counter=self.actions_selection_counter,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
@csrf_protect_m
@transaction.atomic
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(opts.verbose_name), 'key': escape(object_id)}
)
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(model._meta.verbose_name),
'key': escape(object_id),
})
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = dict(self.admin_site.each_context(request),
title=_('Change history: %s') % force_text(obj),
action_list=action_list,
module_name=capfirst(force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = {
'instance': obj,
'prefix': prefix,
'queryset': inline.get_queryset(request),
}
if request.method == 'POST':
formset_params.update({
'data': request.POST,
'files': request.FILES,
'save_as_new': '_saveasnew' in request.POST
})
formsets.append(FormSet(**formset_params))
inline_instances.append(inline)
return formsets, inline_instances
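    # Prefix disambiguation sketch (hypothetical prefix): if two inlines
    # share the default prefix 'book_set', the first keeps 'book_set' and
    # the second becomes 'book_set-2', so their POSTed data stays
    # separable.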
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js',
'inlines%s.js' % extra]
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
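    # e.g. with settings.DEBUG = True this serves 'vendor/jquery/jquery.js'
    # and 'inlines.js'; otherwise the '.min' variants.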
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
}
defaults.update(kwargs)
base_model_form = defaults['form']
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
                We don't validate the 'DELETE' field itself because in the
                templates it isn't rendered from the field information, but
                only via the generic "deletion_field" of the
                InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance.pk is None:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance representation,
# suitable to be an item in a list.
_('%(class_name)s %(instance)s') % {
'class_name': p._meta.verbose_name,
'instance': p}
)
params = {'class_name': self._meta.model._meta.verbose_name,
'instance': self.instance,
'related_objects': get_text_list(objs, _('and'))}
msg = _("Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s")
raise ValidationError(msg, code='deleting_protected', params=params)
def is_valid(self):
result = super(DeleteProtectedModelForm, self).is_valid()
self.hand_clean_DELETE()
return result
defaults['form'] = DeleteProtectedModelForm
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_formset(request, obj, fields=None).form
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_queryset(self, request):
queryset = super(InlineModelAdmin, self).get_queryset(request)
if not self.has_change_permission(request):
queryset = queryset.none()
return queryset
def has_add_permission(self, request):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request)
return super(InlineModelAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
opts = self.opts
if opts.auto_created:
# The model was auto-created as intermediary for a
# ManyToMany-relationship, find the target model
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request, obj)
return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
|
makerbot/s3g
|
refs/heads/master
|
tests/pi_test_S3g.py
|
2
|
import os
import sys
lib_path = os.path.abspath('./')
sys.path.insert(0, lib_path)
import uuid
import unittest
import io
import struct
import mock
import threading
import serial
import makerbot_driver  # needed for the makerbot_driver.* references below
from makerbot_driver import Writer, constants, s3g, errors, Encoder
class TestS3gPrintToFileType(unittest.TestCase):
def test_print_to_file_type_s3g(self):
print_to_file_type = 's3g'
r = s3g()
r.set_print_to_file_type(print_to_file_type)
r.writer = mock.Mock()
r.queue_extended_point_classic = mock.Mock()
point = [0, 1, 2, 3, 4]
dda_speed = 50
dda_rate = 1000000.0 / float(dda_speed)
e_distance = 100
feedrate = 200
relative_axes = []
r.queue_extended_point(point, dda_speed, e_distance, feedrate)
r.queue_extended_point_classic.assert_called_once_with(
point, dda_speed)
def test_print_to_file_type_x3g(self):
print_to_file_type = 'x3g'
r = s3g()
r.set_print_to_file_type(print_to_file_type)
r.queue_extended_point_x3g = mock.Mock()
point = [0, 1, 2, 3, 4]
dda_speed = 50
dda_rate = 1000000.0 / float(dda_speed)
e_distance = 100
feedrate = 200
relative_axes = []
r.queue_extended_point(point, dda_speed, e_distance, feedrate)
r.queue_extended_point_x3g.assert_called_once_with(
point, dda_rate, relative_axes, e_distance, feedrate)
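    # Together these two tests pin down the dispatch: queue_extended_point
    # forwards to queue_extended_point_classic for 's3g' output, and to
    # queue_extended_point_x3g (with a dda_rate derived from dda_speed)
    # for 'x3g' output.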
class TestFromFileName(unittest.TestCase):
def setUp(self):
self.condition = threading.Condition()
self.obj = s3g.from_filename(None, self.condition)
def tearDown(self):
self.obj = None
def test_from_filename_gets_correct_objects(self):
self.assertTrue(isinstance(self.obj, s3g))
self.assertTrue(isinstance(self.obj.writer, Writer.StreamWriter))
self.assertTrue(isinstance(self.obj.writer.file, serial.Serial))
def test_from_filename_minimal_constructor(self):
self.assertEqual(self.obj.writer.file.port, None)
self.assertEqual(self.obj.writer.file.baudrate, 115200)
self.assertEqual(self.obj.writer.file.timeout, .2)
def test_from_filename_use_all_parameters(self):
baudrate = 9800
timeout = 5
        self.obj = s3g.from_filename(None, self.condition, baudrate=baudrate, timeout=timeout)
self.assertEqual(self.obj.writer.file.baudrate, baudrate)
self.assertEqual(self.obj.writer.file.timeout, timeout)
self.assertEqual(self.condition, self.obj.writer._condition)
def test_from_filename_none_case(self):
""" test the from_filename s3g factory."""
self.assertRaises(serial.serialutil.SerialException, s3g.from_filename,
"/dev/this_is_hopefully_not_a_real_port", self.condition)
class S3gTestsFirmwareX3g(unittest.TestCase):
"""
Emulate a machine
"""
def setUp(self):
self.r = s3g()
self.r.set_print_to_file_type('x3g')
        self.outputstream = io.BytesIO()  # stream carrying responses to the driver
        self.inputstream = io.BytesIO()  # stream capturing commands from the driver
file = io.BufferedRWPair(self.outputstream, self.inputstream)
self.condition = threading.Condition()
writer = Writer.StreamWriter(file, self.condition)
self.r.writer = writer
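        # Loopback harness: io.BufferedRWPair(reader, writer) means the
        # driver reads canned responses from self.outputstream, while every
        # packet it sends is captured in self.inputstream for the
        # assertions in the tests below.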
def tearDown(self):
self.r = None
self.outputstream = None
self.inputstream = None
self.file = None
def test_queue_extended_point(self):
point = [1, 2, 3, 4, 5]
dda = 50
dda_rate = 1000000 / dda
relative_axes = ['X']
distance = 123.0
feedrate = 100
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.queue_extended_point(
point, dda, distance, feedrate, relative_axes=relative_axes)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'QUEUE_EXTENDED_POINT_ACCELERATED'])
self.assertEqual(payload[1:5], Encoder.encode_int32(point[0]))
self.assertEqual(payload[5:9], Encoder.encode_int32(point[1]))
self.assertEqual(payload[9:13], Encoder.encode_int32(point[2]))
self.assertEqual(payload[13:17], Encoder.encode_int32(point[3]))
self.assertEqual(payload[17:21], Encoder.encode_int32(point[4]))
self.assertEqual(payload[21:25], Encoder.encode_uint32(dda_rate))
self.assertEqual(payload[25], Encoder.encode_axes(relative_axes))
self.assertEqual(payload[26:30], struct.pack('<f', float(distance)))
self.assertEqual(
payload[30:32], Encoder.encode_int16(int(float(feedrate * 64.0))))
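    # Payload layout verified above: command byte, five int32 axis steps,
    # a uint32 DDA rate, one axes bitfield byte, a little-endian float32
    # distance, and an int16 feedrate scaled by 64.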
class S3gTestsFirmwareClassic(unittest.TestCase):
"""
Emulate a machine
"""
def setUp(self):
self.r = s3g()
self.r.set_print_to_file_type('s3g')
        self.outputstream = io.BytesIO()  # stream carrying responses to the driver
        self.inputstream = io.BytesIO()  # stream capturing commands from the driver
file = io.BufferedRWPair(self.outputstream, self.inputstream)
self.condition = threading.Condition()
writer = Writer.StreamWriter(file, self.condition)
self.r.writer = writer
def tearDown(self):
self.r = None
self.outputstream = None
self.inputstream = None
self.file = None
def test_queue_extended_point_x3g(self):
point = [1, 2, 3, 4, 5]
dda = 50
relative_axes = ['X']
distance = 123.0
feedrate = 100
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.queue_extended_point_x3g(
point, dda, relative_axes, distance, feedrate)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'QUEUE_EXTENDED_POINT_ACCELERATED'])
self.assertEqual(payload[1:5], Encoder.encode_int32(point[0]))
self.assertEqual(payload[5:9], Encoder.encode_int32(point[1]))
self.assertEqual(payload[9:13], Encoder.encode_int32(point[2]))
self.assertEqual(payload[13:17], Encoder.encode_int32(point[3]))
self.assertEqual(payload[17:21], Encoder.encode_int32(point[4]))
self.assertEqual(payload[21:25], Encoder.encode_uint32(dda))
self.assertEqual(payload[25], Encoder.encode_axes(relative_axes))
self.assertEqual(payload[26:30], struct.pack('<f', float(distance)))
self.assertEqual(
payload[30:32], Encoder.encode_int16(int(float(feedrate * 64.0))))
def test_set_servo_2_position(self):
tool = 2
theta = 50
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_servo2_position(tool, theta)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], tool)
self.assertEqual(payload[2], constants.slave_action_command_dict[
'SET_SERVO_2_POSITION'])
self.assertEqual(payload[3], 1)
self.assertEqual(payload[4], theta)
def test_toggle_abp(self):
tool = 2
state = True
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.toggle_ABP(tool, state)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], tool)
self.assertEqual(
payload[2], constants.slave_action_command_dict['TOGGLE_ABP'])
self.assertEqual(payload[3], 1)
self.assertEqual(payload[4], 1)
    def test_set_motor1_speed_pwm(self):  # 'test_' prefix so unittest discovers it
tool = 2
pwm = 128
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_motor1_speed_pwm(tool, pwm)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], tool)
self.assertEqual(payload[2], constants.slave_action_command_dict[
'SET_MOTOR_1_SPEED_PWM'])
self.assertEqual(payload[3], 1)
self.assertEqual(payload[4], 128)
def test_set_motor1_direction(self):
tool = 2
direction = True
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_motor1_direction(tool, direction)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], tool)
self.assertEqual(payload[2], constants.slave_action_command_dict[
'SET_MOTOR_1_DIRECTION'])
self.assertEqual(payload[3], 1)
self.assertEqual(payload[4], direction)
def test_get_vid_pid_iface(self):
vid = 0x0000
pid = 0xFFFF
port = '/dev/tty.ACM0'
gMachineDetector = makerbot_driver.get_gMachineDetector()
gMachineDetector.get_available_machines = mock.Mock(return_value={
port: {
'VID': vid,
'PID': pid,
}
})
some_port = mock.Mock()
some_port.port = port
self.r.writer = makerbot_driver.Writer.StreamWriter(some_port)
got_vid_pid = self.r.get_vid_pid_iface()
self.assertEqual(got_vid_pid, (vid, pid))
def test_get_verified_status_unverified(self):
vid = 0x0000
pid = 0xFFFF
self.r.get_vid_pid = mock.Mock(return_value=(vid, pid))
self.assertFalse(self.r.get_verified_status())
def test_get_verified_status_verified(self):
vid = makerbot_driver.vid_pid[0]
pid = makerbot_driver.vid_pid[1]
self.r.get_vid_pid = mock.Mock(return_value=(vid, pid))
self.assertTrue(self.r.get_verified_status())
def test_get_toolcount(self):
toolcount = 3
eeprom_offset_toolcount = 0x0042
eeprom_length_toolcount = 1
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(toolcount)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEqual(self.r.get_toolhead_count(), toolcount)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['READ_FROM_EEPROM'])
self.assertEqual(
payload[1:3], Encoder.encode_uint16(eeprom_offset_toolcount))
self.assertEqual(payload[3], eeprom_length_toolcount)
def test_get_version(self):
version = 0x5DD5
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint16(version))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEqual(self.r.get_version(), version)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['GET_VERSION'])
self.assertEqual(
payload[1:3], Encoder.encode_uint16(constants.s3g_version))
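    # Note: the GET_VERSION query carries the host's own protocol version
    # (constants.s3g_version) in its payload, which is what the
    # payload[1:3] assertion above checks.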
def test_get_advanced_version(self):
info = {
'Version': 0x5DD5,
'InternalVersion': 0x0110,
'SoftwareVariant': 10,
'ReservedA': 0,
'ReservedB': 0
}
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint16(info['Version']))
response_payload.extend(Encoder.encode_uint16(info['InternalVersion']))
response_payload.append(info['SoftwareVariant'])
response_payload.append(info['ReservedA'])
response_payload.extend(Encoder.encode_uint16(info['ReservedB']))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
version_info = self.r.get_advanced_version()
self.assertEqual(version_info, info)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_query_command_dict[
'GET_ADVANCED_VERSION'])
self.assertEqual(
payload[1:3], Encoder.encode_uint16(constants.s3g_version))
def test_get_name(self):
import array
name = 'The Replicator'
n = array.array('B', name)
n.append(0)
n.append(0)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(n)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
version_info = self.r.get_name()
self.assertEqual(version_info, name)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['READ_FROM_EEPROM'])
def test_reset(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.reset()
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['RESET'])
def test_is_finished(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(0)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEqual(self.r.is_finished(), 0)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['IS_FINISHED'])
def test_clear_buffer(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.clear_buffer()
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['CLEAR_BUFFER'])
def test_pause(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.pause()
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['PAUSE'])
def test_tool_query_negative_tool_index(self):
self.assertRaises(
errors.ToolIndexError,
self.r.tool_query,
-1,
constants.slave_query_command_dict['GET_VERSION']
)
def test_tool_query_too_high_tool_index(self):
self.assertRaises(
errors.ToolIndexError,
self.r.tool_query,
constants.max_tool_index + 1,
constants.slave_query_command_dict['GET_VERSION']
)
def test_tool_query_no_payload(self):
tool_index = 2
command = 0x12
response = 'abcdef'
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(response)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(
self.r.tool_query(tool_index, command), response_payload)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], command)
def test_tool_query_payload(self):
tool_index = 2
command = 0x12
command_payload = 'ABCDEF'
response = 'abcdef'
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(response)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(self.r.tool_query(
tool_index, command, command_payload), response_payload)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], command)
self.assertEquals(payload[3:], command_payload)
def test_read_from_eeprom_bad_length(self):
offset = 1234
length = constants.maximum_payload_length
self.assertRaises(
errors.EEPROMLengthError, self.r.read_from_EEPROM, offset, length)
def test_read_from_eeprom(self):
offset = 1234
length = constants.maximum_payload_length - 1
data = bytearray()
for i in range(0, length):
data.append(i)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(data)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(self.r.read_from_EEPROM(offset, length), data)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['READ_FROM_EEPROM'])
self.assertEquals(payload[1:3], Encoder.encode_uint16(offset))
self.assertEquals(payload[3], length)
def test_write_to_eeprom_too_much_data(self):
offset = 1234
length = constants.maximum_payload_length - 3
data = bytearray()
for i in range(0, length):
data.append(i)
self.assertRaises(
errors.EEPROMLengthError, self.r.write_to_EEPROM, offset, data)
def test_write_to_eeprom_bad_response_length(self):
offset = 1234
length = constants.maximum_payload_length - 4
data = bytearray()
for i in range(0, length):
data.append(i)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(length + 1)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertRaises(
errors.EEPROMMismatchError, self.r.write_to_EEPROM, offset, data)
def test_write_to_eeprom(self):
offset = 1234
length = constants.maximum_payload_length - 4
data = bytearray()
for i in range(0, length):
data.append(i)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(length)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.write_to_EEPROM(offset, data)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['WRITE_TO_EEPROM'])
self.assertEquals(payload[1:3], Encoder.encode_uint16(offset))
self.assertEquals(payload[3], length)
self.assertEquals(payload[4:], data)
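    # The WRITE_TO_EEPROM response echoes the number of bytes written;
    # test_write_to_eeprom_bad_response_length above exercises the
    # mismatch path (errors.EEPROMMismatchError).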
def test_get_available_buffer_size(self):
buffer_size = 0xDEADBEEF
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint32(buffer_size))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(self.r.get_available_buffer_size(), buffer_size)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_query_command_dict[
'GET_AVAILABLE_BUFFER_SIZE'])
def test_abort_immediately(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.abort_immediately()
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_query_command_dict[
'ABORT_IMMEDIATELY'])
def test_playback_capture_error_codes(self):
filename = 'abcdefghijkl'
for error_code in constants.sd_error_dict:
if error_code != 'SUCCESS':
self.outputstream.seek(0)
self.outputstream.truncate(0)
response_payload = bytearray()
response_payload.append(
constants.response_code_dict['SUCCESS'])
response_payload.append(constants.sd_error_dict[error_code])
self.outputstream.write(
Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertRaises(
errors.SDCardError, self.r.playback_capture, filename)
def test_playback_capture(self):
filename = 'abcdefghijkl'
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(constants.sd_error_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.playback_capture(filename)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['PLAYBACK_CAPTURE'])
self.assertEquals(payload[1:-1], filename)
self.assertEquals(payload[-1], 0x00)
def test_get_next_filename_error_codes(self):
for error_code in constants.sd_error_dict:
if error_code != 'SUCCESS':
self.outputstream.seek(0)
self.outputstream.truncate(0)
response_payload = bytearray()
response_payload.append(
constants.response_code_dict['SUCCESS'])
response_payload.append(constants.sd_error_dict[error_code])
response_payload.append('\x00')
self.outputstream.write(
Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertRaises(
errors.SDCardError, self.r.get_next_filename, False)
def test_get_next_filename_reset(self):
filename = 'abcdefghijkl\x00'
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(constants.sd_error_dict['SUCCESS'])
response_payload.extend(filename)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(self.r.get_next_filename(True), filename)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_query_command_dict[
'GET_NEXT_FILENAME'])
self.assertEquals(payload[1], 1)
def test_get_next_filename_no_reset(self):
filename = 'abcdefghijkl\x00'
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(constants.sd_error_dict['SUCCESS'])
response_payload.extend(filename)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(self.r.get_next_filename(False), filename)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_query_command_dict[
'GET_NEXT_FILENAME'])
self.assertEquals(payload[1], 0)
def test_get_build_name(self):
build_name = 'abcdefghijklmnop\x00'
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(build_name)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(self.r.get_build_name(), build_name)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['GET_BUILD_NAME'])
def test_get_extended_position(self):
position = [1, -2, 3, -4, 5]
endstop_states = 0x1234
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_int32(position[0]))
response_payload.extend(Encoder.encode_int32(position[1]))
response_payload.extend(Encoder.encode_int32(position[2]))
response_payload.extend(Encoder.encode_int32(position[3]))
response_payload.extend(Encoder.encode_int32(position[4]))
response_payload.extend(Encoder.encode_uint16(endstop_states))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
[returned_position,
returned_endstop_states] = self.r.get_extended_position()
self.assertEquals(returned_position, position)
self.assertEquals(returned_endstop_states, endstop_states)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_query_command_dict[
'GET_EXTENDED_POSITION'])
def test_wait_for_button_bad_button(self):
button = 'bad'
self.assertRaises(errors.ButtonError, self.r.wait_for_button,
button, 0, False, False, False)
def test_wait_for_button(self):
button = 0x10
options = 0x02 + 0x04 # reset on timeout, Clear screen
timeout = 0
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.wait_for_button('up', timeout, False, True, True)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['WAIT_FOR_BUTTON'])
self.assertEqual(payload[1], button)
self.assertEqual(payload[2:4], Encoder.encode_uint16(timeout))
self.assertEqual(payload[4], options)
def test_queue_song(self):
song_id = 1
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.queue_song(song_id)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['QUEUE_SONG'])
self.assertEqual(payload[1], song_id)
def test_reset_to_factory(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.reset_to_factory()
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'RESET_TO_FACTORY'])
self.assertEqual(payload[1], 0x00) # Reserved byte
def test_set_build_percent(self):
percent = 42
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_build_percent(percent)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'SET_BUILD_PERCENT'])
self.assertEqual(payload[1], percent)
self.assertEqual(payload[2], 0x00) # Reserved byte
def test_display_message(self):
row = 0x12
col = 0x34
timeout = 5
message = 'abcdefghij'
clear_existing = True
last_in_group = True
wait_for_button = True
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.display_message(row, col, message, timeout,
clear_existing, last_in_group, wait_for_button)
        # Clear existing, last in group, wait for button
        expectedBitfield = 0x01 + 0x02 + 0x04
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_action_command_dict['DISPLAY_MESSAGE'])
self.assertEquals(payload[1], expectedBitfield)
self.assertEquals(payload[2], col)
self.assertEquals(payload[3], row)
self.assertEquals(payload[4], timeout)
self.assertEquals(payload[5:-1], message)
self.assertEquals(payload[-1], 0x00)
def test_build_start_notification_long_name(self):
build_name = 'abcdefghijklmnopqrstuvwzyx0123456789'
max_build_name_length = constants.maximum_payload_length - 7
expected_build_name = build_name[:max_build_name_length]
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.build_start_notification(build_name)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'BUILD_START_NOTIFICATION'])
self.assertEquals(
payload[1:5], Encoder.encode_uint32(0)) # Reserved uint32
self.assertEquals(payload[5:-1], expected_build_name)
self.assertEquals(payload[-1], 0x00)
def test_build_start_notification(self):
build_name = 'abcdefghijkl'
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.build_start_notification(build_name)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'BUILD_START_NOTIFICATION'])
self.assertEquals(
payload[1:5], Encoder.encode_uint32(0)) # Reserved uint32
self.assertEquals(payload[5:-1], build_name)
self.assertEquals(payload[-1], 0x00)
def test_build_end_notification(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.build_end_notification()
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'BUILD_END_NOTIFICATION'])
self.assertEquals(payload[1], 0)
def test_find_axes_minimums(self):
axes = ['x', 'y', 'z', 'b']
rate = 2500
timeout = 45
self.outputstream.write(
Encoder.encode_payload([constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.r.find_axes_minimums(axes, rate, timeout)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'FIND_AXES_MINIMUMS'])
self.assertEquals(payload[1], Encoder.encode_axes(axes))
self.assertEquals(payload[2:6], Encoder.encode_uint32(rate))
self.assertEquals(payload[6:8], Encoder.encode_uint16(timeout))
def test_find_axes_maximums(self):
axes = ['x', 'y', 'z', 'b']
rate = 2500
timeout = 45
self.outputstream.write(
Encoder.encode_payload([constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.r.find_axes_maximums(axes, rate, timeout)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'FIND_AXES_MAXIMUMS'])
self.assertEquals(payload[1], Encoder.encode_axes(axes))
self.assertEquals(payload[2:6], Encoder.encode_uint32(rate))
self.assertEquals(payload[6:8], Encoder.encode_uint16(timeout))
def test_tool_action_command_negative_tool_index(self):
self.assertRaises(
errors.ToolIndexError,
self.r.tool_action_command,
-1,
constants.slave_action_command_dict['INIT']
)
def test_tool_action_command_too_high_tool_index(self):
self.assertRaises(
errors.ToolIndexError,
self.r.tool_action_command,
constants.max_tool_index + 1,
constants.slave_action_command_dict['INIT']
)
def test_tool_action_command(self):
tool_index = 2
command = 0x12
command_payload = 'abcdefghij'
self.outputstream.write(
Encoder.encode_payload([constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.r.tool_action_command(tool_index, command, command_payload)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], command)
self.assertEquals(payload[3], len(command_payload))
self.assertEquals(payload[4:], command_payload)
def test_queue_extended_point_long_length(self):
point = [1, 2, 3, 4, 5, 6]
rate = 500
self.assertRaises(errors.PointLengthError,
self.r.queue_extended_point, point, rate, 0, 0)
def test_queue_extended_point_short_length(self):
point = [1, 2, 3, 4]
rate = 500
self.assertRaises(errors.PointLengthError,
self.r.queue_extended_point, point, rate, 0, 0)
def test_queue_extended_point_classic(self):
target = [1, -2, 3, -4, 5]
velocity = 6
self.outputstream.write(
Encoder.encode_payload([constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.r.queue_extended_point_classic(target, velocity)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'QUEUE_EXTENDED_POINT'])
for i in range(0, 5):
self.assertEquals(payload[(
i * 4 + 1):(i * 4 + 5)], Encoder.encode_int32(target[i]))
self.assertEquals(payload[21:25], Encoder.encode_int32(velocity))
def test_queue_extended_point(self):
target = [1, -2, 3, -4, 5]
velocity = 6
self.outputstream.write(
Encoder.encode_payload([constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.r.queue_extended_point(target, velocity, 0, 0)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'QUEUE_EXTENDED_POINT'])
for i in range(0, 5):
self.assertEquals(payload[(
i * 4 + 1):(i * 4 + 5)], Encoder.encode_int32(target[i]))
self.assertEquals(payload[21:25], Encoder.encode_int32(velocity))
def test_set_extended_position_short_length(self):
self.assertRaises(errors.PointLengthError,
self.r.set_extended_position, [1, 2, 3, 4])
def test_set_extended_position_long_length(self):
self.assertRaises(errors.PointLengthError,
self.r.set_extended_position, [1, 2, 3, 4, 5, 6])
def test_set_extended_position(self):
target = [1, -2, 3, -4, 5]
self.outputstream.write(
Encoder.encode_payload([constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.r.set_extended_position(target)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'SET_EXTENDED_POSITION'])
for i in range(0, 5):
self.assertEquals(Encoder.encode_int32(
target[i]), payload[(i * 4 + 1):(i * 4 + 5)])
def test_get_toolhead_version(self):
tool_index = 2
version = 0x5DD5
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint16(version))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(self.r.get_toolhead_version(tool_index), version)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(
payload[2], constants.slave_query_command_dict['GET_VERSION'])
self.assertEquals(
payload[3:5], Encoder.encode_uint16(constants.s3g_version))
def test_capture_to_file(self):
filename = 'test'
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(constants.sd_error_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.capture_to_file(filename)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['CAPTURE_TO_FILE'])
self.assertEqual(payload[1:-1], filename)
self.assertEqual(payload[-1], 0x00)
def test_capture_to_file_errors(self):
filename = 'test'
for error_code in constants.sd_error_dict:
if error_code != 'SUCCESS':
response_payload = bytearray()
response_payload.append(
constants.response_code_dict['SUCCESS'])
response_payload.append(constants.sd_error_dict[error_code])
self.outputstream.write(
Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertRaises(
errors.SDCardError, self.r.capture_to_file, filename)
def test_end_capture_to_file(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint32(0))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
sdResponse = self.r.end_capture_to_file()
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['END_CAPTURE'])
def test_store_home_positions(self):
axes = ['x', 'z', 'b']
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.store_home_positions(axes)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'STORE_HOME_POSITIONS'])
self.assertEqual(payload[1], Encoder.encode_axes(axes))
def test_set_beep(self):
frequency = 1
duration = 2
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_beep(frequency, duration)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['SET_BEEP'])
self.assertEqual(payload[1:3], Encoder.encode_uint16(frequency))
self.assertEqual(payload[3:5], Encoder.encode_uint16(duration))
self.assertEqual(payload[5], 0x00) # reserved byte
def test_set_rgb_led(self):
r = 255
g = 254
b = 253
blink = 252
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_RGB_LED(r, g, b, blink)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['SET_RGB_LED'])
self.assertEqual(payload[1], r)
self.assertEqual(payload[2], g)
self.assertEqual(payload[3], b)
self.assertEqual(payload[4], blink)
self.assertEqual(payload[5], 0x00) # reserved byte
def test_set_potentiometer_value_capped(self):
axes = 0
value = 128
capped_value = 127
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_potentiometer_value(axes, value)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['SET_POT_VALUE'])
self.assertEqual(payload[1], axes)
self.assertEqual(payload[2], capped_value)
def test_set_potentiometer_value(self):
axes = 0
value = 2
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_potentiometer_value(axes, value)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['SET_POT_VALUE'])
self.assertEqual(payload[1], axes)
self.assertEqual(payload[2], value)
def test_recall_home_positions(self):
axes = ['x', 'z', 'b']
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.recall_home_positions(axes)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'RECALL_HOME_POSITIONS'])
self.assertEqual(payload[1], Encoder.encode_axes(axes))
def test_queue_extended_point_new_short_length(self):
point = [1, 2, 3, 4]
duration = 0
relative_axes = ['x']
self.assertRaises(errors.PointLengthError, self.r.queue_extended_point_new, point, duration, relative_axes)
def test_queue_extended_point_new_long_length(self):
point = [1, 2, 3, 4, 5, 6]
duration = 0
relative_axes = ['x']
self.assertRaises(errors.PointLengthError, self.r.queue_extended_point_new, point, duration, relative_axes)
def test_queue_extended_point_new(self):
point = [1, -2, 3, -4, 5]
duration = 10
relative_axes = ['x', 'z', 'b']
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.queue_extended_point_new(point, duration, relative_axes)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'QUEUE_EXTENDED_POINT_NEW'])
self.assertEqual(payload[1:5], Encoder.encode_int32(point[0]))
self.assertEqual(payload[5:9], Encoder.encode_int32(point[1]))
self.assertEqual(payload[9:13], Encoder.encode_int32(point[2]))
self.assertEqual(payload[13:17], Encoder.encode_int32(point[3]))
self.assertEqual(payload[17:21], Encoder.encode_int32(point[4]))
self.assertEqual(payload[21:25], Encoder.encode_uint32(duration))
self.assertEqual(payload[25], Encoder.encode_axes(relative_axes))
def test_wait_for_platform_ready(self):
toolhead = 0
delay = 100
timeout = 60
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.wait_for_platform_ready(toolhead, delay, timeout)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'WAIT_FOR_PLATFORM_READY'])
self.assertEqual(payload[1], toolhead)
self.assertEqual(payload[2:4], Encoder.encode_uint16(delay))
self.assertEqual(payload[4:], Encoder.encode_uint16(timeout))
def test_wait_for_tool_ready(self):
toolhead = 0
delay = 100
timeout = 60
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.wait_for_tool_ready(toolhead, delay, timeout)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'WAIT_FOR_TOOL_READY'])
self.assertEqual(payload[1], toolhead)
self.assertEqual(payload[2:4], Encoder.encode_uint16(delay))
self.assertEqual(payload[4:], Encoder.encode_uint16(timeout))
def test_toggle_axes(self):
axes = ['x', 'y', 'b']
enable_flag = True
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.toggle_axes(axes, enable_flag)
bitfield = Encoder.encode_axes(axes)
        bitfield |= 0x80 # because enable_flag is set
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['ENABLE_AXES'])
self.assertEqual(payload[1], bitfield)
def test_delay(self):
delay = 50
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.delay(delay)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['DELAY'])
self.assertEqual(payload[1:], Encoder.encode_uint32(delay))
def test_change_tool(self):
tool_index = 2
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.change_tool(tool_index)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['CHANGE_TOOL'])
self.assertEqual(payload[1], tool_index)
def test_get_build_stats(self):
stats = {
'BuildState': 0,
'BuildHours': 1,
'BuildMinutes': 2,
'LineNumber': 3,
'Reserved': 4,
}
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(stats['BuildState'])
response_payload.append(stats['BuildHours'])
response_payload.append(stats['BuildMinutes'])
response_payload.extend(Encoder.encode_uint32(stats['LineNumber']))
response_payload.extend(Encoder.encode_uint32(stats['Reserved']))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
info = self.r.get_build_stats()
self.assertEqual(info, stats)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['GET_BUILD_STATS'])
def test_get_communication_stats(self):
stats = {
'PacketsReceived': 0,
'PacketsSent': 1,
'NonResponsivePacketsSent': 2,
'PacketRetries': 3,
'NoiseBytes': 4,
}
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(
Encoder.encode_uint32(stats['PacketsReceived']))
response_payload.extend(Encoder.encode_uint32(stats['PacketsSent']))
response_payload.extend(
Encoder.encode_uint32(stats['NonResponsivePacketsSent']))
response_payload.extend(Encoder.encode_uint32(stats['PacketRetries']))
response_payload.extend(Encoder.encode_uint32(stats['NoiseBytes']))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
info = self.r.get_communication_stats()
self.assertEqual(info, stats)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_query_command_dict[
'GET_COMMUNICATION_STATS'])
def test_get_motherboard_status(self):
flags = {
'preheat': True,
'manual_mode': False,
'onboard_script': True,
'onboard_process': False,
'wait_for_button': True,
'build_cancelling': False,
'heat_shutdown': True,
'power_error': False,
}
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
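        # Alternate True/False across the eight flags and pack them LSB-first
        # into a bitfield, matching the declaration order of `flags` above.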
        flagValues = [i % 2 == 0 for i in range(8)]
bitfield = 0
for i in range(len(flagValues)):
if flagValues[i]:
bitfield += 1 << i
response_payload.append(bitfield)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
responseFlags = self.r.get_motherboard_status()
self.assertEqual(flags, responseFlags)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_query_command_dict[
'GET_MOTHERBOARD_STATUS'])
def test_extended_stop_error(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(1)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertRaises(
errors.ExtendedStopError, self.r.extended_stop, True, True)
def test_extended_stop(self):
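        # Each case pairs the two boolean arguments with the bitfield they
        # should encode to (bit 0 for the first flag, bit 1 for the second).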
expected_states = [
[False, False, 0x00],
[True, False, 0x01],
[False, True, 0x02],
[True, True, 0x03],
]
for expected_state in expected_states:
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(0)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.inputstream.seek(0)
self.r.extended_stop(expected_state[0], expected_state[1])
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_query_command_dict[
'EXTENDED_STOP'])
self.assertEqual(payload[1], expected_state[2])
def test_get_pid_state(self):
expectedDict = {
"ExtruderError": 1,
"ExtruderDelta": 2,
"ExtruderLastTerm": 3,
"PlatformError": 4,
"PlatformDelta": 5,
"PlatformLastTerm": 6,
}
toolIndex = 0
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
for i in range(6):
response_payload.extend(Encoder.encode_uint16(i + 1))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(expectedDict, self.r.get_PID_state(toolIndex))
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(
payload[2], constants.slave_query_command_dict['GET_PID_STATE'])
def test_toolhead_init(self):
toolIndex = 0
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.toolhead_init(toolIndex)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(
payload[2], constants.slave_action_command_dict['INIT'])
def test_toolhead_abort(self):
toolIndex = 0
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.toolhead_abort(toolIndex)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(
payload[2], constants.slave_action_command_dict['ABORT'])
self.assertEqual(payload[3], len(bytearray()))
self.assertEqual(payload[4:], bytearray())
def test_toolhead_pause(self):
toolIndex = 0
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.toolhead_pause(toolIndex)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(
payload[2], constants.slave_action_command_dict['PAUSE'])
self.assertEqual(payload[3], len(bytearray()))
self.assertEqual(payload[4:], bytearray())
def test_set_servo_1_position(self):
toolIndex = 0
theta = 90
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_servo1_position(toolIndex, theta)
expectedPayload = bytearray()
expectedPayload.append(theta)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(payload[2], constants.slave_action_command_dict[
'SET_SERVO_1_POSITION'])
self.assertEqual(payload[3], len(expectedPayload))
self.assertEqual(payload[4:], expectedPayload)
def test_toggle_motor_1(self):
toolIndex = 0
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.toggle_motor1(toolIndex, True, True)
expectedPayload = bytearray()
bitfield = 0
bitfield |= 0x01 + 0x02
expectedPayload.append(bitfield)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(
payload[2], constants.slave_action_command_dict['TOGGLE_MOTOR_1'])
self.assertEqual(payload[3], len(expectedPayload))
self.assertEqual(payload[4:], expectedPayload)
def test_set_motor_1_speed_rpm(self):
toolIndex = 0
duration = 50
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.set_motor1_speed_RPM(toolIndex, duration)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
expectedPayload = bytearray()
expectedPayload.extend(Encoder.encode_uint32(duration))
self.assertEqual(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(payload[2], constants.slave_action_command_dict[
'SET_MOTOR_1_SPEED_RPM'])
self.assertEqual(payload[3], len(expectedPayload))
self.assertEqual(payload[4:], expectedPayload)
def test_get_tool_status(self):
toolIndex = 0
expectedDict = {
"ExtruderReady": True,
"ExtruderNotPluggedIn": True,
"ExtruderOverMaxTemp": True,
"ExtruderNotHeating": True,
"ExtruderDroppingTemp": True,
"PlatformError": True,
"ExtruderError": True,
}
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
returnBitfield = 0xFF
response_payload.append(returnBitfield)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEqual(expectedDict, self.r.get_tool_status(toolIndex))
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(
payload[2], constants.slave_query_command_dict['GET_TOOL_STATUS'])
def test_get_motor_speed(self):
toolIndex = 0
speed = 100
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint32(speed))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEqual(speed, self.r.get_motor1_speed(toolIndex))
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEqual(payload[1], toolIndex)
self.assertEqual(payload[2], constants.slave_query_command_dict[
'GET_MOTOR_1_SPEED_RPM'])
def test_init(self):
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.init()
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['INIT'])
def test_get_toolhead_temperature(self):
tool_index = 2
temperature = 1234
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint16(temperature))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(
self.r.get_toolhead_temperature(tool_index), temperature)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_query_command_dict[
'GET_TOOLHEAD_TEMP'])
# TODO: also test for bad codes, both here and in platform.
def test_is_tool_ready_bad_response(self):
tool_index = 2
ready_state = 2
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(ready_state)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertRaises(
errors.HeatElementReadyError, self.r.is_tool_ready, tool_index)
def test_is_tool_ready(self):
tool_index = 2
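        # Each case: [expected boolean result, ready byte in the response].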
ready_states = [
[True, 1],
[False, 0]
]
for ready_state in ready_states:
self.outputstream.seek(0)
self.outputstream.truncate(0)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(ready_state[1])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.inputstream.seek(0)
self.assertEquals(self.r.is_tool_ready(tool_index), ready_state[0])
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_query_command_dict[
'IS_TOOL_READY'])
def test_read_from_toolhead_eeprom_bad_length(self):
tool_index = 2
offset = 1234
length = constants.maximum_payload_length
self.assertRaises(errors.EEPROMLengthError, self.r.read_from_toolhead_EEPROM, tool_index, offset, length)
def test_read_from_toolhead_eeprom(self):
tool_index = 2
offset = 1234
length = constants.maximum_payload_length - 1
data = bytearray()
for i in range(0, length):
data.append(i)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(data)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(self.r.read_from_toolhead_EEPROM(
tool_index, offset, length), data)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_query_command_dict[
'READ_FROM_EEPROM'])
self.assertEquals(payload[3:5], Encoder.encode_uint16(offset))
self.assertEquals(payload[5], length)
def test_write_to_toolhead_eeprom_too_much_data(self):
tool_index = 2
offset = 1234
length = constants.maximum_payload_length - 5
data = bytearray()
for i in range(0, length):
data.append(i)
self.assertRaises(errors.EEPROMLengthError, self.r.write_to_toolhead_EEPROM, tool_index, offset, data)
def test_write_to_toolhead_eeprom_bad_response_length(self):
tool_index = 2
offset = 1234
length = constants.maximum_payload_length - 6
data = bytearray()
for i in range(0, length):
data.append(i)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(length + 1)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertRaises(errors.EEPROMMismatchError, self.r.write_to_toolhead_EEPROM, tool_index, offset, data)
def test_write_to_toolhead_eeprom(self):
tool_index = 2
offset = 1234
length = constants.maximum_payload_length - 6
data = bytearray()
for i in range(0, length):
data.append(i)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(length)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.r.write_to_toolhead_EEPROM(tool_index, offset, data)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(
payload[2], constants.slave_query_command_dict['WRITE_TO_EEPROM'])
self.assertEquals(payload[3:5], Encoder.encode_uint16(offset))
self.assertEquals(payload[6:], data)
def test_get_platform_temperature(self):
tool_index = 2
temperature = 1234
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint16(temperature))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(
self.r.get_platform_temperature(tool_index), temperature)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_query_command_dict[
'GET_PLATFORM_TEMP'])
def test_get_toolhead_target_temperature(self):
tool_index = 2
temperature = 1234
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint16(temperature))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(
self.r.get_toolhead_target_temperature(tool_index), temperature)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_query_command_dict[
'GET_TOOLHEAD_TARGET_TEMP'])
def test_get_platform_target_temperature(self):
tool_index = 2
temperature = 1234
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.extend(Encoder.encode_uint16(temperature))
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertEquals(
self.r.get_platform_target_temperature(tool_index), temperature)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_query_command_dict[
'GET_PLATFORM_TARGET_TEMP'])
def test_is_platform_ready_bad_response(self):
tool_index = 2
ready_state = 2
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(ready_state)
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.assertRaises(errors.HeatElementReadyError,
self.r.is_platform_ready, tool_index)
def test_is_platform_ready(self):
tool_index = 2
ready_states = [
[True, 1],
[False, 0],
]
for ready_state in ready_states:
self.outputstream.seek(0)
self.outputstream.truncate(0)
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
response_payload.append(ready_state[1])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
self.inputstream.seek(0)
self.assertEquals(
self.r.is_platform_ready(tool_index), ready_state[0])
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(
payload[0], constants.host_query_command_dict['TOOL_QUERY'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_query_command_dict[
'IS_PLATFORM_READY'])
def test_toggle_fan(self):
tool_index = 2
fan_states = [True, False]
for fan_state in fan_states:
self.outputstream.seek(0)
self.outputstream.truncate(0)
self.outputstream.write(Encoder.encode_payload(
[constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.inputstream.seek(0)
self.r.toggle_fan(tool_index, fan_state)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(
payload[2], constants.slave_action_command_dict['TOGGLE_FAN'])
self.assertEquals(payload[3], 1)
self.assertEquals(payload[4], fan_state)
def test_toggle_valve(self):
tool_index = 2
fan_states = [True, False]
for fan_state in fan_states:
self.outputstream.seek(0)
self.outputstream.truncate(0)
self.outputstream.write(Encoder.encode_payload(
[constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.inputstream.seek(0)
self.r.toggle_extra_output(tool_index, fan_state)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_action_command_dict[
'TOGGLE_EXTRA_OUTPUT'])
self.assertEquals(payload[3], 1)
self.assertEquals(payload[4], fan_state)
def test_set_toolhead_temp(self):
tool_index = 2
temp = 1024
self.outputstream.seek(0)
self.outputstream.truncate(0)
self.outputstream.write(
Encoder.encode_payload([constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.r.set_toolhead_temperature(tool_index, temp)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_action_command_dict[
'SET_TOOLHEAD_TARGET_TEMP'])
        self.assertEquals(payload[3], 2) # Temp is encoded as a 2-byte int16
self.assertEquals(payload[4:6], Encoder.encode_int16(temp))
def test_set_platform_temp(self):
tool_index = 2
temp = 1024
self.outputstream.seek(0)
self.outputstream.truncate(0)
self.outputstream.write(
Encoder.encode_payload([constants.response_code_dict['SUCCESS']]))
self.outputstream.seek(0)
self.r.set_platform_temperature(tool_index, temp)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEquals(payload[0], constants.host_action_command_dict[
'TOOL_ACTION_COMMAND'])
self.assertEquals(payload[1], tool_index)
self.assertEquals(payload[2], constants.slave_action_command_dict[
'SET_PLATFORM_TEMP'])
        self.assertEquals(payload[3], 2) # Temp is encoded as a 2-byte int16
self.assertEquals(payload[4:6], Encoder.encode_int16(temp))
def test_x3g_version(self):
checksum = 0x0000
        high_byte = 6
        low_byte = 1
extra_byte = 0
the_pid = 0xD314
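        # The asserts below pin the expected layout: command byte, version
        # high/low bytes, a zero byte, little-endian uint32 checksum, uint16
        # PID, then zero padding through byte 20.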
response_payload = bytearray()
response_payload.append(constants.response_code_dict['SUCCESS'])
self.outputstream.write(Encoder.encode_payload(response_payload))
self.outputstream.seek(0)
        self.r.x3g_version(high_byte, low_byte, checksum, pid=the_pid)
packet = bytearray(self.inputstream.getvalue())
payload = Encoder.decode_packet(packet)
self.assertEqual(
payload[0], constants.host_action_command_dict['X3G_VERSION'])
        self.assertEqual(payload[1], high_byte)
        self.assertEqual(payload[2], low_byte)
self.assertEqual(payload[3], extra_byte)
self.assertEqual(payload[4:8], struct.pack('<I', checksum))
self.assertEqual(payload[8:10], struct.pack('<H', the_pid))
for i in range(10, 21):
self.assertEqual(payload[i], extra_byte)
if __name__ == "__main__":
unittest.main()
|
laperry1/android_external_chromium_org
|
refs/heads/cm-12.1
|
tools/cr/cr/actions/__init__.py
|
112
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A package to hold all the actions for the cr tool.
This package holds the standard actions used by the commands in the cr tool.
These actions are the things that actually perform the work, they are generally
run in sequences by commands.
"""
import cr
cr.Import(__name__, 'action')
cr.Import(__name__, 'runner')
cr.Import(__name__, 'builder')
cr.Import(__name__, 'installer')
|
houssemFat/MeeM-Dev
|
refs/heads/master
|
core/apps/decorators/authorization.py
|
12133432
| |
harmattan/qt-components-graphics-bb10
|
refs/heads/master
|
scripts/verify.py
|
1
|
#!/usr/bin/python
from itertools import ifilter
from os import walk, path
from hashlib import md5
def traversedir(where):
for root, dirs, candidates in walk(where):
for filename in ifilter(lambda x: '.' in x, candidates):
yield(path.join(root, filename))
def parse_tree(where, theme={}):
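    # Note: `theme` has a mutable default argument; that is safe here only
    # because the accumulating callers in __main__ pass an explicit dict.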
for abspath in ifilter(lambda x: '.' in x, traversedir(where)):
if not abspath.endswith(('.svg', '.png', '.jpg',)):
continue
basename, _, extension = abspath.rpartition('.')
theme.setdefault(path.basename(basename), []).append((abspath, extension,))
return theme
def compare(file_a, file_b):
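    # Two files count as identical when the MD5 digests of their full
    # contents match.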
    with open(file_a, 'rb') as f1:
        with open(file_b, 'rb') as f2:
return md5(f1.read()).digest() == md5(f2.read()).digest()
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
        sys.stderr.write("Usage: verify.py <origin_dir> <target_dir>..\n")
exit(255)
candidates = {}
theme = parse_tree(sys.argv[1])
for value in sys.argv[2:]:
print value
candidates = parse_tree(value, candidates)
missing, match = [], []
for key, values in theme.iteritems():
if key not in candidates:
missing.append(values[0])
else:
for value in candidates[key]:
if values[0][1] != value[1]:
continue
if compare(values[0][0], value[0]):
match.append((values[0][0], values[0],))
break
else:
missing.append(values[0])
print "match: " + str(int((len(match) * 100) / len(theme))) + "% (" + str(len(match)) + ")"
|
jeffzheng1/tensorflow
|
refs/heads/master
|
tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
|
1
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.ops import init_ops
class TransformerTest(tf.test.TestCase):
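  """Tests for feature_column_ops._Transformer, which applies a feature
  column's transformation to the raw feature tensors and caches the result
  per column."""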
def testRealValuedColumnIsIdentityTransformation(self):
real_valued = tf.contrib.layers.real_valued_column("price")
features = {"price": tf.constant([[20.], [110], [-3]])}
output = feature_column_ops._Transformer(features).transform(real_valued)
with self.test_session():
self.assertAllEqual(output.eval(), [[20.], [110], [-3]])
def testBucketizedColumn(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
output = feature_column_ops._Transformer(features).transform(bucket)
with self.test_session():
self.assertAllEqual(output.eval(), [[2], [3], [0]])
def testBucketizedColumnWithMultiDimensions(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20., 110], [110., 20], [-3, -3]])}
output = feature_column_ops._Transformer(features).transform(bucket)
with self.test_session():
self.assertAllEqual(output.eval(), [[2, 3], [3, 2], [0, 0]])
def testCachedTransformation(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
transformer = feature_column_ops._Transformer(features)
with self.test_session() as sess:
transformer.transform(bucket)
num_of_ops = len(sess.graph.get_operations())
# Verify that the second call to transform the same feature
# doesn't increase the number of ops.
transformer.transform(bucket)
self.assertEqual(num_of_ops, len(sess.graph.get_operations()))
def testSparseColumnWithHashBucket(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())
def testSparseIntColumnWithHashBucket(self):
"""Tests a sparse column with int values."""
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(
"wire", 10, dtype=tf.int64)
wire_tensor = tf.SparseTensor(values=[101, 201, 301],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())
def testEmbeddingColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(
tf.contrib.layers.embedding_column(hashed_sparse, 10))
expected = feature_column_ops._Transformer(features).transform(
hashed_sparse)
with self.test_session():
self.assertAllEqual(output.values.eval(), expected.values.eval())
self.assertAllEqual(output.indices.eval(), expected.indices.eval())
self.assertAllEqual(output.shape.eval(), expected.shape.eval())
def testSparseColumnWithKeys(self):
keys_sparse = tf.contrib.layers.sparse_column_with_keys(
"wire", ["marlo", "omar", "stringer"])
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(keys_sparse)
with self.test_session():
tf.initialize_all_tables().run()
self.assertEqual(output.values.dtype, tf.int64)
self.assertAllEqual(output.values.eval(), [1, 2, 0])
self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())
def testSparseColumnWithHashBucket_IsIntegerized(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
"wire", 10)
wire_tensor = tf.SparseTensor(values=[100, 1, 25],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int32)
self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())
def testWeightedSparseColumn(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
output = feature_column_ops._Transformer(features).transform(weighted_ids)
with self.test_session():
tf.initialize_all_tables().run()
self.assertAllEqual(output[0].shape.eval(), ids_tensor.shape.eval())
self.assertAllEqual(output[0].indices.eval(), ids_tensor.indices.eval())
self.assertAllEqual(output[0].values.eval(), [2, 2, 0])
self.assertAllEqual(output[1].shape.eval(), weights_tensor.shape.eval())
self.assertAllEqual(output[1].indices.eval(),
weights_tensor.indices.eval())
self.assertEqual(output[1].values.dtype, tf.float32)
self.assertAllEqual(output[1].values.eval(), weights_tensor.values.eval())
def testCrossColumn(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=15)
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 1])
}
output = feature_column_ops._Transformer(features).transform(
country_language)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))
def testCrossWithBucketizedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=15)
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output = feature_column_ops._Transformer(features).transform(country_price)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))
def testCrossWithMultiDimensionBucketizedColumn(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=1000)
with tf.Graph().as_default():
features = {"price": tf.constant([[20., 210.], [110., 50.], [-3., -30.]]),
"country": tf.SparseTensor(values=["US", "SV", "US"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[country_price],
num_outputs=1))
weights = column_to_variable[country_price][0]
grad = tf.squeeze(tf.gradients(output, weights)[0].values)
with self.test_session():
tf.global_variables_initializer().run()
self.assertEqual(len(grad.eval()), 6)
def testCrossWithCrossedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=15)
wire = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_country_price = tf.contrib.layers.crossed_column(
[wire, country_price], hash_bucket_size=15)
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [0, 1], [0, 2]],
shape=[1, 3])
}
output = feature_column_ops._Transformer(features).transform(
wire_country_price)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))
def testIfFeatureTableContainsTransformationReturnIt(self):
any_column = tf.contrib.layers.sparse_column_with_hash_bucket("sparse", 10)
features = {any_column: "any-thing-even-not-a-tensor"}
output = feature_column_ops._Transformer(features).transform(any_column)
self.assertEqual(output, "any-thing-even-not-a-tensor")
class CreateInputLayersForDNNsTest(tf.test.TestCase):
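  """Tests for input_from_feature_columns, which builds the dense input
  layer of a DNN by concatenating the output of each feature column."""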
def testAllDNNColumns(self):
sparse_column = tf.contrib.layers.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
real_valued_column = tf.contrib.layers.real_valued_column("income", 2)
one_hot_column = tf.contrib.layers.one_hot_column(sparse_column)
embedding_column = tf.contrib.layers.embedding_column(sparse_column, 10)
features = {
"ids": tf.SparseTensor(
values=["c", "b", "a"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
"income": tf.constant([[20.3, 10], [110.3, 0.4], [-3.0, 30.4]])
}
output = tf.contrib.layers.input_from_feature_columns(features,
[one_hot_column,
embedding_column,
real_valued_column])
with self.test_session():
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllEqual(output.eval().shape, [3, 2 + 4 + 10])
def testRealValuedColumn(self):
real_valued = tf.contrib.layers.real_valued_column("price")
features = {"price": tf.constant([[20.], [110], [-3]])}
output = tf.contrib.layers.input_from_feature_columns(features,
[real_valued])
with self.test_session():
self.assertAllClose(output.eval(), features["price"].eval())
def testRealValuedColumnWithMultiDimensions(self):
real_valued = tf.contrib.layers.real_valued_column("price", 2)
features = {"price": tf.constant([[20., 10.],
[110, 0.],
[-3, 30]])}
output = tf.contrib.layers.input_from_feature_columns(features,
[real_valued])
with self.test_session():
self.assertAllClose(output.eval(), features["price"].eval())
def testRealValuedColumnWithNormalizer(self):
real_valued = tf.contrib.layers.real_valued_column(
"price", normalizer=lambda x: x - 2)
features = {"price": tf.constant([[20.], [110], [-3]])}
output = tf.contrib.layers.input_from_feature_columns(features,
[real_valued])
with self.test_session():
self.assertAllClose(output.eval(), features["price"].eval() - 2)
def testRealValuedColumnWithMultiDimensionsAndNormalizer(self):
real_valued = tf.contrib.layers.real_valued_column(
"price", 2, normalizer=lambda x: x - 2)
features = {"price": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}
output = tf.contrib.layers.input_from_feature_columns(features,
[real_valued])
with self.test_session():
self.assertAllClose(output.eval(), features["price"].eval() - 2)
def testBucketizedColumnSucceedsForDNN(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
output = tf.contrib.layers.input_from_feature_columns(features, [bucket])
expected = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0]]
with self.test_session():
self.assertAllClose(output.eval(), expected)
def testBucketizedColumnWithNormalizerSucceedsForDNN(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column(
"price", normalizer=lambda x: x - 15),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
output = tf.contrib.layers.input_from_feature_columns(features, [bucket])
expected = [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]
with self.test_session():
self.assertAllClose(output.eval(), expected)
def testBucketizedColumnWithMultiDimensionsSucceedsForDNN(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
# buckets [2, 3], [3, 2], [0, 0]. dimension = 2
features = {"price": tf.constant([[20., 200],
[110, 50],
[-3, -3]])}
output = tf.contrib.layers.input_from_feature_columns(features, [bucket])
expected = [[0, 0, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 0]]
with self.test_session():
self.assertAllClose(output.eval(), expected)
def testOneHotColumnFromWeightedSparseColumnFails(self):
ids_column = tf.contrib.layers.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
ids_tensor = tf.SparseTensor(
values=["c", "b", "a", "c"],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
shape=[3, 2])
weighted_ids_column = tf.contrib.layers.weighted_sparse_column(ids_column,
"weights")
weights_tensor = tf.SparseTensor(
values=[10.0, 20.0, 30.0, 40.0],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
shape=[3, 2])
features = {"ids": ids_tensor, "weights": weights_tensor}
one_hot_column = tf.contrib.layers.one_hot_column(weighted_ids_column)
with self.test_session():
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
with self.assertRaisesRegexp(
ValueError,
"one_hot_column does not yet support weighted_sparse_column"):
_ = tf.contrib.layers.input_from_feature_columns(features,
[one_hot_column])
def testOneHotColumnFromSparseColumnWithKeysSucceedsForDNN(self):
ids_column = tf.contrib.layers.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
ids_tensor = tf.SparseTensor(
values=["c", "b", "a"], indices=[[0, 0], [1, 0], [2, 0]], shape=[3, 1])
one_hot_sparse = tf.contrib.layers.one_hot_column(ids_column)
features = {"ids": ids_tensor}
output = tf.contrib.layers.input_from_feature_columns(features,
[one_hot_sparse])
with self.test_session():
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]],
output.eval())
def testOneHotColumnFromMultivalentSparseColumnWithKeysSucceedsForDNN(self):
ids_column = tf.contrib.layers.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
ids_tensor = tf.SparseTensor(
values=["c", "b", "a", "c"],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
shape=[3, 2])
one_hot_sparse = tf.contrib.layers.one_hot_column(ids_column)
features = {"ids": ids_tensor}
output = tf.contrib.layers.input_from_feature_columns(features,
[one_hot_sparse])
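    # The third example contains both "a" and "c"; the multivalent one-hot
    # encodings are summed, yielding [1, 0, 1, 0].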
with self.test_session():
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],
output.eval())
def testOneHotColumnFromSparseColumnWithIntegerizedFeaturePassesForDNN(self):
ids_column = tf.contrib.layers.sparse_column_with_integerized_feature(
"ids", bucket_size=4)
one_hot_sparse = tf.contrib.layers.one_hot_column(ids_column)
features = {"ids": tf.SparseTensor(
values=[2, 1, 0, 2],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
shape=[3, 2])}
output = tf.contrib.layers.input_from_feature_columns(features,
[one_hot_sparse])
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],
output.eval())
def testOneHotColumnFromSparseColumnWithHashBucketSucceedsForDNN(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("feat", 10)
wire_tensor = tf.SparseTensor(
values=["a", "b", "c1", "c2"],
indices=[[0, 0], [1, 0], [2, 0], [2, 1]],
shape=[3, 2])
features = {"feat": wire_tensor}
one_hot_sparse = tf.contrib.layers.one_hot_column(hashed_sparse)
output = tf.contrib.layers.input_from_feature_columns(features,
[one_hot_sparse])
with self.test_session():
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllEqual([3, 10], output.eval().shape)
def testEmbeddingColumnSucceedsForDNN(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(
values=["omar", "stringer", "marlo", "xx", "yy"],
indices=[[0, 0], [1, 0], [1, 1], [2, 0], [3, 0]],
shape=[4, 2])
features = {"wire": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [4, 10])
def testHashedEmbeddingColumnSucceedsForDNN(self):
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo", "omar"],
indices=[[0, 0], [1, 0], [1, 1], [2, 0]],
shape=[3, 2])
features = {"wire": wire_tensor}
    # Use a hash space big enough that collisions are unlikely.
embedded_sparse = tf.contrib.layers.hashed_embedding_column("wire", 1000, 3)
output = tf.contrib.layers.input_from_feature_columns(
features, [embedded_sparse], weight_collections=["my_collection"])
weights = tf.get_collection("my_collection")
grad = tf.gradients(output, weights)
with self.test_session():
tf.global_variables_initializer().run()
gradient_values = []
# Collect the gradient from the different partitions (one in this test)
for p in range(len(grad)):
gradient_values.extend(grad[p].values.eval())
gradient_values.sort()
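      # Row 1 holds two values, so with the (default) mean combiner each
      # contributes a gradient of 0.5 per hashed weight; "omar" appears alone
      # in rows 0 and 2 and contributes 1 + 1 = 2.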
self.assertAllEqual(gradient_values, [0.5]*6 + [2]*3)
def testEmbeddingColumnWithInitializerSucceedsForDNN(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
init_value = 133.7
embeded_sparse = tf.contrib.layers.embedding_column(
hashed_sparse,
10, initializer=tf.constant_initializer(init_value))
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.global_variables_initializer().run()
output_eval = output.eval()
self.assertAllEqual(output_eval.shape, [2, 10])
self.assertAllClose(output_eval, np.tile(init_value, [2, 10]))
def testEmbeddingColumnWithMultipleInitializersFails(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
embedded_sparse = tf.contrib.layers.embedding_column(
hashed_sparse,
10,
initializer=tf.truncated_normal_initializer(mean=42,
stddev=1337))
embedded_sparse_alternate = tf.contrib.layers.embedding_column(
hashed_sparse,
10,
initializer=tf.truncated_normal_initializer(mean=1337,
stddev=42))
# Makes sure that trying to use different initializers with the same
# embedding column explicitly fails.
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
"Duplicate feature column key found for column: wire_embedding"):
tf.contrib.layers.input_from_feature_columns(
features, [embedded_sparse, embedded_sparse_alternate])
def testEmbeddingColumnWithWeightedSparseColumnSucceedsForDNN(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(weighted_ids, 10)
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllEqual(output.eval().shape, [2, 10])
def testEmbeddingColumnWithCrossedColumnSucceedsForDNN(self):
a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
hash_bucket_size=100)
b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
hash_bucket_size=100)
crossed = tf.contrib.layers.crossed_column(
set([a, b]), hash_bucket_size=10000)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(crossed, 10)
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [2, 10])
def testSparseColumnFailsForDNN(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
with self.test_session():
with self.assertRaisesRegexp(
ValueError, "Error creating input layer for column: wire"):
tf.global_variables_initializer().run()
tf.contrib.layers.input_from_feature_columns(features, [hashed_sparse])
def testWeightedSparseColumnFailsForDNN(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
"Error creating input layer for column: ids_weighted_by_weights"):
tf.initialize_all_tables().run()
tf.contrib.layers.input_from_feature_columns(features, [weighted_ids])
def testCrossedColumnFailsForDNN(self):
a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
hash_bucket_size=100)
b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
hash_bucket_size=100)
crossed = tf.contrib.layers.crossed_column(
set([a, b]), hash_bucket_size=10000)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
with self.test_session():
with self.assertRaisesRegexp(
ValueError, "Error creating input layer for column: aaa_X_bbb"):
tf.global_variables_initializer().run()
tf.contrib.layers.input_from_feature_columns(features, [crossed])
def testDeepColumnsSucceedForDNN(self):
real_valued = tf.contrib.layers.real_valued_column("income", 3)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
features = {
"income": tf.constant([[20., 10, -5], [110, 0, -7], [-3, 30, 50]]),
"price": tf.constant([[20., 200], [110, 2], [-20, -30]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
embeded_sparse = tf.contrib.layers.embedding_column(
hashed_sparse,
10, initializer=tf.constant_initializer(133.7))
output = tf.contrib.layers.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse])
with self.test_session():
tf.global_variables_initializer().run()
# size of output = 3 (real_valued) + 2 * 4 (bucket) + 10 (embedding) = 21
self.assertAllEqual(output.eval().shape, [3, 21])
def testEmbeddingColumnForDNN(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[3, 2])
features = {"wire": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(
hashed_sparse,
1,
combiner="sum",
initializer=init_ops.ones_initializer())
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.global_variables_initializer().run()
      # With a sum combiner and all-ones embeddings, each score equals the
      # number of sparse values in the row: 1, 2 and 0.
self.assertAllEqual(output.eval(), [[1.], [2.], [0.]])
def testEmbeddingColumnWithWeightedSparseColumnForDNN(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[3, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[3, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(
weighted_ids,
1,
combiner="sum",
initializer=init_ops.ones_initializer())
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
      # With a sum combiner, each score equals the sum of the weights in the
      # row: 10, 20 + 30 = 50, and 0.
self.assertAllEqual(output.eval(), [[10.], [50.], [0.]])
def testInputLayerWithCollectionsForDNN(self):
real_valued = tf.contrib.layers.real_valued_column("price")
bucket = tf.contrib.layers.bucketized_column(real_valued,
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
features = {
"price": tf.constant([[20.], [110], [-3]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
tf.contrib.layers.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"])
weights = tf.get_collection("my_collection")
    # There is one variable for the embedded sparse column.
self.assertEqual(1, len(weights))
def testInputLayerWithTrainableArgForDNN(self):
real_valued = tf.contrib.layers.real_valued_column("price")
bucket = tf.contrib.layers.bucketized_column(real_valued,
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
features = {
"price": tf.constant([[20.], [110], [-3]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
tf.contrib.layers.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"],
trainable=False)
# There should not be any trainable variables
self.assertEqual(0, len(tf.trainable_variables()))
tf.contrib.layers.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"],
trainable=True)
    # There should be one trainable variable for the embedded sparse column.
self.assertEqual(1, len(tf.trainable_variables()))
class SequenceInputFromFeatureColumnTest(tf.test.TestCase):
def testSupportedColumns(self):
measurement = tf.contrib.layers.real_valued_column("measurements")
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", 100)
pets = tf.contrib.layers.sparse_column_with_hash_bucket(
"pets", 100)
ids = tf.contrib.layers.sparse_column_with_integerized_feature(
"id", 100)
country_x_pets = tf.contrib.layers.crossed_column(
[country, pets], 100)
country_x_pets_onehot = tf.contrib.layers.one_hot_column(
country_x_pets)
bucketized_measurement = tf.contrib.layers.bucketized_column(
measurement, [.25, .5, .75])
embedded_id = tf.contrib.layers.embedding_column(
ids, 100)
# `_BucketizedColumn` is not supported.
self.assertRaisesRegexp(
ValueError,
"FeatureColumn type _BucketizedColumn is not currently supported",
tf.contrib.layers.sequence_input_from_feature_columns,
{}, [measurement, bucketized_measurement])
# `_CrossedColumn` is not supported.
self.assertRaisesRegexp(
ValueError,
"FeatureColumn type _CrossedColumn is not currently supported",
tf.contrib.layers.sequence_input_from_feature_columns,
{}, [embedded_id, country_x_pets])
# `country_x_pets_onehot` depends on a `_CrossedColumn` which is forbidden.
self.assertRaisesRegexp(
ValueError,
"Column country_X_pets .* _CrossedColumn",
tf.contrib.layers.sequence_input_from_feature_columns,
{}, [embedded_id, country_x_pets_onehot])
def testRealValuedColumn(self):
batch_size = 4
sequence_length = 8
dimension = 3
np.random.seed(1111)
measurement_input = np.random.rand(batch_size, sequence_length, dimension)
measurement_column = tf.contrib.layers.real_valued_column("measurements")
columns_to_tensors = {"measurements": tf.constant(measurement_input)}
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
with self.test_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(measurement_input, model_inputs)
def testRealValuedColumnWithExtraDimensions(self):
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
np.random.seed(2222)
measurement_input = np.random.rand(batch_size, sequence_length, *dimensions)
measurement_column = tf.contrib.layers.real_valued_column("measurements")
columns_to_tensors = {"measurements": tf.constant(measurement_input)}
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
expected_shape = [batch_size, sequence_length, np.prod(dimensions)]
reshaped_measurements = np.reshape(measurement_input, expected_shape)
with self.test_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(reshaped_measurements, model_inputs)
def testRealValuedColumnWithNormalizer(self):
batch_size = 4
sequence_length = 8
dimension = 3
normalizer = lambda x: x - 2
np.random.seed(3333)
measurement_input = np.random.rand(batch_size, sequence_length, dimension)
measurement_column = tf.contrib.layers.real_valued_column(
"measurements", normalizer=normalizer)
columns_to_tensors = {"measurements": tf.constant(measurement_input)}
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
with self.test_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(normalizer(measurement_input), model_inputs)
def testRealValuedColumnWithMultiDimensionsAndNormalizer(self):
batch_size = 4
sequence_length = 8
dimensions = [3, 4, 5]
normalizer = lambda x: x / 2.0
np.random.seed(1234)
measurement_input = np.random.rand(batch_size, sequence_length, *dimensions)
measurement_column = tf.contrib.layers.real_valued_column(
"measurements", normalizer=normalizer)
columns_to_tensors = {"measurements": tf.constant(measurement_input)}
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors, [measurement_column])
expected_shape = [batch_size, sequence_length, np.prod(dimensions)]
reshaped_measurements = np.reshape(measurement_input, expected_shape)
with self.test_session() as sess:
model_inputs = sess.run(model_input_tensor)
self.assertAllClose(normalizer(reshaped_measurements), model_inputs)
def testOneHotColumnFromSparseColumnWithKeys(self):
ids_tensor = tf.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
shape=[4, 3, 2])
ids_column = tf.contrib.layers.sparse_column_with_keys(
"ids", ["a", "b", "c", "unseen"])
one_hot_column = tf.contrib.layers.one_hot_column(ids_column)
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors, [one_hot_column])
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
model_input = sess.run(model_input_tensor)
expected_input_shape = np.array([4, 3, 4])
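      # Sequence position (1, 0) contains both "a" and "c", so their one-hot
      # encodings are summed into [1, 0, 1, 0].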
expected_model_input = np.array(
[[[0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
[[1, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0]]], dtype=np.float32)
self.assertAllEqual(expected_input_shape, model_input.shape)
self.assertAllClose(expected_model_input, model_input)
def testOneHotColumnFromSparseColumnWithHashBucket(self):
hash_buckets = 10
ids_tensor = tf.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
shape=[4, 3, 2])
hashed_ids_column = tf.contrib.layers.sparse_column_with_hash_bucket(
"ids", hash_buckets)
one_hot_column = tf.contrib.layers.one_hot_column(hashed_ids_column)
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors, [one_hot_column])
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
model_input = sess.run(model_input_tensor)
expected_input_shape = np.array([4, 3, hash_buckets])
self.assertAllEqual(expected_input_shape, model_input.shape)
def testEmbeddingColumn(self):
hash_buckets = 10
embedding_dimension = 5
ids_tensor = tf.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
shape=[4, 3, 2])
expected_input_shape = np.array([4, 3, embedding_dimension])
hashed_ids_column = tf.contrib.layers.sparse_column_with_hash_bucket(
"ids", hash_buckets)
embedded_column = tf.contrib.layers.embedding_column(
hashed_ids_column, embedding_dimension)
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors, [embedded_column])
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
model_input = sess.run(model_input_tensor)
self.assertAllEqual(expected_input_shape, model_input.shape)
def testEmbeddingColumnGradient(self):
hash_buckets = 1000
embedding_dimension = 3
ids_tensor = tf.SparseTensor(
values=["c", "b",
"a", "c", "b",
"b"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
shape=[4, 3, 2])
hashed_ids_column = tf.contrib.layers.sparse_column_with_hash_bucket(
"ids", hash_buckets)
embedded_column = tf.contrib.layers.embedding_column(
hashed_ids_column, embedding_dimension, combiner="sum")
columns_to_tensors = {"ids": ids_tensor}
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors,
[embedded_column],
weight_collections=["my_collection"])
embedding_weights = tf.get_collection("my_collection")
gradient_tensor = tf.gradients(model_input_tensor, embedding_weights)
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
model_input, gradients = sess.run([model_input_tensor, gradient_tensor])
expected_input_shape = [4, 3, embedding_dimension]
self.assertAllEqual(expected_input_shape, model_input.shape)
      # `ids_tensor` consists of 7 empty positions, 3 occurrences of "b",
      # 2 occurrences of "c" and 1 occurrence of "a".
expected_gradient_values = sorted([0., 3., 2., 1.] * embedding_dimension)
actual_gradient_values = np.sort(gradients[0].values, axis=None)
self.assertAllClose(expected_gradient_values, actual_gradient_values)
def testMultipleColumns(self):
batch_size = 4
sequence_length = 3
measurement_dimension = 5
country_hash_size = 10
max_id = 200
id_embedding_dimension = 11
normalizer = lambda x: x / 10.0
measurement_tensor = tf.random_uniform(
[batch_size, sequence_length, measurement_dimension])
country_tensor = tf.SparseTensor(
values=["us", "ca",
"ru", "fr", "ca",
"mx"],
indices=[[0, 0, 0], [0, 1, 0],
[1, 0, 0], [1, 0, 1], [1, 1, 0],
[3, 2, 0]],
shape=[4, 3, 2])
id_tensor = tf.SparseTensor(
values=[2, 5,
26, 123, 1,
0],
indices=[[0, 0, 0], [0, 0, 1], [0, 1, 1],
[1, 0, 0], [1, 1, 0],
[3, 2, 0]],
shape=[4, 3, 2])
columns_to_tensors = {"measurements": measurement_tensor,
"country": country_tensor,
"id": id_tensor}
measurement_column = tf.contrib.layers.real_valued_column(
"measurements", normalizer=normalizer)
country_column = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", country_hash_size)
id_column = tf.contrib.layers.sparse_column_with_integerized_feature(
"id", max_id)
onehot_country_column = tf.contrib.layers.one_hot_column(country_column)
embedded_id_column = tf.contrib.layers.embedding_column(
id_column, id_embedding_dimension)
model_input_columns = [measurement_column,
onehot_country_column,
embedded_id_column]
model_input_tensor = tf.contrib.layers.sequence_input_from_feature_columns(
columns_to_tensors, model_input_columns)
self.assertEqual(tf.float32, model_input_tensor.dtype)
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
model_input = sess.run(model_input_tensor)
expected_input_shape = [
batch_size,
sequence_length,
measurement_dimension + country_hash_size + id_embedding_dimension]
self.assertAllEqual(expected_input_shape, model_input.shape)
class WeightedSumTest(tf.test.TestCase):
def testSparseColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testSparseIntColumn(self):
"""Tests a sparse column with int values."""
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(
"wire", 10, dtype=tf.int64)
wire_tensor = tf.SparseTensor(values=[101, 201, 301],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testWeightedSparseColumn(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [weighted_ids], num_outputs=5)
with self.test_session():
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testCrossedColumn(self):
a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
hash_bucket_size=100)
b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
hash_bucket_size=100)
crossed = tf.contrib.layers.crossed_column(
set([a, b]), hash_bucket_size=10000)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [crossed], num_outputs=5)
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testEmbeddingColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
with self.test_session():
with self.assertRaisesRegexp(
ValueError, "Error creating weighted sum for column: wire_embedding"):
tf.global_variables_initializer().run()
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[embeded_sparse],
num_outputs=5)
def testRealValuedColumnWithMultiDimensions(self):
real_valued = tf.contrib.layers.real_valued_column("price", 2)
features = {"price": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [real_valued], num_outputs=5)
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testBucketizedColumnWithMultiDimensions(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
features = {"price": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [bucket], num_outputs=5)
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testAllWideColumns(self):
real_valued = tf.contrib.layers.real_valued_column("income", 2)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
crossed = tf.contrib.layers.crossed_column([bucket, hashed_sparse], 100)
features = {
"income": tf.constant([[20., 10], [110, 0], [-3, 30]]),
"price": tf.constant([[20.], [110], [-3]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
output, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [real_valued, bucket, hashed_sparse, crossed],
num_outputs=5)
with self.test_session():
tf.global_variables_initializer().run()
self.assertAllEqual(output.eval().shape, [3, 5])
def testPredictions(self):
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "finnish", "hindi"])
age = tf.contrib.layers.real_valued_column("age")
with tf.Graph().as_default():
features = {
"age": tf.constant([[1], [2]]),
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}
output, column_to_variable, bias = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[age, language],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
        # score: 0.1 (bias) + age * 0.2
sess.run(column_to_variable[age][0].assign([[0.2]]))
self.assertAllClose(output.eval(), [[0.3], [0.5]])
        # score: 0.1 (bias) + age * 0.2 + language_weight[language_index]
sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
self.assertAllClose(output.eval(), [[0.5], [0.6]])
def testJointPredictions(self):
country = tf.contrib.layers.sparse_column_with_keys(
column_name="country",
keys=["us", "finland"])
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "finnish", "hindi"])
with tf.Graph().as_default():
features = {
"country": tf.SparseTensor(values=["finland", "us"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}
output, variables, bias = (
tf.contrib.layers.joint_weighted_sum_from_feature_columns(
features, [country, language], num_outputs=1))
# Assert that only a single weight is created.
self.assertEqual(len(variables), 1)
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
        # The joint weight has shape [5, 1]: 2 country features + 3 language
        # features, and 1 output class.
self.assertEquals(variables[0].get_shape().as_list(), [5, 1])
# score: bias + country_weight + language_weight
sess.run(variables[0].assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
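        # Row 0: 0.1 (bias) + 0.2 (finland) + 0.5 (hindi) = 0.8.
        # Row 1: 0.1 (bias) + 0.1 (us) + 0.3 (english) = 0.5.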
self.assertAllClose(output.eval(), [[0.8], [0.5]])
def testJointPredictionsWeightedFails(self):
language = tf.contrib.layers.weighted_sparse_column(
tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "finnish", "hindi"]),
"weight")
with tf.Graph().as_default():
features = {
"weight": tf.constant([[1], [2]]),
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}
with self.assertRaises(AssertionError):
tf.contrib.layers.joint_weighted_sum_from_feature_columns(
features, [language], num_outputs=1)
def testJointPredictionsRealFails(self):
age = tf.contrib.layers.real_valued_column("age")
with tf.Graph().as_default():
features = {
"age": tf.constant([[1], [2]]),
}
with self.assertRaises(NotImplementedError):
tf.contrib.layers.joint_weighted_sum_from_feature_columns(
features, [age], num_outputs=1)
def testPredictionsWithWeightedSparseColumn(self):
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "finnish", "hindi"])
weighted_language = tf.contrib.layers.weighted_sparse_column(
sparse_id_column=language,
weight_column_name="age")
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
"age": tf.SparseTensor(values=[10.0, 20.0],
indices=[[0, 0], [1, 0]],
shape=[2, 1])
}
output, column_to_variable, bias = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [weighted_language], num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
# score: bias + age*language_weight[index]
sess.run(column_to_variable[weighted_language][0].assign(
[[0.1], [0.2], [0.3]]))
self.assertAllClose(output.eval(), [[3.1], [2.1]])
def testPredictionsWithMultivalentColumnButNoCross(self):
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "turkish", "hindi"])
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, bias = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[language],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
# score: 0.1 + language_weight['hindi'] + language_weight['english']
sess.run(bias.assign([0.1]))
sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
self.assertAllClose(output.eval(), [[0.4]])
def testSparseFeatureColumnWithHashedBucketSize(self):
movies = tf.contrib.layers.sparse_column_with_hash_bucket(
column_name="movies", hash_bucket_size=15)
with tf.Graph().as_default():
features = {
"movies": tf.SparseTensor(
values=["matrix", "head-on", "winter sleep"],
indices=[[0, 0], [0, 1], [1, 0]],
shape=[2, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[movies],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[movies][0]
self.assertEqual(weights.get_shape(), (15, 1))
sess.run(weights.assign(weights + 0.4))
# score for first example = 0.4 (matrix) + 0.4 (head-on) = 0.8
# score for second example = 0.4 (winter sleep)
self.assertAllClose(output.eval(), [[0.8], [0.4]])
def testCrossUsageInPredictions(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 1])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language][0]
sess.run(weights.assign(weights + 0.4))
self.assertAllClose(output.eval(), [[0.4], [0.4]])
def testCrossColumnByItself(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
language_language = tf.contrib.layers.crossed_column(
[language, language], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [language_language],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[language_language][0]
sess.run(weights.assign(weights + 0.4))
# There are two features inside language. If we cross it by itself we'll
# have four crossed features.
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictions(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language][0]
sess.run(weights.assign(weights + 0.4))
# There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictionsWithPartition(self):
    # Bucket sizes have to be big enough to allow sharding.
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=64 << 19)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=64 << 18)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=64 << 18)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
with tf.variable_scope(
"weighted_sum_from_feature_columns",
features.values(),
partitioner=tf.min_max_variable_partitioner(
max_partitions=10, min_slice_size=((64 << 20) - 1))) as scope:
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, language, country_language],
num_outputs=1,
scope=scope))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
self.assertEqual(2, len(column_to_variable[country]))
self.assertEqual(3, len(column_to_variable[language]))
self.assertEqual(2, len(column_to_variable[country_language]))
weights = column_to_variable[country_language]
for partition_variable in weights:
sess.run(partition_variable.assign(partition_variable + 0.4))
# There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testRealValuedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
age = tf.contrib.layers.real_valued_column("age")
# The following RealValuedColumn has 3 dimensions.
incomes = tf.contrib.layers.real_valued_column("incomes", 3)
with tf.Graph().as_default():
features = {"age": tf.constant([[1], [1]]),
"incomes": tf.constant([[100., 200., 300.], [10., 20., 30.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, age, incomes],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
incomes_weights = column_to_variable[incomes][0]
sess.run(incomes_weights.assign([[0.1], [0.2], [0.3]]))
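        # Only the incomes weights are non-zero, so the first score is
        # 100 * 0.1 + 200 * 0.2 + 300 * 0.3 = 140.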
self.assertAllClose(output.eval(), [[140.], [14.]])
def testMulticlassWithRealValuedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
age = tf.contrib.layers.real_valued_column("age")
# The following RealValuedColumn has 3 dimensions.
incomes = tf.contrib.layers.real_valued_column("incomes", 3)
with tf.Graph().as_default():
features = {"age": tf.constant([[1], [1]]),
"incomes": tf.constant([[100., 200., 300.], [10., 20., 30.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, age, incomes],
num_outputs=5))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
incomes_weights = column_to_variable[incomes][0]
sess.run(incomes_weights.assign([[0.01, 0.1, 1., 10., 100.],
[0.02, 0.2, 2., 20., 200.],
[0.03, 0.3, 3., 30., 300.]]))
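        # Each class score is the dot product of incomes and a weight column,
        # e.g. 100 * 0.01 + 200 * 0.02 + 300 * 0.03 = 14 for the first class.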
self.assertAllClose(output.eval(), [[14., 140., 1400., 14000., 140000.],
[1.4, 14., 140., 1400., 14000.]])
def testBucketizedColumn(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
        sess.run(column_to_variable[bucket][0].assign(
            [[0.1], [0.2], [0.3], [0.4]]))
self.assertAllClose(output.eval(), [[0.3], [0.4], [0.1]])
def testBucketizedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
      # buckets [2, 3], [3, 2], [0, 0]. dimension = 2
features = {"price": tf.constant([[20., 210], [110, 50], [-3, -30]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[3, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket, country],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
# dimension = 2, bucket_size = 4, num_classes = 1
sess.run(column_to_variable[bucket][0].assign(
[[0.1], [0.2], [0.3], [0.4], [1], [2], [3], [4]]))
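        # Rows 0-3 hold the dimension-0 bucket weights and rows 4-7 the
        # dimension-1 weights; the first example scores 0.3 (bucket 2, dim 0)
        # plus 4 (bucket 3, dim 1).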
self.assertAllClose(output.eval(), [[0.3 + 4], [0.4 + 3], [0.1 + 1]])
def testMulticlassWithBucketizedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
      # buckets [2, 3], [3, 2], [0, 0]. dimension = 2
features = {"price": tf.constant([[20., 210], [110, 50], [-3, -30]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[3, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket, country],
num_outputs=5))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
# dimension = 2, bucket_size = 4, num_classes = 5
sess.run(column_to_variable[bucket][0].assign(
[[0.1, 1, 10, 100, 1000], [0.2, 2, 20, 200, 2000],
[0.3, 3, 30, 300, 3000], [0.4, 4, 40, 400, 4000],
[5, 50, 500, 5000, 50000], [6, 60, 600, 6000, 60000],
[7, 70, 700, 7000, 70000], [8, 80, 800, 8000, 80000]]))
self.assertAllClose(
output.eval(),
[[0.3 + 8, 3 + 80, 30 + 800, 300 + 8000, 3000 + 80000],
[0.4 + 7, 4 + 70, 40 + 700, 400 + 7000, 4000 + 70000],
[0.1 + 5, 1 + 50, 10 + 500, 100 + 5000, 1000 + 50000]])
def testCrossWithBucketizedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[country_price],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_price][0]
sess.run(weights.assign(weights + 0.4))
# There are two crosses each with 0.4 weight.
# score = 0.4 + 0.4
self.assertAllClose(output.eval(), [[0.8]])
def testCrossWithCrossedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
country_language_price = tf.contrib.layers.crossed_column(
set([country_language, price_bucket]),
hash_bucket_size=15)
with tf.Graph().as_default():
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language_price],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language_price][0]
sess.run(weights.assign(weights + 0.4))
        # There are four crosses (2 languages x 2 countries x 1 price bucket),
        # each with 0.4 weight.
        # score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testIntegerizedColumn(self):
product = tf.contrib.layers.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with tf.Graph().as_default():
features = {"product": tf.SparseTensor(values=[0, 4, 2],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[product],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testIntegerizedColumnWithInvalidId(self):
product = tf.contrib.layers.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with tf.Graph().as_default():
features = {"product": tf.SparseTensor(values=[5, 4, 7],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])}
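      # Ids outside [0, bucket_size) wrap around modulo bucket_size
      # (5 -> 0, 7 -> 2), so the output matches the valid-id test above.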
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[product],
num_outputs=1))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testMulticlassWithOnlyBias(self):
with tf.Graph().as_default():
features = {"age": tf.constant([[10.], [20.], [30.], [40.]])}
output, _, bias = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [tf.contrib.layers.real_valued_column("age")],
num_outputs=3)
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
sess.run(bias.assign([0.1, 0.2, 0.3]))
self.assertAllClose(output.eval(), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3],
[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
def testMulticlassWithRealValuedColumn(self):
with tf.Graph().as_default():
column = tf.contrib.layers.real_valued_column("age")
features = {"age": tf.constant([[10.], [20.], [30.], [40.]])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (1, 3))
sess.run(weights.assign([[0.01, 0.03, 0.05]]))
self.assertAllClose(output.eval(), [[0.1, 0.3, 0.5], [0.2, 0.6, 1.0],
[0.3, 0.9, 1.5], [0.4, 1.2, 2.0]])
def testMulticlassWithSparseColumn(self):
with tf.Graph().as_default():
column = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "arabic", "hindi", "russian", "swahili"])
features = {
"language": tf.SparseTensor(
values=["hindi", "english", "arabic", "russian"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
shape=[4, 1])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
                                 [0.3, 0.6, 0.9], [0.4, 0.7, 1.0],
                                 [0.5, 0.8, 1.1]]))
self.assertAllClose(output.eval(), [[0.3, 0.6, 0.9], [0.1, 0.4, 0.7],
[0.2, 0.5, 0.8], [0.4, 0.7, 1.0]])
def testMulticlassWithBucketizedColumn(self):
column = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 100., 500., 1000.])
with tf.Graph().as_default():
# buckets 0, 2, 1, 2
features = {"price": tf.constant([[-3], [110], [20.], [210]])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
                                 [0.3, 0.6, 0.9], [0.4, 0.7, 1.0],
                                 [0.5, 0.8, 1.1]]))
self.assertAllClose(output.eval(), [[0.1, 0.4, 0.7], [0.3, 0.6, 0.9],
[0.2, 0.5, 0.8], [0.3, 0.6, 0.9]])
def testMulticlassWithCrossedColumn(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=2)
column = tf.contrib.layers.crossed_column(
{language, country}, hash_bucket_size=5)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(
values=["english", "spanish", "russian", "swahili"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
shape=[4, 1]),
"country": tf.SparseTensor(values=["US", "SV", "RU", "KE"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
shape=[4, 1])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
                                 [0.3, 0.6, 0.9], [0.4, 0.7, 1.0],
                                 [0.5, 0.8, 1.1]]))
self.assertAllClose(tf.shape(output).eval(), [4, 3])
def testMulticlassWithMultivalentColumn(self):
column = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "turkish", "hindi", "russian", "swahili"])
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(
values=["hindi", "english", "turkish", "turkish", "english"],
indices=[[0, 0], [0, 1], [1, 0], [2, 0], [3, 0]],
shape=[4, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.global_variables_initializer().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
        sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
                                 [0.3, 0.6, 0.9], [0.4, 0.7, 1.0],
                                 [0.5, 0.8, 1.1]]))
self.assertAllClose(output.eval(), [[0.4, 1.0, 1.6], [0.2, 0.5, 0.8],
[0.2, 0.5, 0.8], [0.1, 0.4, 0.7]])
def testVariablesAddedToCollection(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_price, price_bucket],
num_outputs=1,
weight_collections=["my_collection"])
weights = tf.get_collection("my_collection")
# 3 = bias + price_bucket + country_price
self.assertEqual(3, len(weights))
class ParseExampleTest(tf.test.TestCase):
def testParseExample(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", dimension=3),
boundaries=[0., 10., 100.])
wire_cast = tf.contrib.layers.sparse_column_with_keys(
"wire_cast", ["marlo", "omar", "stringer"])
# buckets 2, 3, 0
data = tf.train.Example(features=tf.train.Features(feature={
"price": tf.train.Feature(float_list=tf.train.FloatList(value=[20., 110,
-3])),
"wire_cast": tf.train.Feature(bytes_list=tf.train.BytesList(value=[
b"stringer", b"marlo"
])),
}))
output = tf.contrib.layers.parse_feature_columns_from_examples(
serialized=[data.SerializeToString()],
feature_columns=[bucket, wire_cast])
self.assertIn(bucket, output)
self.assertIn(wire_cast, output)
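    # In the wire_cast vocabulary "stringer" maps to id 2 and "marlo" to 0.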
with self.test_session():
tf.initialize_all_tables().run()
self.assertAllEqual(output[bucket].eval(), [[2, 3, 0]])
self.assertAllEqual(output[wire_cast].indices.eval(), [[0, 0], [0, 1]])
self.assertAllEqual(output[wire_cast].values.eval(), [2, 0])
def testParseSequenceExample(self):
location_keys = ["east_side", "west_side", "nyc"]
embedding_dimension = 10
location = tf.contrib.layers.sparse_column_with_keys(
"location", keys=location_keys)
location_onehot = tf.contrib.layers.one_hot_column(location)
wire_cast = tf.contrib.layers.sparse_column_with_keys(
"wire_cast", ["marlo", "omar", "stringer"])
wire_cast_embedded = tf.contrib.layers.embedding_column(
wire_cast, dimension=embedding_dimension)
measurements = tf.contrib.layers.real_valued_column("measurements", dimension=2)
context_feature_columns = [location_onehot]
sequence_feature_columns = [wire_cast_embedded, measurements]
sequence_example = tf.train.SequenceExample(
context=tf.train.Features(feature={
"location": tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[b"west_side"])),
}),
feature_lists=tf.train.FeatureLists(feature_list={
"wire_cast": tf.train.FeatureList(feature=[
tf.train.Feature(bytes_list=tf.train.BytesList(
value=[b"marlo", b"stringer"])),
tf.train.Feature(bytes_list=tf.train.BytesList(
value=[b"omar", b"stringer", b"marlo"])),
tf.train.Feature(bytes_list=tf.train.BytesList(
value=[b"marlo"])),
]),
"measurements": tf.train.FeatureList(feature=[
tf.train.Feature(float_list=tf.train.FloatList(
value=[0.2, 0.3])),
tf.train.Feature(float_list=tf.train.FloatList(
value=[0.1, 0.8])),
tf.train.Feature(float_list=tf.train.FloatList(
value=[0.5, 0.0])),
])
}))
ctx, seq = tf.contrib.layers.parse_feature_columns_from_sequence_examples(
serialized=sequence_example.SerializeToString(),
context_feature_columns=context_feature_columns,
sequence_feature_columns=sequence_feature_columns)
self.assertIn("location", ctx)
self.assertIsInstance(ctx["location"], tf.SparseTensor)
self.assertIn("wire_cast", seq)
self.assertIsInstance(seq["wire_cast"], tf.SparseTensor)
self.assertIn("measurements", seq)
self.assertIsInstance(seq["measurements"], tf.Tensor)
with self.test_session() as sess:
location_val, wire_cast_val, measurement_val = sess.run([
ctx["location"], seq["wire_cast"], seq["measurements"]])
self.assertAllEqual(location_val.indices, np.array([[0]]))
self.assertAllEqual(location_val.values, np.array([b"west_side"]))
self.assertAllEqual(location_val.shape, np.array([1]))
self.assertAllEqual(wire_cast_val.indices, np.array(
[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 0]]))
self.assertAllEqual(wire_cast_val.values, np.array(
[b"marlo", b"stringer", b"omar", b"stringer", b"marlo", b"marlo"]))
self.assertAllEqual(wire_cast_val.shape, np.array([3, 3]))
self.assertAllClose(
measurement_val, np.array([[0.2, 0.3], [0.1, 0.8], [0.5, 0.0]]))
class InferRealValuedColumnTest(tf.test.TestCase):
def testTensorInt32(self):
self.assertEqual(
tf.contrib.layers.infer_real_valued_columns(
tf.zeros(shape=[33, 4], dtype=tf.int32)),
[tf.contrib.layers.real_valued_column("", dimension=4, dtype=tf.int32)])
def testTensorInt64(self):
self.assertEqual(
tf.contrib.layers.infer_real_valued_columns(
tf.zeros(shape=[33, 4], dtype=tf.int64)),
[tf.contrib.layers.real_valued_column("", dimension=4, dtype=tf.int64)])
def testTensorFloat32(self):
self.assertEqual(
tf.contrib.layers.infer_real_valued_columns(
tf.zeros(shape=[33, 4], dtype=tf.float32)),
[tf.contrib.layers.real_valued_column(
"", dimension=4, dtype=tf.float32)])
def testTensorFloat64(self):
self.assertEqual(
tf.contrib.layers.infer_real_valued_columns(
tf.zeros(shape=[33, 4], dtype=tf.float64)),
[tf.contrib.layers.real_valued_column(
"", dimension=4, dtype=tf.float64)])
def testDictionary(self):
self.assertItemsEqual(
tf.contrib.layers.infer_real_valued_columns({
"a": tf.zeros(shape=[33, 4], dtype=tf.int32),
"b": tf.zeros(shape=[3, 2], dtype=tf.float32)
}),
[tf.contrib.layers.real_valued_column(
"a", dimension=4, dtype=tf.int32),
tf.contrib.layers.real_valued_column(
"b", dimension=2, dtype=tf.float32)])
def testNotGoodDtype(self):
with self.assertRaises(ValueError):
tf.contrib.layers.infer_real_valued_columns(
tf.constant([["a"]], dtype=tf.string))
def testSparseTensor(self):
with self.assertRaises(ValueError):
tf.contrib.layers.infer_real_valued_columns(
tf.SparseTensor(indices=[[0, 0]], values=["a"], shape=[1, 1]))
if __name__ == "__main__":
tf.test.main()
|
koobonil/Boss2D
|
refs/heads/master
|
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/lib/io/file_io_test.py
|
31
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Testing File IO operations in file_io.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
class FileIoTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "base_dir")
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def testFileDoesntExist(self):
file_path = os.path.join(self._base_dir, "temp_file")
self.assertFalse(file_io.file_exists(file_path))
with self.assertRaises(errors.NotFoundError):
_ = file_io.read_file_to_string(file_path)
def testWriteToString(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFile(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testReadBinaryMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
def testWriteBinaryMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, "wb").write("testing")
with file_io.FileIO(file_path, mode="r") as f:
self.assertEqual("testing", f.read())
def testAppend(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("begin\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a1\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a2\n")
with file_io.FileIO(file_path, mode="r") as f:
file_contents = f.read()
self.assertEqual("begin\na1\na2\n", file_contents)
def testMultipleFiles(self):
file_prefix = os.path.join(self._base_dir, "temp_file")
for i in range(5000):
f = file_io.FileIO(file_prefix + str(i), mode="w+")
f.write("testing")
f.flush()
self.assertEqual("testing", f.read())
f.close()
def testMultipleWrites(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("line1\n")
f.write("line2")
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("line1\nline2", file_contents)
def testFileWriteBadMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="r").write("testing")
def testFileReadBadMode(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="w").read()
def testFileDelete(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_io.delete_file(file_path)
self.assertFalse(file_io.file_exists(file_path))
def testFileDeleteFail(self):
file_path = os.path.join(self._base_dir, "temp_file")
with self.assertRaises(errors.NotFoundError):
file_io.delete_file(file_path)
def testGetMatchingFiles(self):
dir_path = os.path.join(self._base_dir, "temp_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
expected_match = [os.path.join(dir_path, name) for name in files]
self.assertItemsEqual(
file_io.get_matching_files(os.path.join(dir_path, "file*.txt")),
expected_match)
self.assertItemsEqual(file_io.get_matching_files(tuple()), [])
files_subset = [
os.path.join(dir_path, files[0]), os.path.join(dir_path, files[2])
]
self.assertItemsEqual(
file_io.get_matching_files(files_subset), files_subset)
file_io.delete_recursively(dir_path)
self.assertFalse(file_io.file_exists(os.path.join(dir_path, "file3.txt")))
def testCreateRecursiveDir(self):
dir_path = os.path.join(self._base_dir, "temp_dir/temp_dir1/temp_dir2")
file_io.recursive_create_dir(dir_path)
file_io.recursive_create_dir(dir_path) # repeat creation
file_path = os.path.join(dir_path, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
file_io.delete_recursively(os.path.join(self._base_dir, "temp_dir"))
self.assertFalse(file_io.file_exists(file_path))
def testCopy(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.copy(file_path, copy_path)
self.assertTrue(file_io.file_exists(copy_path))
f = file_io.FileIO(file_path, mode="r")
self.assertEqual("testing", f.read())
self.assertEqual(7, f.tell())
def testCopyOverwrite(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
file_io.copy(file_path, copy_path, overwrite=True)
self.assertTrue(file_io.file_exists(copy_path))
self.assertEqual("testing", file_io.FileIO(file_path, mode="r").read())
def testCopyOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = os.path.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
with self.assertRaises(errors.AlreadyExistsError):
file_io.copy(file_path, copy_path, overwrite=False)
def testRename(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.rename(file_path, rename_path)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwrite(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
file_io.rename(file_path, rename_path, overwrite=True)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwriteFalse(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = os.path.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
with self.assertRaises(errors.AlreadyExistsError):
file_io.rename(file_path, rename_path, overwrite=False)
self.assertTrue(file_io.file_exists(rename_path))
self.assertTrue(file_io.file_exists(file_path))
def testDeleteRecursivelyFail(self):
fake_dir_path = os.path.join(self._base_dir, "temp_dir")
with self.assertRaises(errors.NotFoundError):
file_io.delete_recursively(fake_dir_path)
def testIsDirectory(self):
dir_path = os.path.join(self._base_dir, "test_dir")
# Failure for a non-existing dir.
self.assertFalse(file_io.is_directory(dir_path))
file_io.create_dir(dir_path)
self.assertTrue(file_io.is_directory(dir_path))
file_path = os.path.join(dir_path, "test_file")
file_io.FileIO(file_path, mode="w").write("test")
# False for a file.
self.assertFalse(file_io.is_directory(file_path))
# Test that the value returned from `stat()` has `is_directory` set.
file_statistics = file_io.stat(dir_path)
self.assertTrue(file_statistics.is_directory)
def testListDirectory(self):
dir_path = os.path.join(self._base_dir, "test_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = os.path.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
subdir_path = os.path.join(dir_path, "sub_dir")
file_io.create_dir(subdir_path)
subdir_file_path = os.path.join(subdir_path, "file4.txt")
file_io.FileIO(subdir_file_path, mode="w").write("testing")
dir_list = file_io.list_directory(dir_path)
self.assertItemsEqual(files + ["sub_dir"], dir_list)
def testListDirectoryFailure(self):
dir_path = os.path.join(self._base_dir, "test_dir")
with self.assertRaises(errors.NotFoundError):
file_io.list_directory(dir_path)
def _setupWalkDirectories(self, dir_path):
# Creating a file structure as follows
# test_dir -> file: file1.txt; dirs: subdir1_1, subdir1_2, subdir1_3
    # subdir1_1 -> file: file2.txt
# subdir1_2 -> dir: subdir2
file_io.create_dir(dir_path)
file_io.FileIO(
os.path.join(dir_path, "file1.txt"), mode="w").write("testing")
sub_dirs1 = ["subdir1_1", "subdir1_2", "subdir1_3"]
for name in sub_dirs1:
file_io.create_dir(os.path.join(dir_path, name))
file_io.FileIO(
os.path.join(dir_path, "subdir1_1/file2.txt"),
mode="w").write("testing")
file_io.create_dir(os.path.join(dir_path, "subdir1_2/subdir2"))
def testWalkInOrder(self):
dir_path = os.path.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = True)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=True):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [dir_path] + [
os.path.join(dir_path, item)
for item in
["subdir1_1", "subdir1_2", "subdir1_2/subdir2", "subdir1_3"]
])
self.assertEqual(dir_path, all_dirs[0])
self.assertLess(
all_dirs.index(os.path.join(dir_path, "subdir1_2")),
all_dirs.index(os.path.join(dir_path, "subdir1_2/subdir2")))
self.assertItemsEqual(all_subdirs[1:5], [[], ["subdir2"], [], []])
self.assertItemsEqual(all_subdirs[0],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file1.txt"], ["file2.txt"], [], [], []])
self.assertLess(
all_files.index(["file1.txt"]), all_files.index(["file2.txt"]))
def testWalkPostOrder(self):
dir_path = os.path.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = False)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [
os.path.join(dir_path, item)
for item in
["subdir1_1", "subdir1_2/subdir2", "subdir1_2", "subdir1_3"]
] + [dir_path])
self.assertEqual(dir_path, all_dirs[4])
self.assertLess(
all_dirs.index(os.path.join(dir_path, "subdir1_2/subdir2")),
all_dirs.index(os.path.join(dir_path, "subdir1_2")))
self.assertItemsEqual(all_subdirs[0:4], [[], [], ["subdir2"], []])
self.assertItemsEqual(all_subdirs[4],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file2.txt"], [], [], [], ["file1.txt"]])
self.assertLess(
all_files.index(["file2.txt"]), all_files.index(["file1.txt"]))
def testWalkFailure(self):
dir_path = os.path.join(self._base_dir, "test_dir")
# Try walking a directory that wasn't created.
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [])
self.assertItemsEqual(all_subdirs, [])
self.assertItemsEqual(all_files, [])
def testStat(self):
file_path = os.path.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_statistics = file_io.stat(file_path)
os_statistics = os.stat(file_path)
self.assertEqual(7, file_statistics.length)
self.assertEqual(
int(os_statistics.st_mtime), int(file_statistics.mtime_nsec / 1e9))
self.assertFalse(file_statistics.is_directory)
def testReadLine(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.readline())
self.assertEqual("testing2\n", f.readline())
self.assertEqual("testing3\n", f.readline())
self.assertEqual("\n", f.readline())
self.assertEqual("testing5", f.readline())
self.assertEqual("", f.readline())
def testRead(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.read(9))
self.assertEqual("testing2\n", f.read(9))
self.assertEqual("t", f.read(1))
self.assertEqual("esting3\n\ntesting5", f.read())
def testTell(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
self.assertEqual(27, f.tell())
self.assertEqual("\n", f.readline())
self.assertEqual(28, f.tell())
self.assertEqual("testing5", f.readline())
self.assertEqual(36, f.tell())
self.assertEqual("", f.readline())
self.assertEqual(36, f.tell())
def testSeek(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(18)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(0)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(-1)
with self.assertRaises(TypeError):
f.seek()
# TODO(jhseu): Delete after position deprecation.
with self.assertRaises(TypeError):
f.seek(offset=0, position=0)
f.seek(position=9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
def testSeekFromWhat(self):
file_path = os.path.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(9, 1)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9, 0)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(-f.size(), 2)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(0, 3)
def testReadingIterator(self):
file_path = os.path.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
with file_io.FileIO(file_path, mode="r+") as f:
f.write("".join(data))
actual_data = []
for line in f:
actual_data.append(line)
self.assertSequenceEqual(actual_data, data)
def testReadlines(self):
file_path = os.path.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
f = file_io.FileIO(file_path, mode="r+")
f.write("".join(data))
f.flush()
lines = f.readlines()
self.assertSequenceEqual(lines, data)
def testEof(self):
"""Test that reading past EOF does not raise an exception."""
file_path = os.path.join(self._base_dir, "temp_file")
f = file_io.FileIO(file_path, mode="r+")
content = "testing"
f.write(content)
f.flush()
self.assertEqual(content, f.read(len(content) + 1))
if __name__ == "__main__":
test.main()
|
ktan2020/legacy-automation
|
refs/heads/master
|
win/Lib/site-packages/selenium/webdriver/support/events.py
|
55
|
#!/usr/bin/python
#
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .abstract_event_listener import AbstractEventListener
from .event_firing_webdriver import EventFiringWebDriver
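# Illustrative use of this module (a hedged sketch, not part of the original
# file; the hook shown follows AbstractEventListener's interface, the browser
# choice is arbitrary):
#
#   from selenium import webdriver
#   from selenium.webdriver.support.events import (
#       AbstractEventListener, EventFiringWebDriver)
#
#   class NavigationLogger(AbstractEventListener):
#       def before_navigate_to(self, url, driver):
#           print("Navigating to %s" % url)
#
#   driver = EventFiringWebDriver(webdriver.Firefox(), NavigationLogger())
#   driver.get("http://example.com")  # fires before_navigate_to first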
|
xuxiandi/gensim
|
refs/heads/develop
|
gensim/test/__init__.py
|
97
|
"""
This package contains automated code tests for all other gensim packages.
"""
|
achang97/YouTunes
|
refs/heads/master
|
lib/python2.7/site-packages/youtube_dl/extractor/beeg.py
|
2
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_chr,
compat_ord,
compat_urllib_parse_unquote,
)
from ..utils import (
int_or_none,
parse_iso8601,
urljoin,
)
class BeegIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
_TEST = {
'url': 'http://beeg.com/5416503',
'md5': 'a1a1b1a8bc70a89e49ccfd113aed0820',
'info_dict': {
'id': '5416503',
'ext': 'mp4',
'title': 'Sultry Striptease',
'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2',
'timestamp': 1391813355,
'upload_date': '20140207',
'duration': 383,
'tags': list,
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
cpl_url = self._search_regex(
r'<script[^>]+src=(["\'])(?P<url>(?:/static|(?:https?:)?//static\.beeg\.com)/cpl/\d+\.js.*?)\1',
webpage, 'cpl', default=None, group='url')
cpl_url = urljoin(url, cpl_url)
beeg_version, beeg_salt = [None] * 2
if cpl_url:
cpl = self._download_webpage(
self._proto_relative_url(cpl_url), video_id,
'Downloading cpl JS', fatal=False)
if cpl:
beeg_version = int_or_none(self._search_regex(
r'beeg_version\s*=\s*([^\b]+)', cpl,
'beeg version', default=None)) or self._search_regex(
r'/(\d+)\.js', cpl_url, 'beeg version', default=None)
beeg_salt = self._search_regex(
r'beeg_salt\s*=\s*(["\'])(?P<beeg_salt>.+?)\1', cpl, 'beeg salt',
default=None, group='beeg_salt')
beeg_version = beeg_version or '2185'
beeg_salt = beeg_salt or 'pmweAkq8lAYKdfWcFCUj0yoVgoPlinamH5UE1CB3H'
video = self._download_json(
'https://api.beeg.com/api/v6/%s/video/%s' % (beeg_version, video_id),
video_id)
        def split(o, e):
            # Chop string o into chunks of length e; any shorter remainder
            # (len(o) % e) becomes the first chunk.
            def cut(s, x):
                n.append(s[:x])
                return s[x:]
            n = []
r = len(o) % e
if r > 0:
o = cut(o, r)
while len(o) > e:
o = cut(o, e)
n.append(o)
return n
def decrypt_key(key):
# Reverse engineered from http://static.beeg.com/cpl/1738.js
a = beeg_salt
e = compat_urllib_parse_unquote(key)
o = ''.join([
compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21)
for n in range(len(e))])
return ''.join(split(o, 3)[::-1])
def decrypt_url(encrypted_url):
encrypted_url = self._proto_relative_url(
encrypted_url.replace('{DATA_MARKERS}', ''), 'https:')
key = self._search_regex(
r'/key=(.*?)%2Cend=', encrypted_url, 'key', default=None)
if not key:
return encrypted_url
return encrypted_url.replace(key, decrypt_key(key))
formats = []
for format_id, video_url in video.items():
if not video_url:
continue
height = self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None)
if not height:
continue
formats.append({
'url': decrypt_url(video_url),
'format_id': format_id,
'height': int(height),
})
self._sort_formats(formats)
title = video['title']
video_id = video.get('id') or video_id
display_id = video.get('code')
description = video.get('desc')
timestamp = parse_iso8601(video.get('date'), ' ')
duration = int_or_none(video.get('duration'))
tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'timestamp': timestamp,
'duration': duration,
'tags': tags,
'formats': formats,
'age_limit': self._rta_search(webpage),
}
|
MichaelMraka/dnf
|
refs/heads/master
|
dnf/i18n.py
|
6
|
# i18n.py
#
# Copyright (C) 2012-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from __future__ import unicode_literals
from dnf.pycomp import PY3, is_py3bytes, unicode, setlocale
import gettext
import locale
import os
import sys
import unicodedata
"""
Centralize i18n stuff here. Must be unittested.
"""
class UnicodeStream(object):
def __init__(self, stream, encoding):
self.stream = stream
self.encoding = encoding
def write(self, s):
if not isinstance(s, str):
s = (s.decode(self.encoding, 'replace') if PY3 else
s.encode(self.encoding, 'replace'))
self.stream.write(s)
def __getattr__(self, name):
return getattr(self.stream, name)
def _full_ucd_support(encoding):
"""Return true if encoding can express any Unicode character.
Even if an encoding can express all accented letters in the given language,
we can't generally settle for it in DNF since sometimes we output special
characters like the registered trademark symbol (U+00AE) and surprisingly
many national non-unicode encodings, including e.g. ASCII and ISO-8859-2,
don't contain it.
"""
if encoding is None:
return False
lower = encoding.lower()
if lower.startswith('utf-') or lower.startswith('utf_'):
return True
return False
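# A quick illustration of the check above (hedged examples, not exhaustive):
#   _full_ucd_support('UTF-8')      -> True
#   _full_ucd_support('utf_16')     -> True
#   _full_ucd_support('ISO-8859-2') -> False  # cannot encode U+00AE
#   _full_ucd_support(None)         -> False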
def _guess_encoding():
""" Take the best shot at the current system's string encoding. """
encoding = locale.getpreferredencoding()
return 'utf-8' if encoding.startswith("ANSI") else encoding
def setup_locale():
try:
setlocale(locale.LC_ALL, '')
# set time to C so that we output sane things in the logs (#433091)
setlocale(locale.LC_TIME, 'C')
except locale.Error as e:
# default to C locale if we get a failure.
print('Failed to set locale, defaulting to C', file=sys.stderr)
os.environ['LC_ALL'] = 'C'
setlocale(locale.LC_ALL, 'C')
def setup_stdout():
""" Check that stdout is of suitable encoding and handle the situation if
not.
Returns True if stdout was of suitable encoding already and no changes
were needed.
"""
stdout = sys.stdout
try:
encoding = stdout.encoding
except AttributeError:
encoding = None
if not _full_ucd_support(encoding):
sys.stdout = UnicodeStream(stdout, _guess_encoding())
return False
return True
def ucd_input(ucstring):
""" Take input from user.
What the raw_input() built-in does, but encode the prompt first
(raw_input() won't check sys.stdout.encoding as e.g. print does, see
test_i18n.TestInput.test_assumption()).
"""
if not isinstance(ucstring, unicode):
raise TypeError("input() accepts Unicode strings")
if PY3:
return input(ucstring)
enc = sys.stdout.encoding if sys.stdout.encoding else 'utf8'
s = ucstring.encode(enc, 'strict')
return raw_input(s)
def ucd(obj):
""" Like the builtin unicode() but tries to use a reasonable encoding. """
if PY3:
if is_py3bytes(obj):
return str(obj, _guess_encoding(), errors='ignore')
elif isinstance(obj, str):
return obj
return str(obj)
else:
if isinstance(obj, unicode):
return obj
if hasattr(obj, '__unicode__'):
# see the doc for the unicode() built-in. The logic here is: if obj
# implements __unicode__, let it take a crack at it, but handle the
# situation if it fails:
try:
return unicode(obj)
except UnicodeError:
pass
return unicode(str(obj), _guess_encoding(), errors='ignore')
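# Illustrative behavior of ucd() (hedged examples; the decoded result depends
# on the encoding guessed from the locale, UTF-8 is assumed here):
#   ucd(b'caf\xc3\xa9') -> u'caf\xe9' ('café')
#   ucd(42)             -> u'42'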
# Functions for formatting output according to terminal width.
# They should be used instead of the built-in functions to account for the
# different display widths of Unicode characters.
def _exact_width_char(uchar):
return 2 if unicodedata.east_asian_width(uchar) in ('W', 'F') else 1
def chop_str(msg, chop=None):
""" Return the textual width of a Unicode string, chopping it to
a specified value. This is what you want to use instead of %.*s, as it
does the "right" thing with regard to different Unicode character width
Eg. "%.*s" % (10, msg) <= becomes => "%s" % (chop_str(msg, 10)) """
if chop is None:
return exact_width(msg), msg
width = 0
chopped_msg = ""
for char in msg:
char_width = _exact_width_char(char)
if width + char_width > chop:
break
chopped_msg += char
width += char_width
return width, chopped_msg
def exact_width(msg):
""" Calculates width of char at terminal screen
(Asian char counts for two) """
return sum(_exact_width_char(c) for c in msg)
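# Hedged width examples (U+3042, HIRAGANA LETTER A, is an East Asian wide
# character):
#   exact_width(u'abc')     -> 3
#   exact_width(u'\u3042a') -> 3  # 2 columns for U+3042, 1 for 'a'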
def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
""" Expand a msg to a specified "width" or chop to same.
Expansion can be left or right. This is what you want to use instead of
%*.*s, as it does the "right" thing with regard to different Unicode
character width.
prefix and suffix should be used for "invisible" bytes, like
highlighting.
Examples:
``"%-*.*s" % (10, 20, msg)`` becomes
``"%s" % (fill_exact_width(msg, 10, 20))``.
``"%20.10s" % (msg)`` becomes
``"%s" % (fill_exact_width(msg, 20, 10, left=False))``.
``"%s%.10s%s" % (pre, msg, suf)`` becomes
``"%s" % (fill_exact_width(msg, 0, 10, prefix=pre, suffix=suf))``.
"""
width, msg = chop_str(msg, chop)
if width >= fill:
if prefix or suffix:
msg = ''.join([prefix, msg, suffix])
else:
extra = " " * (fill - width)
if left:
msg = ''.join([prefix, msg, suffix, extra])
else:
msg = ''.join([extra, prefix, msg, suffix])
return msg
def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
""" Works like we want textwrap.wrap() to work, uses Unicode strings
and doesn't screw up lists/blocks/etc. """
    def _indent_at_beg(line):
        # Return (leading-space count, continuation indent); the continuation
        # indent is non-zero only when the line starts a list item.
        count = 0
        byte = 'X'
for byte in line:
if byte != ' ':
break
count += 1
if byte not in ("-", "*", ".", "o", '\xe2'):
return count, 0
list_chr = chop_str(line[count:], 1)[1]
if list_chr in ("-", "*", ".", "o",
"\u2022", "\u2023", "\u2218"):
nxt = _indent_at_beg(line[count+len(list_chr):])
nxt = nxt[1] or nxt[0]
if nxt:
return count, count + 1 + nxt
return count, 0
text = text.rstrip('\n')
lines = text.replace('\t', ' ' * 8).split('\n')
ret = []
indent = initial_indent
wrap_last = False
csab = 0
cspc_indent = 0
for line in lines:
line = line.rstrip(' ')
(lsab, lspc_indent) = (csab, cspc_indent)
(csab, cspc_indent) = _indent_at_beg(line)
force_nl = False # We want to stop wrapping under "certain" conditions:
if wrap_last and cspc_indent: # if line starts a list or
force_nl = True
if wrap_last and csab == len(line): # is empty line
force_nl = True
# if line doesn't continue a list and is "block indented"
if wrap_last and not lspc_indent:
if csab >= 4 and csab != lsab:
force_nl = True
if force_nl:
ret.append(indent.rstrip(' '))
indent = subsequent_indent
wrap_last = False
if csab == len(line): # empty line, remove spaces to make it easier.
line = ''
if wrap_last:
line = line.lstrip(' ')
cspc_indent = lspc_indent
if exact_width(indent + line) <= width:
wrap_last = False
ret.append(indent + line)
indent = subsequent_indent
continue
wrap_last = True
words = line.split(' ')
line = indent
spcs = cspc_indent
if not spcs and csab >= 4:
spcs = csab
for word in words:
if (width < exact_width(line + word)) and \
(exact_width(line) > exact_width(subsequent_indent)):
ret.append(line.rstrip(' '))
line = subsequent_indent + ' ' * spcs
line += word
line += ' '
indent = line.rstrip(' ') + ' '
if wrap_last:
ret.append(indent.rstrip(' '))
return '\n'.join(ret)
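# Illustrative wrap (a hedged sketch; the width and indent are chosen for
# clarity, not taken from any caller):
#   textwrap_fill(u'one two three four', width=10, subsequent_indent=u'  ')
#   -> u'one two\n  three\n  four'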
# setup translation
t = gettext.translation('dnf', fallback=True)
_ = t.ugettext
P_ = t.ungettext
|
quozl/sugar
|
refs/heads/master
|
tests/views/journal_detailstoolbox.py
|
2
|
# Copyright (C) 2013, Walter Bender
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from gi.repository import Gtk
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
from jarabe import config
from jarabe.journal.journaltoolbox import DetailToolbox
from jarabe.journal.journalwindow import JournalWindow
from jarabe.webservice.account import Account
ACCOUNT_NAME = 'mock'
class JournalMock(JournalWindow):
def get_mount_point(self):
return '/'
tests_dir = os.getcwd()
extension_dir = os.path.join(tests_dir, 'extensions')
os.environ["MOCK_ACCOUNT_STATE"] = str(Account.STATE_VALID)
config.ext_path = extension_dir
sys.path.append(config.ext_path)
window = Gtk.Window()
toolbox = DetailToolbox(JournalMock())
toolbox.show()
window.add(toolbox)
window.show()
toolbox.set_metadata({'mountpoint': '/', 'uid': '', 'title': 'mock'})
toolbox._copy.palette.popup(immediate=True)
Gtk.main()
|
neumerance/cloudloon2
|
refs/heads/master
|
.venv/lib/python2.7/site-packages/django/middleware/csrf.py
|
32
|
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import hashlib
import logging
import re
import random
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.encoding import force_text
from django.utils.http import same_origin
from django.utils.crypto import constant_time_compare, get_random_string
logger = logging.getLogger('django.request')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
CSRF_KEY_LENGTH = 32
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
"""
Returns the CSRF token required for a POST form. The token is an
alphanumeric value.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
request.META["CSRF_COOKIE_USED"] = True
return request.META.get("CSRF_COOKIE", None)
def rotate_token(request):
"""
Changes the CSRF token in use for a request - should be done on login
for security purposes.
"""
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
def _sanitize_token(token):
# Allow only alphanum
if len(token) > CSRF_KEY_LENGTH:
return _get_new_csrf_key()
token = re.sub('[^a-zA-Z0-9]+', '', force_text(token))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(
request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Generate token and store it in the request, so it's
# available to the view.
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RFC2616 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = request.META.get('HTTP_REFERER')
if referer is None:
logger.warning('Forbidden (%s): %s',
REASON_NO_REFERER, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_REFERER)
# Note that request.get_host() includes the port.
good_referer = 'https://%s/' % request.get_host()
if not same_origin(referer, good_referer):
reason = REASON_BAD_REFERER % (referer, good_referer)
logger.warning('Forbidden (%s): %s', reason, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
logger.warning('Forbidden (%s): %s',
REASON_NO_CSRF_COOKIE, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')
if not constant_time_compare(request_csrf_token, csrf_token):
logger.warning('Forbidden (%s): %s',
REASON_BAD_TOKEN, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
# If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was
        # never called, probably because a request middleware returned a response
# (for example, contrib.auth redirecting to a login page).
if request.META.get("CSRF_COOKIE") is None:
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age = 60 * 60 * 24 * 7 * 52,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE
)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
|
kartikluke/yotube
|
refs/heads/master
|
oauth2client/service_account.py
|
1
|
# Copyright (C) 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A service account credentials class.
This credentials class is implemented on top of rsa library.
"""
import base64
import rsa
import time
import types
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client import util
from oauth2client.anyjson import simplejson
from oauth2client.client import AssertionCredentials
from pyasn1.codec.ber import decoder
from pyasn1_modules.rfc5208 import PrivateKeyInfo
class _ServiceAccountCredentials(AssertionCredentials):
"""Class representing a service account (signed JWT) credential."""
MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
def __init__(self, service_account_id, service_account_email, private_key_id,
private_key_pkcs8_text, scopes, user_agent=None,
token_uri=GOOGLE_TOKEN_URI, revoke_uri=GOOGLE_REVOKE_URI, **kwargs):
super(_ServiceAccountCredentials, self).__init__(
None, user_agent=user_agent, token_uri=token_uri, revoke_uri=revoke_uri)
self._service_account_id = service_account_id
self._service_account_email = service_account_email
self._private_key_id = private_key_id
self._private_key = _get_private_key(private_key_pkcs8_text)
self._private_key_pkcs8_text = private_key_pkcs8_text
self._scopes = util.scopes_to_string(scopes)
self._user_agent = user_agent
self._token_uri = token_uri
self._revoke_uri = revoke_uri
self._kwargs = kwargs
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
header = {
'alg': 'RS256',
'typ': 'JWT',
'kid': self._private_key_id
}
now = long(time.time())
payload = {
'aud': self._token_uri,
'scope': self._scopes,
'iat': now,
'exp': now + _ServiceAccountCredentials.MAX_TOKEN_LIFETIME_SECS,
'iss': self._service_account_email
}
payload.update(self._kwargs)
assertion_input = '%s.%s' % (
_urlsafe_b64encode(header),
_urlsafe_b64encode(payload))
# Sign the assertion.
signature = base64.urlsafe_b64encode(rsa.pkcs1.sign(
assertion_input, self._private_key, 'SHA-256')).rstrip('=')
return '%s.%s' % (assertion_input, signature)
def sign_blob(self, blob):
return (self._private_key_id,
rsa.pkcs1.sign(blob, self._private_key, 'SHA-256'))
@property
def service_account_email(self):
return self._service_account_email
@property
def serialization_data(self):
return {
'type': 'service_account',
'client_id': self._service_account_id,
'client_email': self._service_account_email,
'private_key_id': self._private_key_id,
'private_key': self._private_key_pkcs8_text
}
def create_scoped_required(self):
return not self._scopes
def create_scoped(self, scopes):
return _ServiceAccountCredentials(self._service_account_id,
self._service_account_email,
self._private_key_id,
self._private_key_pkcs8_text,
scopes,
user_agent=self._user_agent,
token_uri=self._token_uri,
revoke_uri=self._revoke_uri,
**self._kwargs)
def _urlsafe_b64encode(data):
return base64.urlsafe_b64encode(
simplejson.dumps(data, separators = (',', ':'))\
.encode('UTF-8')).rstrip('=')
def _get_private_key(private_key_pkcs8_text):
"""Get an RSA private key object from a pkcs8 representation."""
der = rsa.pem.load_pem(private_key_pkcs8_text, 'PRIVATE KEY')
asn1_private_key, _ = decoder.decode(der, asn1Spec=PrivateKeyInfo())
return rsa.PrivateKey.load_pkcs1(
asn1_private_key.getComponentByName('privateKey').asOctets(),
format='DER')
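# Illustrative construction (a hedged sketch; all identifiers and the key
# material below are hypothetical placeholders):
#
#   creds = _ServiceAccountCredentials(
#       service_account_id='1234567890',
#       service_account_email='svc@my-project.iam.gserviceaccount.com',
#       private_key_id='abcdef0123456789',
#       private_key_pkcs8_text=PEM_TEXT,  # '-----BEGIN PRIVATE KEY-----...'
#       scopes=['https://www.googleapis.com/auth/devstorage.read_only'])
#   assertion = creds._generate_assertion()  # signed JWT for the token request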
|
Dandandan/wikiprogramming
|
refs/heads/master
|
jsrepl/extern/python/unclosured/lib/python2.7/encodings/utf_16_le.py
|
860
|
""" Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
return codecs.utf_16_le_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_le_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_le_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-le',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
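# Illustrative round trip through this codec (hedged example):
#
#   data, consumed = encode(u'abc')  # -> ('a\x00b\x00c\x00', 3)
#   text, length = decode(data)      # -> (u'abc', 6)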
|
a0c/odoo
|
refs/heads/master
|
openerp/addons/base/res/res_users.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
import logging
from functools import partial
from lxml import etree
from lxml.builder import E
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
import openerp.exceptions
from openerp.osv import fields,osv, expression
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
from openerp.http import request
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Basic res.groups and res.users
#----------------------------------------------------------
class res_groups(osv.osv):
_name = "res.groups"
_description = "Access Groups"
_rec_name = 'full_name'
_order = 'name'
def _get_full_name(self, cr, uid, ids, field, arg, context=None):
res = {}
for g in self.browse(cr, uid, ids, context):
if g.category_id:
res[g.id] = '%s / %s' % (g.category_id.name, g.name)
else:
res[g.id] = g.name
return res
def _search_group(self, cr, uid, obj, name, args, context=None):
operand = args[0][2]
operator = args[0][1]
lst = True
if isinstance(operand, bool):
domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not operand):
return expression.AND(domains)
else:
return expression.OR(domains)
if isinstance(operand, basestring):
lst = False
operand = [operand]
where = []
for group in operand:
values = filter(bool, group.split('/'))
group_name = values.pop().strip()
category_name = values and '/'.join(values).strip() or group_name
group_domain = [('name', operator, lst and [group_name] or group_name)]
category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
sub_where = expression.AND([group_domain, category_domain])
else:
sub_where = expression.OR([group_domain, category_domain])
if operator in expression.NEGATIVE_TERM_OPERATORS:
where = expression.AND([where, sub_where])
else:
where = expression.OR([where, sub_where])
return where
_columns = {
'name': fields.char('Name', required=True, translate=True),
'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'),
'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls'),
'rule_groups': fields.many2many('ir.rule', 'rule_group_rel',
'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]),
'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'),
'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'),
'comment' : fields.text('Comment', size=250, translate=True),
'category_id': fields.many2one('ir.module.category', 'Application', select=True),
'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group),
}
_sql_constraints = [
('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
]
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
# add explicit ordering if search is sorted on full_name
if order and order.startswith('full_name'):
ids = super(res_groups, self).search(cr, uid, args, context=context)
gs = self.browse(cr, uid, ids, context)
gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC'))
gs = gs[offset:offset+limit] if limit else gs[offset:]
return map(int, gs)
return super(res_groups, self).search(cr, uid, args, offset, limit, order, context, count)
def copy(self, cr, uid, id, default=None, context=None):
group_name = self.read(cr, uid, [id], ['name'])[0]['name']
default.update({'name': _('%s (copy)')%group_name})
return super(res_groups, self).copy(cr, uid, id, default, context)
def write(self, cr, uid, ids, vals, context=None):
if 'name' in vals:
if vals['name'].startswith('-'):
raise osv.except_osv(_('Error'),
                    _('The name of the group cannot start with "-"'))
res = super(res_groups, self).write(cr, uid, ids, vals, context=context)
self.pool['ir.model.access'].call_cache_clearing_methods(cr)
return res
class res_users(osv.osv):
""" User class. A res.users record models an OpenERP user and is different
from an employee.
res.users class now inherits from res.partner. The partner model is
used to store the data related to the partner: lang, name, address,
avatar, ... The user model is now dedicated to technical data.
"""
__admin_ids = {}
_uid_cache = {}
_inherits = {
'res.partner': 'partner_id',
}
_name = "res.users"
_description = 'Users'
def _set_new_password(self, cr, uid, id, name, value, args, context=None):
if value is False:
# Do not update the password if no value is provided, ignore silently.
# For example web client submits False values for all empty fields.
return
if uid == id:
# To change their own password users must use the client-specific change password wizard,
# so that the new password is immediately used for further RPC requests, otherwise the user
# will face unexpected 'Access Denied' exceptions.
raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
self.write(cr, uid, id, {'password': value})
def _get_password(self, cr, uid, ids, arg, karg, context=None):
return dict.fromkeys(ids, '')
_columns = {
'id': fields.integer('ID'),
'login_date': fields.date('Latest connection', select=1),
'partner_id': fields.many2one('res.partner', required=True,
string='Related Partner', ondelete='restrict',
help='Partner-related data of the user'),
'login': fields.char('Login', size=64, required=True,
help="Used to log into the system"),
'password': fields.char('Password', size=64, invisible=True,
help="Keep empty if you don't want the user to be able to connect on the system."),
'new_password': fields.function(_get_password, type='char', size=64,
fnct_inv=_set_new_password, string='Set Password',
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again."),
'signature': fields.html('Signature'),
'active': fields.boolean('Active'),
'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at log on for this user, in addition to the standard menu."),
'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'),
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
'company_id': fields.many2one('res.company', 'Company', required=True,
help='The company this user is currently working for.', context={'user_preference': True}),
'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'),
}
def on_change_login(self, cr, uid, ids, login, context=None):
if login and tools.single_email_re.match(login):
return {'value': {'email': login}}
return {}
def onchange_state(self, cr, uid, ids, state_id, context=None):
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context)
def onchange_type(self, cr, uid, ids, is_company, context=None):
""" Wrapper on the user.partner onchange_type, because some calls to the
partner form view applied to the user may trigger the
partner.onchange_type method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool['res.partner'].onchange_type(cr, uid, partner_ids, is_company, context=context)
def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):
""" Wrapper on the user.partner onchange_address, because some calls to the
partner form view applied to the user may trigger the
        partner.onchange_address method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool['res.partner'].onchange_address(cr, uid, partner_ids, use_parent_address, parent_id, context=context)
def _check_company(self, cr, uid, ids, context=None):
return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context))
_constraints = [
(_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']),
]
_sql_constraints = [
('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
]
def _get_company(self,cr, uid, context=None, uid2=False):
if not uid2:
uid2 = uid
user = self.pool['res.users'].read(cr, uid, uid2, ['company_id'], context)
company_id = user.get('company_id', False)
return company_id and company_id[0] or False
def _get_companies(self, cr, uid, context=None):
c = self._get_company(cr, uid, context)
if c:
return [c]
return False
def _get_group(self,cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
result = []
try:
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
result.append(group_id)
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
result.append(group_id)
except ValueError:
            # If these groups do not exist anymore
pass
return result
_defaults = {
'password': '',
'active': True,
'customer': False,
'company_id': _get_company,
'company_ids': _get_companies,
'groups_id': _get_group,
'image': lambda self, cr, uid, ctx={}: self.pool['res.partner']._get_default_image(cr, uid, False, ctx, colorize=True),
}
# User can write on a few of his own fields (but not his groups for example)
SELF_WRITEABLE_FIELDS = ['password', 'signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz']
# User can read a few of his own fields
SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update']
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
def override_password(o):
if 'password' in o and ('id' not in o or o['id'] != uid):
o['password'] = '********'
return o
if fields and (ids == [uid] or ids == uid):
for key in fields:
if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')):
break
else:
# safe fields only, so we read as super-user to bypass access rights
uid = SUPERUSER_ID
result = super(res_users, self).read(cr, uid, ids, fields=fields, context=context, load=load)
canwrite = self.pool['ir.model.access'].check(cr, uid, 'res.users', 'write', False)
if not canwrite:
if isinstance(ids, (int, long)):
result = override_password(result)
else:
result = map(override_password, result)
return result
def create(self, cr, uid, vals, context=None):
user_id = super(res_users, self).create(cr, uid, vals, context=context)
user = self.browse(cr, uid, user_id, context=context)
if user.partner_id.company_id:
user.partner_id.write({'company_id': user.company_id.id})
return user_id
def write(self, cr, uid, ids, values, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
if ids == [uid]:
for key in values.keys():
if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')):
break
else:
if 'company_id' in values:
if not (values['company_id'] in self.read(cr, SUPERUSER_ID, uid, ['company_ids'], context=context)['company_ids']):
del values['company_id']
uid = 1 # safe fields only, so we write as super-user to bypass access rights
res = super(res_users, self).write(cr, uid, ids, values, context=context)
if 'company_id' in values:
for user in self.browse(cr, uid, ids, context=context):
# if partner is global we keep it that way
if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']:
user.partner_id.write({'company_id': user.company_id.id})
# clear caches linked to the users
self.pool['ir.model.access'].call_cache_clearing_methods(cr)
clear = partial(self.pool['ir.rule'].clear_cache, cr)
map(clear, ids)
db = cr.dbname
if db in self._uid_cache:
for id in ids:
if id in self._uid_cache[db]:
del self._uid_cache[db][id]
self.context_get.clear_cache(self)
return res
def unlink(self, cr, uid, ids, context=None):
if 1 in ids:
            raise osv.except_osv(_('Cannot remove root user!'), _('You cannot remove the admin user as it is used internally for resources created by OpenERP (updates, module installation, ...)'))
db = cr.dbname
if db in self._uid_cache:
for id in ids:
if id in self._uid_cache[db]:
del self._uid_cache[db][id]
return super(res_users, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if not context:
context={}
ids = []
if name:
ids = self.search(cr, user, [('login','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name',operator,name)]+ args, limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
user2copy = self.read(cr, uid, [id], ['login','name'])[0]
default = dict(default or {})
if ('name' not in default) and ('partner_id' not in default):
default['name'] = _("%s (copy)") % user2copy['name']
if 'login' not in default:
default['login'] = _("%s (copy)") % user2copy['login']
return super(res_users, self).copy(cr, uid, id, default, context)
@tools.ormcache(skiparg=2)
def context_get(self, cr, uid, context=None):
user = self.browse(cr, SUPERUSER_ID, uid, context)
result = {}
for k in self._all_columns.keys():
if k.startswith('context_'):
context_key = k[8:]
elif k in ['lang', 'tz']:
context_key = k
else:
context_key = False
if context_key:
res = getattr(user,k) or False
if isinstance(res, browse_record):
res = res.id
result[context_key] = res or False
return result
def action_get(self, cr, uid, context=None):
dataobj = self.pool['ir.model.data']
data_id = dataobj._get_id(cr, SUPERUSER_ID, 'base', 'action_res_users_my')
return dataobj.browse(cr, uid, data_id, context=context).res_id
def check_super(self, passwd):
if passwd == tools.config['admin_passwd']:
return True
else:
raise openerp.exceptions.AccessDenied()
def check_credentials(self, cr, uid, password):
""" Override this method to plug additional authentication methods"""
res = self.search(cr, SUPERUSER_ID, [('id','=',uid),('password','=',password)])
if not res:
raise openerp.exceptions.AccessDenied()
def login(self, db, login, password):
if not password:
return False
user_id = False
cr = self.pool.cursor()
try:
# autocommit: our single update request will be performed atomically.
# (In this way, there is no opportunity to have two transactions
# interleaving their cr.execute()..cr.commit() calls and have one
# of them rolled back due to a concurrent access.)
cr.autocommit(True)
# check if user exists
res = self.search(cr, SUPERUSER_ID, [('login','=',login)])
if res:
user_id = res[0]
# check credentials
self.check_credentials(cr, user_id, password)
                # We effectively unconditionally write the res_users line.
                # Even with autocommit there's a chance the user row will be locked,
                # in which case we can't delay the login just for the purpose of
                # updating the last login date - hence we use FOR UPDATE NOWAIT to
                # try to get the lock and fail fast.
# Failing to acquire the lock on the res_users row probably means
# another request is holding it. No big deal, we don't want to
# prevent/delay login in that case. It will also have been logged
# as a SQL error, if anyone cares.
try:
cr.execute("SELECT id FROM res_users WHERE id=%s FOR UPDATE NOWAIT", (user_id,), log_exceptions=False)
cr.execute("UPDATE res_users SET login_date = now() AT TIME ZONE 'UTC' WHERE id=%s", (user_id,))
except Exception:
_logger.debug("Failed to update last_login for db:%s login:%s", db, login, exc_info=True)
except openerp.exceptions.AccessDenied:
_logger.info("Login failed for db:%s login:%s", db, login)
user_id = False
finally:
cr.close()
return user_id
def authenticate(self, db, login, password, user_agent_env):
"""Verifies and returns the user ID corresponding to the given
``login`` and ``password`` combination, or False if there was
no matching user.
:param str db: the database on which user is trying to authenticate
:param str login: username
:param str password: user password
:param dict user_agent_env: environment dictionary describing any
relevant environment attributes
"""
uid = self.login(db, login, password)
if uid == openerp.SUPERUSER_ID:
# Successfully logged in as admin!
# Attempt to guess the web base url...
if user_agent_env and user_agent_env.get('base_location'):
cr = self.pool.cursor()
try:
base = user_agent_env['base_location']
ICP = self.pool['ir.config_parameter']
if not ICP.get_param(cr, uid, 'web.base.url.freeze'):
ICP.set_param(cr, uid, 'web.base.url', base)
cr.commit()
except Exception:
_logger.exception("Failed to update web.base.url configuration parameter")
finally:
cr.close()
return uid
def check(self, db, uid, passwd):
"""Verifies that the given (uid, password) is authorized for the database ``db`` and
raise an exception if it is not."""
if not passwd:
# empty passwords disallowed for obvious security reasons
raise openerp.exceptions.AccessDenied()
if self._uid_cache.get(db, {}).get(uid) == passwd:
return
cr = self.pool.cursor()
try:
self.check_credentials(cr, uid, passwd)
            if db in self._uid_cache:
                self._uid_cache[db][uid] = passwd
            else:
                self._uid_cache[db] = {uid: passwd}
finally:
cr.close()
def change_password(self, cr, uid, old_passwd, new_passwd, context=None):
"""Change current user password. Old password must be provided explicitly
to prevent hijacking an existing user session, or for cases where the cleartext
password is not used to authenticate requests.
:return: True
:raise: openerp.exceptions.AccessDenied when old password is wrong
:raise: except_osv when new password is not set or empty
"""
self.check(cr.dbname, uid, old_passwd)
if new_passwd:
return self.write(cr, uid, uid, {'password': new_passwd})
raise osv.except_osv(_('Warning!'), _("Setting empty passwords is not allowed for security reasons!"))
def preference_save(self, cr, uid, ids, context=None):
return {
'type': 'ir.actions.client',
'tag': 'reload_context',
}
def preference_change_password(self, cr, uid, ids, context=None):
return {
'type': 'ir.actions.client',
'tag': 'change_password',
'target': 'new',
}
def has_group(self, cr, uid, group_ext_id):
"""Checks whether user belongs to given group.
:param str group_ext_id: external ID (XML ID) of the group.
Must be provided in fully-qualified form (``module.ext_id``), as there
           is no implicit module to use.
:return: True if the current user is a member of the group with the
given external ID (XML ID), else False.
"""
assert group_ext_id and '.' in group_ext_id, "External ID must be fully qualified"
module, ext_id = group_ext_id.split('.')
cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN
(SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
(uid, module, ext_id))
return bool(cr.fetchone())
#----------------------------------------------------------
# Implied groups
#
# Extension of res.groups and res.users with a relation for "implied"
# or "inherited" groups. Once a user belongs to a group, it
# automatically belongs to the implied groups (transitively).
#----------------------------------------------------------
class cset(object):
""" A cset (constrained set) is a set of elements that may be constrained to
be a subset of other csets. Elements added to a cset are automatically
added to its supersets. Cycles in the subset constraints are supported.
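
    Example (an illustrative doctest, added for clarity):

        >>> a = cset([1])
        >>> b = cset([2, 3])
        >>> a.subsetof(b)       # a's elements flow into its superset b
        >>> sorted(b)
        [1, 2, 3]
        >>> a.update([4])       # later additions propagate transitively
        >>> sorted(b)
        [1, 2, 3, 4]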
"""
def __init__(self, xs):
self.supersets = set()
self.elements = set(xs)
def subsetof(self, other):
if other is not self:
self.supersets.add(other)
other.update(self.elements)
def update(self, xs):
xs = set(xs) - self.elements
if xs: # xs will eventually be empty in case of a cycle
self.elements.update(xs)
for s in self.supersets:
s.update(xs)
def __iter__(self):
return iter(self.elements)
concat = itertools.chain.from_iterable
class groups_implied(osv.osv):
_inherit = 'res.groups'
def _get_trans_implied(self, cr, uid, ids, field, arg, context=None):
"computes the transitive closure of relation implied_ids"
memo = {} # use a memo for performance and cycle avoidance
def computed_set(g):
if g not in memo:
memo[g] = cset(g.implied_ids)
for h in g.implied_ids:
computed_set(h).subsetof(memo[g])
return memo[g]
res = {}
for g in self.browse(cr, SUPERUSER_ID, ids, context):
res[g.id] = map(int, computed_set(g))
return res
_columns = {
'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
string='Inherits', help='Users of this group automatically inherit those groups'),
'trans_implied_ids': fields.function(_get_trans_implied,
type='many2many', relation='res.groups', string='Transitively inherits'),
}
def create(self, cr, uid, values, context=None):
users = values.pop('users', None)
gid = super(groups_implied, self).create(cr, uid, values, context)
if users:
# delegate addition of users to add implied groups
self.write(cr, uid, [gid], {'users': users}, context)
return gid
def write(self, cr, uid, ids, values, context=None):
res = super(groups_implied, self).write(cr, uid, ids, values, context)
if values.get('users') or values.get('implied_ids'):
# add all implied groups (to all users of each group)
for g in self.browse(cr, uid, ids, context=context):
gids = map(int, g.trans_implied_ids)
vals = {'users': [(4, u.id) for u in g.users]}
super(groups_implied, self).write(cr, uid, gids, vals, context)
return res
class users_implied(osv.osv):
_inherit = 'res.users'
def create(self, cr, uid, values, context=None):
groups = values.pop('groups_id', None)
user_id = super(users_implied, self).create(cr, uid, values, context)
if groups:
# delegate addition of groups to add implied groups
self.write(cr, uid, [user_id], {'groups_id': groups}, context)
self.pool['ir.ui.view'].clear_cache()
return user_id
def write(self, cr, uid, ids, values, context=None):
if not isinstance(ids,list):
ids = [ids]
res = super(users_implied, self).write(cr, uid, ids, values, context)
if values.get('groups_id'):
# add implied groups for all users
for user in self.browse(cr, uid, ids):
gs = set(concat(g.trans_implied_ids for g in user.groups_id))
vals = {'groups_id': [(4, g.id) for g in gs]}
super(users_implied, self).write(cr, uid, [user.id], vals, context)
self.pool['ir.ui.view'].clear_cache()
return res
#----------------------------------------------------------
# Virtual checkbox and selection for the res.users form view
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
#    Selection fields typically define a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - boolean field 'in_groups_ID1_..._IDk' is True iff
# any of ID1, ..., IDk is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
#----------------------------------------------------------
def name_boolean_group(id): return 'in_group_' + str(id)
def name_boolean_groups(ids): return 'in_groups_' + '_'.join(map(str, ids))
def name_selection_groups(ids): return 'sel_groups_' + '_'.join(map(str, ids))
def is_boolean_group(name): return name.startswith('in_group_')
def is_boolean_groups(name): return name.startswith('in_groups_')
def is_selection_groups(name): return name.startswith('sel_groups_')
def is_reified_group(name):
return is_boolean_group(name) or is_boolean_groups(name) or is_selection_groups(name)
def get_boolean_group(name): return int(name[9:])
def get_boolean_groups(name): return map(int, name[10:].split('_'))
def get_selection_groups(name): return map(int, name[11:].split('_'))
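# Illustrative round-trips of the reified field names (added example; the
# group ids are hypothetical):
#
#     name_boolean_group(7)                     -> 'in_group_7'
#     get_boolean_group('in_group_7')           -> 7
#     name_selection_groups([3, 4, 5])          -> 'sel_groups_3_4_5'
#     get_selection_groups('sel_groups_3_4_5')  -> [3, 4, 5]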
def partition(f, xs):
"return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))"
yes, nos = [], []
for x in xs:
(yes if f(x) else nos).append(x)
return yes, nos
class groups_view(osv.osv):
_inherit = 'res.groups'
def create(self, cr, uid, values, context=None):
res = super(groups_view, self).create(cr, uid, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(groups_view, self).write(cr, uid, ids, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(groups_view, self).unlink(cr, uid, ids, context)
self.update_user_groups_view(cr, uid, context)
return res
def update_user_groups_view(self, cr, uid, context=None):
# the view with id 'base.user_groups_view' inherits the user form view,
# and introduces the reified group fields
        # we have to guard this, because at first init the view does not exist
        # yet, while some basic groups are already being created
view = self.pool['ir.model.data'].xmlid_to_object(cr, SUPERUSER_ID, 'base.user_groups_view', context=context)
if view and view.exists() and view._table_name == 'ir.ui.view':
xml1, xml2 = [], []
xml1.append(E.separator(string=_('Application'), colspan="4"))
for app, kind, gs in self.get_groups_by_application(cr, uid, context):
# hide groups in category 'Hidden' (except to group_no_one)
attrs = {'groups': 'base.group_no_one'} if app and app.xml_id == 'base.module_category_hidden' else {}
if kind == 'selection':
# application name with a selection field
field_name = name_selection_groups(map(int, gs))
xml1.append(E.field(name=field_name, **attrs))
xml1.append(E.newline())
else:
# application separator with boolean fields
app_name = app and app.name or _('Other')
xml2.append(E.separator(string=app_name, colspan="4", **attrs))
for g in gs:
field_name = name_boolean_group(g.id)
xml2.append(E.field(name=field_name, **attrs))
xml = E.field(*(xml1 + xml2), name="groups_id", position="replace")
xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8")
view.write({'arch': xml_content})
return True
def get_application_groups(self, cr, uid, domain=None, context=None):
return self.search(cr, uid, domain or [])
def get_groups_by_application(self, cr, uid, context=None):
""" return all groups classified by application (module category), as a list of pairs:
[(app, kind, [group, ...]), ...],
where app and group are browse records, and kind is either 'boolean' or 'selection'.
Applications are given in sequence order. If kind is 'selection', the groups are
given in reverse implication order.
"""
def linearized(gs):
gs = set(gs)
# determine sequence order: a group should appear after its implied groups
order = dict.fromkeys(gs, 0)
for g in gs:
for h in gs.intersection(g.trans_implied_ids):
order[h] -= 1
# check whether order is total, i.e., sequence orders are distinct
if len(set(order.itervalues())) == len(gs):
return sorted(gs, key=lambda g: order[g])
return None
# classify all groups by application
gids = self.get_application_groups(cr, uid, context=context)
by_app, others = {}, []
for g in self.browse(cr, uid, gids, context):
if g.category_id:
by_app.setdefault(g.category_id, []).append(g)
else:
others.append(g)
# build the result
res = []
apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0)
for app in apps:
gs = linearized(by_app[app])
if gs:
res.append((app, 'selection', gs))
else:
res.append((app, 'boolean', by_app[app]))
if others:
res.append((False, 'boolean', others))
return res
class users_view(osv.osv):
_inherit = 'res.users'
def create(self, cr, uid, values, context=None):
self._set_reified_groups(values)
return super(users_view, self).create(cr, uid, values, context)
def write(self, cr, uid, ids, values, context=None):
self._set_reified_groups(values)
return super(users_view, self).write(cr, uid, ids, values, context)
def _set_reified_groups(self, values):
""" reflect reified group fields in values['groups_id'] """
if 'groups_id' in values:
# groups are already given, ignore group fields
for f in filter(is_reified_group, values.iterkeys()):
del values[f]
return
add, remove = [], []
for f in values.keys():
if is_boolean_group(f):
target = add if values.pop(f) else remove
target.append(get_boolean_group(f))
elif is_boolean_groups(f):
if not values.pop(f):
remove.extend(get_boolean_groups(f))
elif is_selection_groups(f):
remove.extend(get_selection_groups(f))
selected = values.pop(f)
if selected:
add.append(selected)
# update values *only* if groups are being modified, otherwise
# we introduce spurious changes that might break the super.write() call.
if add or remove:
# remove groups in 'remove' and add groups in 'add'
values['groups_id'] = [(3, id) for id in remove] + [(4, id) for id in add]
def default_get(self, cr, uid, fields, context=None):
group_fields, fields = partition(is_reified_group, fields)
fields1 = (fields + ['groups_id']) if group_fields else fields
values = super(users_view, self).default_get(cr, uid, fields1, context)
self._get_reified_groups(group_fields, values)
# add "default_groups_ref" inside the context to set default value for group_id with xml values
        if context and 'groups_id' in fields and isinstance(context.get("default_groups_ref"), list):
groups = []
ir_model_data = self.pool.get('ir.model.data')
for group_xml_id in context["default_groups_ref"]:
group_split = group_xml_id.split('.')
if len(group_split) != 2:
                    raise osv.except_osv(_('Invalid context value'), _('Invalid context default_groups_ref value (expected "module.xml_id"): "%s"') % group_xml_id)
try:
temp, group_id = ir_model_data.get_object_reference(cr, uid, group_split[0], group_split[1])
except ValueError:
group_id = False
groups += [group_id]
values['groups_id'] = groups
return values
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
fields_get = fields if fields is not None else self.fields_get(cr, uid, context=context).keys()
group_fields, _ = partition(is_reified_group, fields_get)
inject_groups_id = group_fields and fields and 'groups_id' not in fields
if inject_groups_id:
fields.append('groups_id')
res = super(users_view, self).read(cr, uid, ids, fields, context=context, load=load)
if res and group_fields:
for values in (res if isinstance(res, list) else [res]):
self._get_reified_groups(group_fields, values)
if inject_groups_id:
values.pop('groups_id', None)
return res
def _get_reified_groups(self, fields, values):
""" compute the given reified group fields from values['groups_id'] """
gids = set(values.get('groups_id') or [])
for f in fields:
if is_boolean_group(f):
values[f] = get_boolean_group(f) in gids
elif is_boolean_groups(f):
values[f] = not gids.isdisjoint(get_boolean_groups(f))
elif is_selection_groups(f):
selected = [gid for gid in get_selection_groups(f) if gid in gids]
values[f] = selected and selected[-1] or False
def fields_get(self, cr, uid, allfields=None, context=None, write_access=True):
res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access)
# add reified groups fields
for app, kind, gs in self.pool['res.groups'].get_groups_by_application(cr, uid, context):
if kind == 'selection':
# selection group field
tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment]
res[name_selection_groups(map(int, gs))] = {
'type': 'selection',
'string': app and app.name or _('Other'),
'selection': [(False, '')] + [(g.id, g.name) for g in gs],
'help': '\n'.join(tips),
'exportable': False,
'selectable': False,
}
else:
# boolean group fields
for g in gs:
res[name_boolean_group(g.id)] = {
'type': 'boolean',
'string': g.name,
'help': g.comment,
'exportable': False,
'selectable': False,
}
return res
#----------------------------------------------------------
# change password wizard
#----------------------------------------------------------
class change_password_wizard(osv.TransientModel):
"""
A wizard to manage the change of users' passwords
"""
_name = "change.password.wizard"
_description = "Change Password Wizard"
_columns = {
'user_ids': fields.one2many('change.password.user', 'wizard_id', string='Users'),
}
def default_get(self, cr, uid, fields, context=None):
        if context is None:
context = {}
user_ids = context.get('active_ids', [])
wiz_id = context.get('active_id', None)
res = []
users = self.pool.get('res.users').browse(cr, uid, user_ids, context=context)
for user in users:
res.append((0, 0, {
'wizard_id': wiz_id,
'user_id': user.id,
'user_login': user.login,
}))
return {'user_ids': res}
def change_password_button(self, cr, uid, id, context=None):
wizard = self.browse(cr, uid, id, context=context)[0]
need_reload = any(uid == user.user_id.id for user in wizard.user_ids)
line_ids = [user.id for user in wizard.user_ids]
self.pool.get('change.password.user').change_password_button(cr, uid, line_ids, context=context)
# don't keep temporary password copies in the database longer than necessary
self.pool.get('change.password.user').write(cr, uid, line_ids, {'new_passwd': False}, context=context)
if need_reload:
return {
'type': 'ir.actions.client',
'tag': 'reload'
}
return {'type': 'ir.actions.act_window_close'}
class change_password_user(osv.TransientModel):
"""
A model to configure users in the change password wizard
"""
_name = 'change.password.user'
_description = 'Change Password Wizard User'
_columns = {
'wizard_id': fields.many2one('change.password.wizard', string='Wizard', required=True),
'user_id': fields.many2one('res.users', string='User', required=True),
'user_login': fields.char('User Login', readonly=True),
'new_passwd': fields.char('New Password'),
}
_defaults = {
'new_passwd': '',
}
def change_password_button(self, cr, uid, ids, context=None):
for user in self.browse(cr, uid, ids, context=context):
self.pool.get('res.users').write(cr, uid, user.user_id.id, {'password': user.new_passwd})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nsoranzo/tools-iuc
|
refs/heads/master
|
tools/query_tabular/filters.py
|
11
|
#!/usr/bin/env python
from __future__ import print_function
import re
import sys
class LineFilter(object):
def __init__(self, source, filter_dict):
self.source = source
self.filter_dict = filter_dict
self.func = lambda i, l: l.rstrip('\r\n') if l else None
self.src_lines = []
self.src_line_cnt = 0
if not filter_dict:
return
if filter_dict['filter'] == 'regex':
rgx = re.compile(filter_dict['pattern'])
if filter_dict['action'] == 'exclude_match':
self.func = lambda i, l: l if not rgx.match(l) else None
elif filter_dict['action'] == 'include_match':
self.func = lambda i, l: l if rgx.match(l) else None
elif filter_dict['action'] == 'exclude_find':
self.func = lambda i, l: l if not rgx.search(l) else None
elif filter_dict['action'] == 'include_find':
self.func = lambda i, l: l if rgx.search(l) else None
elif filter_dict['filter'] == 'select_columns':
cols = [int(c) - 1 for c in filter_dict['columns']]
self.func = lambda i, l: self.select_columns(l, cols)
elif filter_dict['filter'] == 'replace':
p = filter_dict['pattern']
r = filter_dict['replace']
c = int(filter_dict['column']) - 1
if 'add' not in filter_dict\
or filter_dict['add'] not in ['prepend',
'append',
'before',
'after']:
self.func = lambda i, l: '\t'.join(
[x if j != c else re.sub(p, r, x)
for j, x in enumerate(l.split('\t'))])
else:
                # index 0 prepends; c inserts directly before column c;
                # c + 1 inserts right after it
                a = 0 if filter_dict['add'] == 'prepend'\
                    else c if filter_dict['add'] == 'before'\
                    else c + 1 if filter_dict['add'] == 'after'\
                    else None
self.func = lambda i, l: self.replace_add(l, p, r, c, a)
elif filter_dict['filter'] == 'prepend_line_num':
self.func = lambda i, l: '%d\t%s' % (i, l)
elif filter_dict['filter'] == 'append_line_num':
self.func = lambda i, l: '%s\t%d' % (l.rstrip('\r\n'), i)
elif filter_dict['filter'] == 'prepend_text':
s = filter_dict['column_text']
self.func = lambda i, l: '%s\t%s' % (s, l)
elif filter_dict['filter'] == 'append_text':
s = filter_dict['column_text']
self.func = lambda i, l: '%s\t%s' % (l.rstrip('\r\n'), s)
elif filter_dict['filter'] == 'skip':
cnt = filter_dict['count']
self.func = lambda i, l: l if i > cnt else None
elif filter_dict['filter'] == 'normalize':
cols = [int(c) - 1 for c in filter_dict['columns']]
sep = filter_dict['separator']
self.func = lambda i, l: self.normalize(l, cols, sep)
def __iter__(self):
return self
def __next__(self):
if not self.src_lines:
self.get_lines()
if self.src_lines:
return self.src_lines.pop(0)
raise StopIteration
next = __next__
def select_columns(self, line, cols):
fields = line.split('\t')
return '\t'.join([fields[x] for x in cols])
def replace_add(self, line, pat, rep, col, pos):
fields = line.rstrip('\r\n').split('\t')
i = pos if pos is not None else len(fields)
val = ''
if col < len(fields) and re.search(pat, fields[col]):
val = re.sub(pat, rep, fields[col]).replace('\t', ' ')
return '\t'.join(fields[:i] + [val] + fields[i:])
def normalize(self, line, split_cols, sep):
lines = []
fields = line.rstrip('\r\n').split('\t')
split_fields = dict()
cnt = 0
for c in split_cols:
if c < len(fields):
split_fields[c] = fields[c].split(sep)
cnt = max(cnt, len(split_fields[c]))
if cnt == 0:
lines.append('\t'.join(fields))
else:
for n in range(0, cnt):
flds = [x if c not in split_cols else split_fields[c][n]
if n < len(split_fields[c])
else '' for (c, x) in enumerate(fields)]
lines.append('\t'.join(flds))
return lines
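    # Illustrative behaviour of normalize (added example):
    #   normalize('a\tx,y\t1', split_cols=[1], sep=',')
    #   -> ['a\tx\t1', 'a\ty\t1']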
def get_lines(self):
for i, next_line in enumerate(self.source):
self.src_line_cnt += 1
line = self.func(self.src_line_cnt, next_line)
if line:
if isinstance(line, list):
self.src_lines.extend(line)
else:
self.src_lines.append(line)
return
class TabularReader:
"""
Tabular file iterator. Returns a list
"""
def __init__(self, input_file, skip=0, comment_char=None, col_idx=None,
filters=None):
self.skip = skip
self.comment_char = comment_char
self.col_idx = col_idx
self.filters = filters
self.tsv_file = \
input_file if hasattr(input_file, 'readline') else open(input_file)
if skip and skip > 0:
for i in range(skip):
if not self.tsv_file.readline():
break
source = LineFilter(self.tsv_file, None)
if comment_char:
source = LineFilter(source,
{"filter": "regex", "pattern": comment_char,
"action": "exclude_match"})
if filters:
for f in filters:
source = LineFilter(source, f)
self.source = source
def __iter__(self):
return self
def __next__(self):
''' Iteration '''
for i, line in enumerate(self.source):
fields = line.rstrip('\r\n').split('\t')
if self.col_idx:
fields = [fields[i] for i in self.col_idx]
return fields
raise StopIteration
next = __next__
def filter_file(input_file, output, skip=0, comment_char='#', filters=None):
data_lines = 0
try:
tr = TabularReader(input_file, skip=skip, comment_char=comment_char,
filters=filters)
for linenum, fields in enumerate(tr):
data_lines += 1
try:
output.write('%s\n' % '\t'.join(fields))
except Exception as e:
print('Failed at line: %d err: %s' % (linenum, e),
file=sys.stderr)
except Exception as e:
        sys.exit('Error: %s' % e)
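# Illustrative usage sketch (added; not part of the original tool). The
# filter dicts follow the keys consumed by LineFilter above.
if __name__ == '__main__':
    from io import StringIO
    _src = StringIO(u'# header comment\nalpha\t1\nbeta\t2\n')
    _filters = [{'filter': 'prepend_line_num'}]
    # Drops the '#' comment line, then prepends a line-number column:
    #   1<TAB>alpha<TAB>1
    #   2<TAB>beta<TAB>2
    filter_file(_src, sys.stdout, comment_char='#', filters=_filters)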
|
yfried/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/aruba/aruba.py
|
84
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
_DEVICE_CONFIGS = {}
aruba_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
}
aruba_argument_spec = {
'provider': dict(type='dict', options=aruba_provider_spec)
}
aruba_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'timeout': dict(removed_in_version=2.9, type='int'),
}
aruba_argument_spec.update(aruba_top_spec)
def get_provider_argspec():
return aruba_provider_spec
def check_args(module, warnings):
pass
def get_config(module, flags=None):
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip())
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def sanitize(resp):
# Takes response from device and adjusts leading whitespace to just 1 space
cleaned = []
for line in resp.splitlines():
cleaned.append(re.sub(r"^\s+", " ", line))
return '\n'.join(cleaned).strip()
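# e.g. (illustrative): sanitize("   interface mgmt\n\t  vlan 1")
# returns "interface mgmt\n vlan 1"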
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
def load_config(module, commands):
rc, out, err = exec_command(module, 'configure terminal')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))
for command in to_list(commands):
if command == 'end':
continue
rc, out, err = exec_command(module, command)
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
exec_command(module, 'end')
|
patrickm/chromium.src
|
refs/heads/nw
|
third_party/tlslite/tlslite/HandshakeSettings.py
|
359
|
"""Class for setting handshake parameters."""
from constants import CertificateType
from utils import cryptomath
from utils import cipherfactory
class HandshakeSettings:
"""This class encapsulates various parameters that can be used with
a TLS handshake.
@sort: minKeySize, maxKeySize, cipherNames, certificateTypes,
minVersion, maxVersion
@type minKeySize: int
@ivar minKeySize: The minimum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters smaller than this length, an alert will be
signalled. The default is 1023.
@type maxKeySize: int
@ivar maxKeySize: The maximum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters larger than this length, an alert will be signalled.
The default is 8193.
@type cipherNames: list
@ivar cipherNames: The allowed ciphers, in order of preference.
The allowed values in this list are 'aes256', 'aes128', '3des', and
'rc4'. If these settings are used with a client handshake, they
determine the order of the ciphersuites offered in the ClientHello
message.
If these settings are used with a server handshake, the server will
choose whichever ciphersuite matches the earliest entry in this
list.
NOTE: If '3des' is used in this list, but TLS Lite can't find an
add-on library that supports 3DES, then '3des' will be silently
removed.
The default value is ['aes256', 'aes128', '3des', 'rc4'].
@type certificateTypes: list
@ivar certificateTypes: The allowed certificate types, in order of
preference.
The allowed values in this list are 'x509' and 'cryptoID'. This
list is only used with a client handshake. The client will
advertise to the server which certificate types are supported, and
will check that the server uses one of the appropriate types.
NOTE: If 'cryptoID' is used in this list, but cryptoIDlib is not
installed, then 'cryptoID' will be silently removed.
@type minVersion: tuple
@ivar minVersion: The minimum allowed SSL/TLS version.
This variable can be set to (3,0) for SSL 3.0, (3,1) for
TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to
use a lower version, a protocol_version alert will be signalled.
The default is (3,0).
@type maxVersion: tuple
@ivar maxVersion: The maximum allowed SSL/TLS version.
This variable can be set to (3,0) for SSL 3.0, (3,1) for
TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to
use a higher version, a protocol_version alert will be signalled.
The default is (3,2). (WARNING: Some servers may (improperly)
reject clients which offer support for TLS 1.1. In this case,
try lowering maxVersion to (3,1)).
"""
def __init__(self):
self.minKeySize = 1023
self.maxKeySize = 8193
self.cipherNames = ["aes256", "aes128", "3des", "rc4"]
self.cipherImplementations = ["cryptlib", "openssl", "pycrypto",
"python"]
self.certificateTypes = ["x509", "cryptoID"]
self.minVersion = (3,0)
self.maxVersion = (3,2)
#Filters out options that are not supported
def _filter(self):
other = HandshakeSettings()
other.minKeySize = self.minKeySize
other.maxKeySize = self.maxKeySize
other.cipherNames = self.cipherNames
other.cipherImplementations = self.cipherImplementations
other.certificateTypes = self.certificateTypes
other.minVersion = self.minVersion
other.maxVersion = self.maxVersion
if not cipherfactory.tripleDESPresent:
other.cipherNames = [e for e in self.cipherNames if e != "3des"]
if len(other.cipherNames)==0:
raise ValueError("No supported ciphers")
try:
import cryptoIDlib
except ImportError:
other.certificateTypes = [e for e in self.certificateTypes \
if e != "cryptoID"]
if len(other.certificateTypes)==0:
raise ValueError("No supported certificate types")
if not cryptomath.cryptlibpyLoaded:
other.cipherImplementations = [e for e in \
self.cipherImplementations if e != "cryptlib"]
if not cryptomath.m2cryptoLoaded:
other.cipherImplementations = [e for e in \
other.cipherImplementations if e != "openssl"]
if not cryptomath.pycryptoLoaded:
other.cipherImplementations = [e for e in \
other.cipherImplementations if e != "pycrypto"]
if len(other.cipherImplementations)==0:
raise ValueError("No supported cipher implementations")
if other.minKeySize<512:
raise ValueError("minKeySize too small")
if other.minKeySize>16384:
raise ValueError("minKeySize too large")
if other.maxKeySize<512:
raise ValueError("maxKeySize too small")
if other.maxKeySize>16384:
raise ValueError("maxKeySize too large")
for s in other.cipherNames:
if s not in ("aes256", "aes128", "rc4", "3des"):
raise ValueError("Unknown cipher name: '%s'" % s)
for s in other.cipherImplementations:
if s not in ("cryptlib", "openssl", "python", "pycrypto"):
raise ValueError("Unknown cipher implementation: '%s'" % s)
for s in other.certificateTypes:
if s not in ("x509", "cryptoID"):
raise ValueError("Unknown certificate type: '%s'" % s)
if other.minVersion > other.maxVersion:
raise ValueError("Versions set incorrectly")
if not other.minVersion in ((3,0), (3,1), (3,2)):
raise ValueError("minVersion set incorrectly")
if not other.maxVersion in ((3,0), (3,1), (3,2)):
raise ValueError("maxVersion set incorrectly")
return other
def _getCertificateTypes(self):
l = []
for ct in self.certificateTypes:
if ct == "x509":
l.append(CertificateType.x509)
elif ct == "cryptoID":
l.append(CertificateType.cryptoID)
else:
raise AssertionError()
return l
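# Illustrative usage (added sketch, not part of the original module; it
# exercises the private _filter() purely for demonstration):
if __name__ == '__main__':
    settings = HandshakeSettings()
    settings.minVersion = (3,1)                  # require at least TLS 1.0
    settings.cipherNames = ["aes256", "aes128"]  # offer only AES suites
    settings = settings._filter()                # validate and drop unsupported options
    print(settings.cipherNames)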
|
robobrobro/ballin-octo-shame
|
refs/heads/master
|
lib/Python-3.4.3/Lib/email/headerregistry.py
|
14
|
"""Representing and manipulating email headers via custom objects.
This module provides an implementation of the HeaderRegistry API.
The implementation is designed to flexibly follow RFC5322 rules.
Eventually HeaderRegistry will be a public API, but it isn't yet,
and will probably change some before that happens.
"""
from types import MappingProxyType
from email import utils
from email import errors
from email import _header_value_parser as parser
class Address:
def __init__(self, display_name='', username='', domain='', addr_spec=None):
"""Create an object represeting a full email address.
An address can have a 'display_name', a 'username', and a 'domain'. In
addition to specifying the username and domain separately, they may be
specified together by using the addr_spec keyword *instead of* the
username and domain keywords. If an addr_spec string is specified it
must be properly quoted according to RFC 5322 rules; an error will be
raised if it is not.
An Address object has display_name, username, domain, and addr_spec
attributes, all of which are read-only. The addr_spec and the string
value of the object are both quoted according to RFC5322 rules, but
without any Content Transfer Encoding.
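
        Example (an illustrative doctest, added for clarity):

            >>> a = Address('Fred', 'fred', 'example.com')
            >>> a.addr_spec
            'fred@example.com'
            >>> str(a)
            'Fred <fred@example.com>'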
"""
# This clause with its potential 'raise' may only happen when an
# application program creates an Address object using an addr_spec
# keyword. The email library code itself must always supply username
# and domain.
if addr_spec is not None:
if username or domain:
raise TypeError("addrspec specified when username and/or "
"domain also specified")
a_s, rest = parser.get_addr_spec(addr_spec)
if rest:
raise ValueError("Invalid addr_spec; only '{}' "
"could be parsed from '{}'".format(
a_s, addr_spec))
if a_s.all_defects:
raise a_s.all_defects[0]
username = a_s.local_part
domain = a_s.domain
self._display_name = display_name
self._username = username
self._domain = domain
@property
def display_name(self):
return self._display_name
@property
def username(self):
return self._username
@property
def domain(self):
return self._domain
@property
def addr_spec(self):
"""The addr_spec (username@domain) portion of the address, quoted
according to RFC 5322 rules, but with no Content Transfer Encoding.
"""
nameset = set(self.username)
if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS):
lp = parser.quote_string(self.username)
else:
lp = self.username
if self.domain:
return lp + '@' + self.domain
if not lp:
return '<>'
return lp
def __repr__(self):
return "Address(display_name={!r}, username={!r}, domain={!r})".format(
self.display_name, self.username, self.domain)
def __str__(self):
nameset = set(self.display_name)
if len(nameset) > len(nameset-parser.SPECIALS):
disp = parser.quote_string(self.display_name)
else:
disp = self.display_name
if disp:
addr_spec = '' if self.addr_spec=='<>' else self.addr_spec
return "{} <{}>".format(disp, addr_spec)
return self.addr_spec
def __eq__(self, other):
if type(other) != type(self):
return False
return (self.display_name == other.display_name and
self.username == other.username and
self.domain == other.domain)
class Group:
def __init__(self, display_name=None, addresses=None):
"""Create an object representing an address group.
        An address group consists of a display_name followed by a colon and a
        list of addresses (see Address) terminated by a semi-colon. The Group
is created by specifying a display_name and a possibly empty list of
Address objects. A Group can also be used to represent a single
address that is not in a group, which is convenient when manipulating
lists that are a combination of Groups and individual Addresses. In
this case the display_name should be set to None. In particular, the
string representation of a Group whose display_name is None is the same
as the Address object, if there is one and only one Address object in
the addresses list.
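
        Example (an illustrative doctest, added for clarity):

            >>> g = Group('friends', [Address('Bob', 'bob', 'example.com')])
            >>> str(g)
            'friends: Bob <bob@example.com>;'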
"""
self._display_name = display_name
self._addresses = tuple(addresses) if addresses else tuple()
@property
def display_name(self):
return self._display_name
@property
def addresses(self):
return self._addresses
def __repr__(self):
return "Group(display_name={!r}, addresses={!r}".format(
self.display_name, self.addresses)
def __str__(self):
if self.display_name is None and len(self.addresses)==1:
return str(self.addresses[0])
disp = self.display_name
if disp is not None:
nameset = set(disp)
if len(nameset) > len(nameset-parser.SPECIALS):
disp = parser.quote_string(disp)
adrstr = ", ".join(str(x) for x in self.addresses)
adrstr = ' ' + adrstr if adrstr else adrstr
return "{}:{};".format(disp, adrstr)
def __eq__(self, other):
if type(other) != type(self):
return False
return (self.display_name == other.display_name and
self.addresses == other.addresses)
# Header Classes #
class BaseHeader(str):
"""Base class for message headers.
Implements generic behavior and provides tools for subclasses.
A subclass must define a classmethod named 'parse' that takes an unfolded
value string and a dictionary as its arguments. The dictionary will
contain one key, 'defects', initialized to an empty list. After the call
the dictionary must contain two additional keys: parse_tree, set to the
parse tree obtained from parsing the header, and 'decoded', set to the
string value of the idealized representation of the data from the value.
(That is, encoded words are decoded, and values that have canonical
representations are so represented.)
The defects key is intended to collect parsing defects, which the message
parser will subsequently dispose of as appropriate. The parser should not,
insofar as practical, raise any errors. Defects should be added to the
list instead. The standard header parsers register defects for RFC
compliance issues, for obsolete RFC syntax, and for unrecoverable parsing
errors.
The parse method may add additional keys to the dictionary. In this case
the subclass must define an 'init' method, which will be passed the
dictionary as its keyword arguments. The method should use (usually by
setting them as the value of similarly named attributes) and remove all the
extra keys added by its parse method, and then use super to call its parent
class with the remaining arguments and keywords.
The subclass should also make sure that a 'max_count' attribute is defined
that is either None or 1. XXX: need to better define this API.
"""
def __new__(cls, name, value):
kwds = {'defects': []}
cls.parse(value, kwds)
if utils._has_surrogates(kwds['decoded']):
kwds['decoded'] = utils._sanitize(kwds['decoded'])
self = str.__new__(cls, kwds['decoded'])
del kwds['decoded']
self.init(name, **kwds)
return self
def init(self, name, *, parse_tree, defects):
self._name = name
self._parse_tree = parse_tree
self._defects = defects
@property
def name(self):
return self._name
@property
def defects(self):
return tuple(self._defects)
def __reduce__(self):
return (
_reconstruct_header,
(
self.__class__.__name__,
self.__class__.__bases__,
str(self),
),
self.__dict__)
@classmethod
def _reconstruct(cls, value):
return str.__new__(cls, value)
def fold(self, *, policy):
"""Fold header according to policy.
The parsed representation of the header is folded according to
RFC5322 rules, as modified by the policy. If the parse tree
contains surrogateescaped bytes, the bytes are CTE encoded using
        the charset 'unknown-8bit'.
Any non-ASCII characters in the parse tree are CTE encoded using
charset utf-8. XXX: make this a policy setting.
The returned value is an ASCII-only string possibly containing linesep
characters, and ending with a linesep character. The string includes
the header name and the ': ' separator.
"""
# At some point we need to only put fws here if it was in the source.
header = parser.Header([
parser.HeaderLabel([
parser.ValueTerminal(self.name, 'header-name'),
parser.ValueTerminal(':', 'header-sep')]),
parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]),
self._parse_tree])
return header.fold(policy=policy)
def _reconstruct_header(cls_name, bases, value):
return type(cls_name, bases, {})._reconstruct(value)
class UnstructuredHeader:
max_count = None
value_parser = staticmethod(parser.get_unstructured)
@classmethod
def parse(cls, value, kwds):
kwds['parse_tree'] = cls.value_parser(value)
kwds['decoded'] = str(kwds['parse_tree'])
class UniqueUnstructuredHeader(UnstructuredHeader):
max_count = 1
class DateHeader:
"""Header whose value consists of a single timestamp.
Provides an additional attribute, datetime, which is either an aware
datetime using a timezone, or a naive datetime if the timezone
in the input string is -0000. Also accepts a datetime as input.
The 'value' attribute is the normalized form of the timestamp,
which means it is the output of format_datetime on the datetime.
"""
max_count = None
# This is used only for folding, not for creating 'decoded'.
value_parser = staticmethod(parser.get_unstructured)
@classmethod
def parse(cls, value, kwds):
if not value:
kwds['defects'].append(errors.HeaderMissingRequiredValue())
kwds['datetime'] = None
kwds['decoded'] = ''
kwds['parse_tree'] = parser.TokenList()
return
if isinstance(value, str):
value = utils.parsedate_to_datetime(value)
kwds['datetime'] = value
kwds['decoded'] = utils.format_datetime(kwds['datetime'])
kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
def init(self, *args, **kw):
self._datetime = kw.pop('datetime')
super().init(*args, **kw)
@property
def datetime(self):
return self._datetime
class UniqueDateHeader(DateHeader):
max_count = 1
class AddressHeader:
max_count = None
@staticmethod
def value_parser(value):
address_list, value = parser.get_address_list(value)
assert not value, 'this should not happen'
return address_list
@classmethod
def parse(cls, value, kwds):
if isinstance(value, str):
# We are translating here from the RFC language (address/mailbox)
# to our API language (group/address).
kwds['parse_tree'] = address_list = cls.value_parser(value)
groups = []
for addr in address_list.addresses:
groups.append(Group(addr.display_name,
[Address(mb.display_name or '',
mb.local_part or '',
mb.domain or '')
for mb in addr.all_mailboxes]))
defects = list(address_list.all_defects)
else:
# Assume it is Address/Group stuff
if not hasattr(value, '__iter__'):
value = [value]
groups = [Group(None, [item]) if not hasattr(item, 'addresses')
else item
for item in value]
defects = []
kwds['groups'] = groups
kwds['defects'] = defects
kwds['decoded'] = ', '.join([str(item) for item in groups])
if 'parse_tree' not in kwds:
kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
def init(self, *args, **kw):
self._groups = tuple(kw.pop('groups'))
self._addresses = None
super().init(*args, **kw)
@property
def groups(self):
return self._groups
@property
def addresses(self):
if self._addresses is None:
self._addresses = tuple([address for group in self._groups
for address in group.addresses])
return self._addresses
class UniqueAddressHeader(AddressHeader):
max_count = 1
class SingleAddressHeader(AddressHeader):
@property
def address(self):
if len(self.addresses)!=1:
raise ValueError(("value of single address header {} is not "
"a single address").format(self.name))
return self.addresses[0]
class UniqueSingleAddressHeader(SingleAddressHeader):
max_count = 1
class MIMEVersionHeader:
max_count = 1
value_parser = staticmethod(parser.parse_mime_version)
@classmethod
def parse(cls, value, kwds):
kwds['parse_tree'] = parse_tree = cls.value_parser(value)
kwds['decoded'] = str(parse_tree)
kwds['defects'].extend(parse_tree.all_defects)
kwds['major'] = None if parse_tree.minor is None else parse_tree.major
kwds['minor'] = parse_tree.minor
if parse_tree.minor is not None:
kwds['version'] = '{}.{}'.format(kwds['major'], kwds['minor'])
else:
kwds['version'] = None
def init(self, *args, **kw):
self._version = kw.pop('version')
self._major = kw.pop('major')
self._minor = kw.pop('minor')
super().init(*args, **kw)
@property
def major(self):
return self._major
@property
def minor(self):
return self._minor
@property
def version(self):
return self._version
class ParameterizedMIMEHeader:
# Mixin that handles the params dict. Must be subclassed and
# a property value_parser for the specific header provided.
max_count = 1
@classmethod
def parse(cls, value, kwds):
kwds['parse_tree'] = parse_tree = cls.value_parser(value)
kwds['decoded'] = str(parse_tree)
kwds['defects'].extend(parse_tree.all_defects)
if parse_tree.params is None:
kwds['params'] = {}
else:
# The MIME RFCs specify that parameter ordering is arbitrary.
kwds['params'] = {utils._sanitize(name).lower():
utils._sanitize(value)
for name, value in parse_tree.params}
def init(self, *args, **kw):
self._params = kw.pop('params')
super().init(*args, **kw)
@property
def params(self):
return MappingProxyType(self._params)
class ContentTypeHeader(ParameterizedMIMEHeader):
value_parser = staticmethod(parser.parse_content_type_header)
def init(self, *args, **kw):
super().init(*args, **kw)
self._maintype = utils._sanitize(self._parse_tree.maintype)
self._subtype = utils._sanitize(self._parse_tree.subtype)
@property
def maintype(self):
return self._maintype
@property
def subtype(self):
return self._subtype
@property
def content_type(self):
return self.maintype + '/' + self.subtype
class ContentDispositionHeader(ParameterizedMIMEHeader):
value_parser = staticmethod(parser.parse_content_disposition_header)
def init(self, *args, **kw):
super().init(*args, **kw)
cd = self._parse_tree.content_disposition
self._content_disposition = cd if cd is None else utils._sanitize(cd)
@property
def content_disposition(self):
return self._content_disposition
class ContentTransferEncodingHeader:
max_count = 1
value_parser = staticmethod(parser.parse_content_transfer_encoding_header)
@classmethod
def parse(cls, value, kwds):
kwds['parse_tree'] = parse_tree = cls.value_parser(value)
kwds['decoded'] = str(parse_tree)
kwds['defects'].extend(parse_tree.all_defects)
def init(self, *args, **kw):
super().init(*args, **kw)
self._cte = utils._sanitize(self._parse_tree.cte)
@property
def cte(self):
return self._cte
# The header factory #
_default_header_map = {
'subject': UniqueUnstructuredHeader,
'date': UniqueDateHeader,
'resent-date': DateHeader,
'orig-date': UniqueDateHeader,
'sender': UniqueSingleAddressHeader,
'resent-sender': SingleAddressHeader,
'to': UniqueAddressHeader,
'resent-to': AddressHeader,
'cc': UniqueAddressHeader,
'resent-cc': AddressHeader,
'bcc': UniqueAddressHeader,
'resent-bcc': AddressHeader,
'from': UniqueAddressHeader,
'resent-from': AddressHeader,
'reply-to': UniqueAddressHeader,
'mime-version': MIMEVersionHeader,
'content-type': ContentTypeHeader,
'content-disposition': ContentDispositionHeader,
'content-transfer-encoding': ContentTransferEncodingHeader,
}
class HeaderRegistry:
"""A header_factory and header registry."""
def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader,
use_default_map=True):
"""Create a header_factory that works with the Policy API.
base_class is the class that will be the last class in the created
header class's __bases__ list. default_class is the class that will be
used if "name" (see __call__) does not appear in the registry.
use_default_map controls whether or not the default mapping of names to
specialized classes is copied in to the registry when the factory is
created. The default is True.
"""
self.registry = {}
self.base_class = base_class
self.default_class = default_class
if use_default_map:
self.registry.update(_default_header_map)
def map_to_type(self, name, cls):
"""Register cls as the specialized class for handling "name" headers.
"""
self.registry[name.lower()] = cls
def __getitem__(self, name):
cls = self.registry.get(name.lower(), self.default_class)
return type('_'+cls.__name__, (cls, self.base_class), {})
def __call__(self, name, value):
"""Create a header instance for header 'name' from 'value'.
Creates a header instance by creating a specialized class for parsing
and representing the specified header by combining the factory
base_class with a specialized class from the registry or the
default_class, and passing the name and value to the constructed
class's constructor.
"""
return self[name](name, value)
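# Illustrative usage (added sketch; not part of the original module):
#
#     registry = HeaderRegistry()
#     h = registry('To', 'Fred <fred@example.com>')
#     h.addresses[0].addr_spec   # -> 'fred@example.com'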
|
abhishek-ch/hue
|
refs/heads/master
|
desktop/core/ext-py/Paste-2.0.1/paste/fixture.py
|
33
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Routines for testing WSGI applications.
Most interesting is the `TestApp <class-paste.fixture.TestApp.html>`_
for testing WSGI applications, and the `TestFileEnvironment
<class-paste.fixture.TestFileEnvironment.html>`_ class for testing the
effects of command-line scripts.
"""
from __future__ import print_function
import sys
import random
import mimetypes
import time
import cgi
import os
import shutil
import smtplib
import shlex
import re
import six
import subprocess
from six.moves import cStringIO as StringIO
from six.moves.urllib.parse import urlencode
from six.moves.urllib import parse as urlparse
try:
# Python 3
from http.cookies import BaseCookie
from urllib.parse import splittype, splithost
except ImportError:
# Python 2
from Cookie import BaseCookie
from urllib import splittype, splithost
from paste import wsgilib
from paste import lint
from paste.response import HeaderDict
def tempnam_no_warning(*args):
"""
An os.tempnam with the warning turned off, because sometimes
you just need to use this and don't care about the stupid
security warning.
"""
return os.tempnam(*args)
class NoDefault(object):
pass
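# Deliberately shadows the builtin; kept, presumably, for compatibility with
# very old Python versions that lacked sorted().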
def sorted(l):
l = list(l)
l.sort()
return l
class Dummy_smtplib(object):
existing = None
def __init__(self, server):
import warnings
warnings.warn(
'Dummy_smtplib is not maintained and is deprecated',
DeprecationWarning, 2)
assert not self.existing, (
"smtplib.SMTP() called again before Dummy_smtplib.existing.reset() "
"called.")
self.server = server
self.open = True
self.__class__.existing = self
def quit(self):
assert self.open, (
"Called %s.quit() twice" % self)
self.open = False
def sendmail(self, from_address, to_addresses, msg):
self.from_address = from_address
self.to_addresses = to_addresses
self.message = msg
    @classmethod
    def install(cls):
        smtplib.SMTP = cls
def reset(self):
assert not self.open, (
"SMTP connection not quit")
self.__class__.existing = None
class AppError(Exception):
pass
class TestApp(object):
# for py.test
disabled = True
def __init__(self, app, namespace=None, relative_to=None,
extra_environ=None, pre_request_hook=None,
post_request_hook=None):
"""
Wraps a WSGI application in a more convenient interface for
testing.
``app`` may be an application, or a Paste Deploy app
URI, like ``'config:filename.ini#test'``.
``namespace`` is a dictionary that will be written to (if
provided). This can be used with doctest or some other
        system, and the variable ``res`` will be assigned every time
you make a request (instead of returning the request).
``relative_to`` is a directory, and filenames used for file
        uploads are calculated relative to this, as are ``config:``
        URIs that aren't absolute.
``extra_environ`` is a dictionary of values that should go
into the environment for each request. These can provide a
communication channel with the application.
``pre_request_hook`` is a function to be called prior to
making requests (such as ``post`` or ``get``). This function
must take one argument (the instance of the TestApp).
``post_request_hook`` is a function, similar to
``pre_request_hook``, to be called after requests are made.
"""
if isinstance(app, (six.binary_type, six.text_type)):
from paste.deploy import loadapp
# @@: Should pick up relative_to from calling module's
# __file__
app = loadapp(app, relative_to=relative_to)
self.app = app
self.namespace = namespace
self.relative_to = relative_to
if extra_environ is None:
extra_environ = {}
self.extra_environ = extra_environ
self.pre_request_hook = pre_request_hook
self.post_request_hook = post_request_hook
self.reset()
def reset(self):
"""
Resets the state of the application; currently just clears
saved cookies.
"""
self.cookies = {}
def _make_environ(self):
environ = self.extra_environ.copy()
environ['paste.throw_errors'] = True
return environ
def get(self, url, params=None, headers=None, extra_environ=None,
status=None, expect_errors=False):
"""
Get the given url (well, actually a path like
``'/page.html'``).
``params``:
A query string, or a dictionary that will be encoded
into a query string. You may also include a query
string on the ``url``.
``headers``:
A dictionary of extra headers to send.
``extra_environ``:
A dictionary of environmental variables that should
be added to the request.
        ``status``:
            The integer status code you expect (if not 200 or 3xx).
            If you expect a 404 response, for instance, you must give
            ``status=404`` or it will be an error. You can also give
            the wildcard ``'*'`` to accept any status, or a list of
            acceptable integer statuses.
``expect_errors``:
If this is not true, then if anything is written to
``wsgi.errors`` it will be an error. If it is true, then
non-200/3xx responses are also okay.
Returns a `response object
<class-paste.fixture.TestResponse.html>`_
"""
if extra_environ is None:
extra_environ = {}
# Hide from py.test:
__tracebackhide__ = True
if params:
if not isinstance(params, (six.binary_type, six.text_type)):
params = urlencode(params, doseq=True)
if '?' in url:
url += '&'
else:
url += '?'
url += params
environ = self._make_environ()
url = str(url)
if '?' in url:
url, environ['QUERY_STRING'] = url.split('?', 1)
else:
environ['QUERY_STRING'] = ''
self._set_headers(headers, environ)
environ.update(extra_environ)
req = TestRequest(url, environ, expect_errors)
return self.do_request(req, status=status)
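    # Illustrative usage sketch (not from the original docs; app and path
    # are hypothetical):
    #   app = TestApp(my_wsgi_app)
    #   res = app.get('/page.html', params={'q': 'paste'})
    #   assert res.status == 200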
def _gen_request(self, method, url, params=b'', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False):
"""
Do a generic request.
"""
if headers is None:
headers = {}
if extra_environ is None:
extra_environ = {}
environ = self._make_environ()
# @@: Should this be all non-strings?
if isinstance(params, (list, tuple, dict)):
params = urlencode(params)
if hasattr(params, 'items'):
# Some other multi-dict like format
params = urlencode(params.items())
        if six.PY3 and isinstance(params, six.text_type):
            # Only text needs encoding; the default params value is already bytes.
            params = params.encode('utf8')
if upload_files:
params = cgi.parse_qsl(params, keep_blank_values=True)
content_type, params = self.encode_multipart(
params, upload_files)
environ['CONTENT_TYPE'] = content_type
elif params:
environ.setdefault('CONTENT_TYPE', 'application/x-www-form-urlencoded')
if '?' in url:
url, environ['QUERY_STRING'] = url.split('?', 1)
else:
environ['QUERY_STRING'] = ''
environ['CONTENT_LENGTH'] = str(len(params))
environ['REQUEST_METHOD'] = method
environ['wsgi.input'] = six.BytesIO(params)
self._set_headers(headers, environ)
environ.update(extra_environ)
req = TestRequest(url, environ, expect_errors)
return self.do_request(req, status=status)
def post(self, url, params=b'', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False):
"""
Do a POST request. Very like the ``.get()`` method.
``params`` are put in the body of the request.
``upload_files`` is for file uploads. It should be a list of
``[(fieldname, filename, file_content)]``. You can also use
just ``[(fieldname, filename)]`` and the file content will be
read from disk.
Returns a `response object
<class-paste.fixture.TestResponse.html>`_
"""
return self._gen_request('POST', url, params=params, headers=headers,
extra_environ=extra_environ,status=status,
upload_files=upload_files,
expect_errors=expect_errors)
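    # Illustrative usage sketch (hypothetical field and file names):
    #   res = app.post('/upload', params={'title': 'demo'},
    #                  upload_files=[('file', 'report.txt', b'contents')])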
def put(self, url, params=b'', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False):
"""
Do a PUT request. Very like the ``.get()`` method.
``params`` are put in the body of the request.
``upload_files`` is for file uploads. It should be a list of
``[(fieldname, filename, file_content)]``. You can also use
just ``[(fieldname, filename)]`` and the file content will be
read from disk.
Returns a `response object
<class-paste.fixture.TestResponse.html>`_
"""
return self._gen_request('PUT', url, params=params, headers=headers,
extra_environ=extra_environ,status=status,
upload_files=upload_files,
expect_errors=expect_errors)
def delete(self, url, params=b'', headers=None, extra_environ=None,
status=None, expect_errors=False):
"""
Do a DELETE request. Very like the ``.get()`` method.
``params`` are put in the body of the request.
Returns a `response object
<class-paste.fixture.TestResponse.html>`_
"""
return self._gen_request('DELETE', url, params=params, headers=headers,
extra_environ=extra_environ,status=status,
upload_files=None, expect_errors=expect_errors)
def _set_headers(self, headers, environ):
"""
Turn any headers into environ variables
"""
if not headers:
return
for header, value in headers.items():
if header.lower() == 'content-type':
var = 'CONTENT_TYPE'
elif header.lower() == 'content-length':
var = 'CONTENT_LENGTH'
else:
var = 'HTTP_%s' % header.replace('-', '_').upper()
environ[var] = value
def encode_multipart(self, params, files):
"""
Encodes a set of parameters (typically a name/value list) and
a set of files (a list of (name, filename, file_body)) into a
typical POST body, returning the (content_type, body).
"""
boundary = '----------a_BoUnDaRy%s$' % random.random()
content_type = 'multipart/form-data; boundary=%s' % boundary
if six.PY3:
boundary = boundary.encode('ascii')
lines = []
for key, value in params:
lines.append(b'--'+boundary)
line = 'Content-Disposition: form-data; name="%s"' % key
if six.PY3:
line = line.encode('utf8')
lines.append(line)
lines.append(b'')
line = value
if six.PY3 and isinstance(line, six.text_type):
line = line.encode('utf8')
lines.append(line)
for file_info in files:
key, filename, value = self._get_file_info(file_info)
lines.append(b'--'+boundary)
line = ('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename))
if six.PY3:
line = line.encode('utf8')
lines.append(line)
fcontent = mimetypes.guess_type(filename)[0]
line = ('Content-Type: %s'
% (fcontent or 'application/octet-stream'))
if six.PY3:
line = line.encode('utf8')
lines.append(line)
lines.append(b'')
lines.append(value)
lines.append(b'--' + boundary + b'--')
lines.append(b'')
body = b'\r\n'.join(lines)
return content_type, body
def _get_file_info(self, file_info):
if len(file_info) == 2:
# It only has a filename
filename = file_info[1]
if self.relative_to:
filename = os.path.join(self.relative_to, filename)
f = open(filename, 'rb')
content = f.read()
f.close()
return (file_info[0], filename, content)
elif len(file_info) == 3:
return file_info
else:
raise ValueError(
"upload_files need to be a list of tuples of (fieldname, "
"filename, filecontent) or (fieldname, filename); "
"you gave: %r"
% repr(file_info)[:100])
def do_request(self, req, status):
"""
Executes the given request (``req``), with the expected
``status``. Generally ``.get()`` and ``.post()`` are used
instead.
"""
if self.pre_request_hook:
self.pre_request_hook(self)
__tracebackhide__ = True
if self.cookies:
c = BaseCookie()
for name, value in self.cookies.items():
c[name] = value
hc = '; '.join(['='.join([m.key, m.value]) for m in c.values()])
req.environ['HTTP_COOKIE'] = hc
req.environ['paste.testing'] = True
req.environ['paste.testing_variables'] = {}
app = lint.middleware(self.app)
old_stdout = sys.stdout
out = CaptureStdout(old_stdout)
try:
sys.stdout = out
start_time = time.time()
raise_on_wsgi_error = not req.expect_errors
raw_res = wsgilib.raw_interactive(
app, req.url,
raise_on_wsgi_error=raise_on_wsgi_error,
**req.environ)
end_time = time.time()
finally:
sys.stdout = old_stdout
sys.stderr.write(out.getvalue())
res = self._make_response(raw_res, end_time - start_time)
res.request = req
for name, value in req.environ['paste.testing_variables'].items():
if hasattr(res, name):
raise ValueError(
"paste.testing_variables contains the variable %r, but "
"the response object already has an attribute by that "
"name" % name)
setattr(res, name, value)
if self.namespace is not None:
self.namespace['res'] = res
if not req.expect_errors:
self._check_status(status, res)
self._check_errors(res)
res.cookies_set = {}
for header in res.all_headers('set-cookie'):
c = BaseCookie(header)
for key, morsel in c.items():
self.cookies[key] = morsel.value
res.cookies_set[key] = morsel.value
if self.post_request_hook:
self.post_request_hook(self)
if self.namespace is None:
# It's annoying to return the response in doctests, as it'll
            # be printed, so we only return it if we couldn't assign
# it anywhere
return res
def _check_status(self, status, res):
__tracebackhide__ = True
if status == '*':
return
if isinstance(status, (list, tuple)):
if res.status not in status:
raise AppError(
"Bad response: %s (not one of %s for %s)\n%s"
% (res.full_status, ', '.join(map(str, status)),
res.request.url, res.body))
return
if status is None:
if res.status >= 200 and res.status < 400:
return
body = res.body
if six.PY3:
body = body.decode('utf8', 'xmlcharrefreplace')
raise AppError(
"Bad response: %s (not 200 OK or 3xx redirect for %s)\n%s"
% (res.full_status, res.request.url,
body))
if status != res.status:
raise AppError(
"Bad response: %s (not %s)" % (res.full_status, status))
def _check_errors(self, res):
if res.errors:
raise AppError(
"Application had errors logged:\n%s" % res.errors)
def _make_response(self, resp, total_time):
status, headers, body, errors = resp
return TestResponse(self, status, headers, body, errors,
total_time)
class CaptureStdout(object):
def __init__(self, actual):
self.captured = StringIO()
self.actual = actual
def write(self, s):
self.captured.write(s)
self.actual.write(s)
def flush(self):
self.actual.flush()
def writelines(self, lines):
for item in lines:
self.write(item)
def getvalue(self):
return self.captured.getvalue()
class TestResponse(object):
# for py.test
disabled = True
"""
    Instances of this class are returned by `TestApp
<class-paste.fixture.TestApp.html>`_
"""
def __init__(self, test_app, status, headers, body, errors,
total_time):
self.test_app = test_app
self.status = int(status.split()[0])
self.full_status = status
self.headers = headers
self.header_dict = HeaderDict.fromlist(self.headers)
self.body = body
self.errors = errors
self._normal_body = None
self.time = total_time
self._forms_indexed = None
def forms__get(self):
"""
Returns a dictionary of ``Form`` objects. Indexes are both in
order (from zero) and by form id (if the form is given an id).
"""
if self._forms_indexed is None:
self._parse_forms()
return self._forms_indexed
forms = property(forms__get,
doc="""
A list of <form>s found on the page (instances of
`Form <class-paste.fixture.Form.html>`_)
""")
def form__get(self):
forms = self.forms
if not forms:
raise TypeError(
"You used response.form, but no forms exist")
if 1 in forms:
# There is more than one form
raise TypeError(
"You used response.form, but more than one form exists")
return forms[0]
form = property(form__get,
doc="""
Returns a single `Form
<class-paste.fixture.Form.html>`_ instance; it
is an error if there are multiple forms on the
page.
""")
_tag_re = re.compile(r'<(/?)([:a-z0-9_\-]*)(.*?)>', re.S|re.I)
def _parse_forms(self):
forms = self._forms_indexed = {}
form_texts = []
started = None
for match in self._tag_re.finditer(self.body):
end = match.group(1) == '/'
tag = match.group(2).lower()
if tag != 'form':
continue
if end:
assert started, (
"</form> unexpected at %s" % match.start())
form_texts.append(self.body[started:match.end()])
started = None
else:
assert not started, (
"Nested form tags at %s" % match.start())
started = match.start()
assert not started, (
"Danging form: %r" % self.body[started:])
for i, text in enumerate(form_texts):
form = Form(self, text)
forms[i] = form
if form.id:
forms[form.id] = form
def header(self, name, default=NoDefault):
"""
Returns the named header; an error if there is not exactly one
matching header (unless you give a default -- always an error
if there is more than one header)
"""
found = None
for cur_name, value in self.headers:
if cur_name.lower() == name.lower():
assert not found, (
"Ambiguous header: %s matches %r and %r"
% (name, found, value))
found = value
if found is None:
if default is NoDefault:
raise KeyError(
"No header found: %r (from %s)"
% (name, ', '.join([n for n, v in self.headers])))
else:
return default
return found
def all_headers(self, name):
"""
Gets all headers by the ``name``, returns as a list
"""
found = []
for cur_name, value in self.headers:
if cur_name.lower() == name.lower():
found.append(value)
return found
def follow(self, **kw):
"""
If this request is a redirect, follow that redirect. It
is an error if this is not a redirect response. Returns
another response object.
"""
assert self.status >= 300 and self.status < 400, (
"You can only follow redirect responses (not %s)"
% self.full_status)
location = self.header('location')
type, rest = splittype(location)
host, path = splithost(rest)
# @@: We should test that it's not a remote redirect
return self.test_app.get(location, **kw)
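    # Illustrative sketch: following a redirect (assumes the app answers
    # '/old' with a 3xx status and a Location header):
    #   res = app.get('/old', status=302)
    #   res = res.follow()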
def click(self, description=None, linkid=None, href=None,
anchor=None, index=None, verbose=False):
"""
        Click the link as described. Each of ``description``,
        ``linkid``, ``href``, and ``anchor`` is a *pattern*, meaning it
        is either a string (a regular expression), a compiled regular
        expression (an object with a ``search`` method), or a callable
        returning true or false.
All the given patterns are ANDed together:
* ``description`` is a pattern that matches the contents of the
anchor (HTML and all -- everything between ``<a...>`` and
``</a>``)
* ``linkid`` is a pattern that matches the ``id`` attribute of
the anchor. It will receive the empty string if no id is
given.
* ``href`` is a pattern that matches the ``href`` of the anchor;
the literal content of that attribute, not the fully qualified
attribute.
* ``anchor`` is a pattern that matches the entire anchor, with
its contents.
If more than one link matches, then the ``index`` link is
followed. If ``index`` is not given and more than one link
matches, or if no link matches, then ``IndexError`` will be
raised.
If you give ``verbose`` then messages will be printed about
each link, and why it does or doesn't match. If you use
``app.click(verbose=True)`` you'll see a list of all the
links.
You can use multiple criteria to essentially assert multiple
aspects about the link, e.g., where the link's destination is.
"""
__tracebackhide__ = True
found_html, found_desc, found_attrs = self._find_element(
tag='a', href_attr='href',
href_extract=None,
content=description,
id=linkid,
href_pattern=href,
html_pattern=anchor,
index=index, verbose=verbose)
return self.goto(found_attrs['uri'])
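    # Illustrative sketch (link text and href are hypothetical):
    #   res = res.click('Next page')            # match by anchor contents
    #   res = res.click(href=r'/items/\d+')     # or match by href pattern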
def clickbutton(self, description=None, buttonid=None, href=None,
button=None, index=None, verbose=False):
"""
Like ``.click()``, except looks for link-like buttons.
This kind of button should look like
``<button onclick="...location.href='url'...">``.
"""
__tracebackhide__ = True
found_html, found_desc, found_attrs = self._find_element(
tag='button', href_attr='onclick',
href_extract=re.compile(r"location\.href='(.*?)'"),
content=description,
id=buttonid,
href_pattern=href,
html_pattern=button,
index=index, verbose=verbose)
return self.goto(found_attrs['uri'])
def _find_element(self, tag, href_attr, href_extract,
content, id,
href_pattern,
html_pattern,
index, verbose):
content_pat = _make_pattern(content)
id_pat = _make_pattern(id)
href_pat = _make_pattern(href_pattern)
html_pat = _make_pattern(html_pattern)
_tag_re = re.compile(r'<%s\s+(.*?)>(.*?)</%s>' % (tag, tag),
re.I+re.S)
def printlog(s):
if verbose:
print(s)
found_links = []
total_links = 0
for match in _tag_re.finditer(self.body):
el_html = match.group(0)
el_attr = match.group(1)
el_content = match.group(2)
attrs = _parse_attrs(el_attr)
if verbose:
printlog('Element: %r' % el_html)
if not attrs.get(href_attr):
printlog(' Skipped: no %s attribute' % href_attr)
continue
el_href = attrs[href_attr]
if href_extract:
m = href_extract.search(el_href)
if not m:
printlog(" Skipped: doesn't match extract pattern")
continue
el_href = m.group(1)
attrs['uri'] = el_href
if el_href.startswith('#'):
printlog(' Skipped: only internal fragment href')
continue
if el_href.startswith('javascript:'):
printlog(' Skipped: cannot follow javascript:')
continue
total_links += 1
if content_pat and not content_pat(el_content):
printlog(" Skipped: doesn't match description")
continue
if id_pat and not id_pat(attrs.get('id', '')):
printlog(" Skipped: doesn't match id")
continue
if href_pat and not href_pat(el_href):
printlog(" Skipped: doesn't match href")
continue
if html_pat and not html_pat(el_html):
printlog(" Skipped: doesn't match html")
continue
printlog(" Accepted")
found_links.append((el_html, el_content, attrs))
if not found_links:
raise IndexError(
"No matching elements found (from %s possible)"
% total_links)
if index is None:
if len(found_links) > 1:
raise IndexError(
"Multiple links match: %s"
% ', '.join([repr(anc) for anc, d, attr in found_links]))
found_link = found_links[0]
else:
try:
found_link = found_links[index]
except IndexError:
raise IndexError(
"Only %s (out of %s) links match; index %s out of range"
% (len(found_links), total_links, index))
return found_link
def goto(self, href, method='get', **args):
"""
Go to the (potentially relative) link ``href``, using the
given method (``'get'`` or ``'post'``) and any extra arguments
you want to pass to the ``app.get()`` or ``app.post()``
methods.
All hostnames and schemes will be ignored.
"""
scheme, host, path, query, fragment = urlparse.urlsplit(href)
        # We ignore the scheme, host, and fragment so the request stays
        # within the application under test:
scheme = host = fragment = ''
href = urlparse.urlunsplit((scheme, host, path, query, fragment))
href = urlparse.urljoin(self.request.full_url, href)
method = method.lower()
assert method in ('get', 'post'), (
'Only "get" or "post" are allowed for method (you gave %r)'
% method)
if method == 'get':
method = self.test_app.get
else:
method = self.test_app.post
return method(href, **args)
_normal_body_regex = re.compile(br'[ \n\r\t]+')
def normal_body__get(self):
if self._normal_body is None:
self._normal_body = self._normal_body_regex.sub(
b' ', self.body)
return self._normal_body
normal_body = property(normal_body__get,
doc="""
Return the whitespace-normalized body
""")
def __contains__(self, s):
"""
A response 'contains' a string if it is present in the body
of the response. Whitespace is normalized when searching
for a string.
"""
if not isinstance(s, (six.binary_type, six.text_type)):
s = str(s)
if isinstance(s, six.text_type):
## FIXME: we don't know that this response uses utf8:
s = s.encode('utf8')
return (self.body.find(s) != -1
or self.normal_body.find(s) != -1)
def mustcontain(self, *strings, **kw):
"""
Assert that the response contains all of the strings passed
in as arguments.
Equivalent to::
assert string in res
"""
if 'no' in kw:
no = kw['no']
del kw['no']
if isinstance(no, (six.binary_type, six.text_type)):
no = [no]
else:
no = []
if kw:
raise TypeError(
"The only keyword argument allowed is 'no'")
for s in strings:
if not s in self:
print("Actual response (no %r):" % s, file=sys.stderr)
print(self, file=sys.stderr)
raise IndexError(
"Body does not contain string %r" % s)
for no_s in no:
if no_s in self:
print("Actual response (has %r)" % no_s, file=sys.stderr)
print(self, file=sys.stderr)
raise IndexError(
"Body contains string %r" % s)
def __repr__(self):
body = self.body
if six.PY3:
body = body.decode('utf8', 'xmlcharrefreplace')
body = body[:20]
return '<Response %s %r>' % (self.full_status, body)
def __str__(self):
simple_body = b'\n'.join([l for l in self.body.splitlines()
if l.strip()])
if six.PY3:
simple_body = simple_body.decode('utf8', 'xmlcharrefreplace')
return 'Response: %s\n%s\n%s' % (
self.status,
'\n'.join(['%s: %s' % (n, v) for n, v in self.headers]),
simple_body)
def showbrowser(self):
"""
Show this response in a browser window (for debugging purposes,
when it's hard to read the HTML).
"""
import webbrowser
fn = tempnam_no_warning(None, 'paste-fixture') + '.html'
f = open(fn, 'wb')
f.write(self.body)
f.close()
url = 'file:' + fn.replace(os.sep, '/')
webbrowser.open_new(url)
class TestRequest(object):
# for py.test
disabled = True
"""
Instances of this class are created by `TestApp
<class-paste.fixture.TestApp.html>`_ with the ``.get()`` and
``.post()`` methods, and are consumed there by ``.do_request()``.
Instances are also available as a ``.req`` attribute on
`TestResponse <class-paste.fixture.TestResponse.html>`_ instances.
Useful attributes:
``url``:
The url (actually usually the path) of the request, without
query string.
``environ``:
The environment dictionary used for the request.
``full_url``:
The url/path, with query string.
"""
def __init__(self, url, environ, expect_errors=False):
if url.startswith('http://localhost'):
url = url[len('http://localhost'):]
self.url = url
self.environ = environ
if environ.get('QUERY_STRING'):
self.full_url = url + '?' + environ['QUERY_STRING']
else:
self.full_url = url
self.expect_errors = expect_errors
class Form(object):
"""
This object represents a form that has been found in a page.
    It has a couple of useful attributes:
``text``:
the full HTML of the form.
``action``:
the relative URI of the action.
``method``:
the method (e.g., ``'GET'``).
``id``:
the id, or None if not given.
``fields``:
a dictionary of fields, each value is a list of fields by
that name. ``<input type=\"radio\">`` and ``<select>`` are
both represented as single fields with multiple options.
"""
# @@: This really should be using Mechanize/ClientForm or
# something...
_tag_re = re.compile(r'<(/?)([:a-z0-9_\-]*)([^>]*?)>', re.I)
def __init__(self, response, text):
self.response = response
self.text = text
self._parse_fields()
self._parse_action()
def _parse_fields(self):
in_select = None
in_textarea = None
fields = {}
for match in self._tag_re.finditer(self.text):
end = match.group(1) == '/'
tag = match.group(2).lower()
if tag not in ('input', 'select', 'option', 'textarea',
'button'):
continue
if tag == 'select' and end:
assert in_select, (
'%r without starting select' % match.group(0))
in_select = None
continue
if tag == 'textarea' and end:
assert in_textarea, (
"</textarea> with no <textarea> at %s" % match.start())
in_textarea[0].value = html_unquote(self.text[in_textarea[1]:match.start()])
in_textarea = None
continue
if end:
continue
attrs = _parse_attrs(match.group(3))
if 'name' in attrs:
name = attrs.pop('name')
else:
name = None
if tag == 'option':
in_select.options.append((attrs.get('value'),
'selected' in attrs))
continue
if tag == 'input' and attrs.get('type') == 'radio':
field = fields.get(name)
if not field:
field = Radio(self, tag, name, match.start(), **attrs)
fields.setdefault(name, []).append(field)
else:
field = field[0]
assert isinstance(field, Radio)
field.options.append((attrs.get('value'),
'checked' in attrs))
continue
tag_type = tag
if tag == 'input':
tag_type = attrs.get('type', 'text').lower()
FieldClass = Field.classes.get(tag_type, Field)
field = FieldClass(self, tag, name, match.start(), **attrs)
if tag == 'textarea':
assert not in_textarea, (
"Nested textareas: %r and %r"
% (in_textarea, match.group(0)))
in_textarea = field, match.end()
elif tag == 'select':
assert not in_select, (
"Nested selects: %r and %r"
% (in_select, match.group(0)))
in_select = field
fields.setdefault(name, []).append(field)
self.fields = fields
def _parse_action(self):
self.action = None
for match in self._tag_re.finditer(self.text):
end = match.group(1) == '/'
tag = match.group(2).lower()
if tag != 'form':
continue
if end:
break
attrs = _parse_attrs(match.group(3))
self.action = attrs.get('action', '')
self.method = attrs.get('method', 'GET')
self.id = attrs.get('id')
# @@: enctype?
else:
assert 0, "No </form> tag found"
assert self.action is not None, (
"No <form> tag found")
def __setitem__(self, name, value):
"""
Set the value of the named field. If there is 0 or multiple
fields by that name, it is an error.
Setting the value of a ``<select>`` selects the given option
(and confirms it is an option). Setting radio fields does the
same. Checkboxes get boolean values. You cannot set hidden
fields or buttons.
Use ``.set()`` if there is any ambiguity and you must provide
an index.
"""
fields = self.fields.get(name)
assert fields is not None, (
"No field by the name %r found (fields: %s)"
% (name, ', '.join(map(repr, self.fields.keys()))))
assert len(fields) == 1, (
"Multiple fields match %r: %s"
% (name, ', '.join(map(repr, fields))))
fields[0].value = value
def __getitem__(self, name):
"""
Get the named field object (ambiguity is an error).
"""
fields = self.fields.get(name)
assert fields is not None, (
"No field by the name %r found" % name)
assert len(fields) == 1, (
"Multiple fields match %r: %s"
% (name, ', '.join(map(repr, fields))))
return fields[0]
def set(self, name, value, index=None):
"""
Set the given name, using ``index`` to disambiguate.
"""
if index is None:
self[name] = value
else:
fields = self.fields.get(name)
assert fields is not None, (
"No fields found matching %r" % name)
field = fields[index]
field.value = value
def get(self, name, index=None, default=NoDefault):
"""
Get the named/indexed field object, or ``default`` if no field
is found.
"""
fields = self.fields.get(name)
if fields is None and default is not NoDefault:
return default
if index is None:
return self[name]
else:
fields = self.fields.get(name)
assert fields is not None, (
"No fields found matching %r" % name)
field = fields[index]
return field
def select(self, name, value, index=None):
"""
Like ``.set()``, except also confirms the target is a
``<select>``.
"""
field = self.get(name, index=index)
assert isinstance(field, Select)
field.value = value
def submit(self, name=None, index=None, **args):
"""
Submits the form. If ``name`` is given, then also select that
        button (using ``index`` to disambiguate).
Any extra keyword arguments are passed to the ``.get()`` or
``.post()`` method.
Returns a response object.
"""
fields = self.submit_fields(name, index=index)
return self.response.goto(self.action, method=self.method,
params=fields, **args)
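    # Illustrative sketch of typical form handling (field and button names
    # are hypothetical):
    #   form = res.form
    #   form['username'] = 'bob'
    #   res = form.submit('save')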
def submit_fields(self, name=None, index=None):
"""
Return a list of ``[(name, value), ...]`` for the current
state of the form.
"""
submit = []
if name is not None:
field = self.get(name, index=index)
submit.append((field.name, field.value_if_submitted()))
for name, fields in self.fields.items():
if name is None:
continue
for field in fields:
value = field.value
if value is None:
continue
submit.append((name, value))
return submit
_attr_re = re.compile(r'([^= \n\r\t]+)[ \n\r\t]*(?:=[ \n\r\t]*(?:"([^"]*)"|([^"][^ \n\r\t>]*)))?', re.S)
def _parse_attrs(text):
attrs = {}
for match in _attr_re.finditer(text):
attr_name = match.group(1).lower()
attr_body = match.group(2) or match.group(3)
attr_body = html_unquote(attr_body or '')
attrs[attr_name] = attr_body
return attrs
class Field(object):
"""
Field object.
"""
# Dictionary of field types (select, radio, etc) to classes
classes = {}
settable = True
def __init__(self, form, tag, name, pos,
value=None, id=None, **attrs):
self.form = form
self.tag = tag
self.name = name
self.pos = pos
self._value = value
self.id = id
self.attrs = attrs
def value__set(self, value):
if not self.settable:
raise AttributeError(
"You cannot set the value of the <%s> field %r"
% (self.tag, self.name))
self._value = value
def force_value(self, value):
"""
Like setting a value, except forces it even for, say, hidden
fields.
"""
self._value = value
def value__get(self):
return self._value
value = property(value__get, value__set)
class Select(Field):
"""
Field representing ``<select>``
"""
def __init__(self, *args, **attrs):
super(Select, self).__init__(*args, **attrs)
self.options = []
self.multiple = attrs.get('multiple')
assert not self.multiple, (
"<select multiple> not yet supported")
# Undetermined yet:
self.selectedIndex = None
def value__set(self, value):
for i, (option, checked) in enumerate(self.options):
if option == str(value):
self.selectedIndex = i
break
else:
raise ValueError(
"Option %r not found (from %s)"
% (value, ', '.join(
[repr(o) for o, c in self.options])))
def value__get(self):
if self.selectedIndex is not None:
return self.options[self.selectedIndex][0]
else:
for option, checked in self.options:
if checked:
return option
else:
if self.options:
return self.options[0][0]
else:
return None
value = property(value__get, value__set)
Field.classes['select'] = Select
class Radio(Select):
"""
Field representing ``<input type="radio">``
"""
Field.classes['radio'] = Radio
class Checkbox(Field):
"""
Field representing ``<input type="checkbox">``
"""
def __init__(self, *args, **attrs):
super(Checkbox, self).__init__(*args, **attrs)
self.checked = 'checked' in attrs
def value__set(self, value):
self.checked = not not value
def value__get(self):
if self.checked:
if self._value is None:
return 'on'
else:
return self._value
else:
return None
value = property(value__get, value__set)
Field.classes['checkbox'] = Checkbox
class Text(Field):
"""
Field representing ``<input type="text">``
"""
def __init__(self, form, tag, name, pos,
value='', id=None, **attrs):
#text fields default to empty string
Field.__init__(self, form, tag, name, pos,
value=value, id=id, **attrs)
Field.classes['text'] = Text
class Textarea(Text):
"""
Field representing ``<textarea>``
"""
Field.classes['textarea'] = Textarea
class Hidden(Text):
"""
Field representing ``<input type="hidden">``
"""
Field.classes['hidden'] = Hidden
class Submit(Field):
"""
Field representing ``<input type="submit">`` and ``<button>``
"""
settable = False
def value__get(self):
return None
value = property(value__get)
def value_if_submitted(self):
return self._value
Field.classes['submit'] = Submit
Field.classes['button'] = Submit
Field.classes['image'] = Submit
############################################################
## Command-line testing
############################################################
class TestFileEnvironment(object):
"""
This represents an environment in which files will be written, and
scripts will be run.
"""
# for py.test
disabled = True
def __init__(self, base_path, template_path=None,
script_path=None,
environ=None, cwd=None, start_clear=True,
ignore_paths=None, ignore_hidden=True):
"""
Creates an environment. ``base_path`` is used as the current
working directory, and generally where changes are looked for.
``template_path`` is the directory to look for *template*
files, which are files you'll explicitly add to the
environment. This is done with ``.writefile()``.
``script_path`` is the PATH for finding executables. Usually
grabbed from ``$PATH``.
``environ`` is the operating system environment,
``os.environ`` if not given.
``cwd`` is the working directory, ``base_path`` by default.
If ``start_clear`` is true (default) then the ``base_path``
will be cleared (all files deleted) when an instance is
created. You can also use ``.clear()`` to clear the files.
``ignore_paths`` is a set of specific filenames that should be
ignored when created in the environment. ``ignore_hidden``
means, if true (default) that filenames and directories
starting with ``'.'`` will be ignored.
"""
self.base_path = base_path
self.template_path = template_path
if environ is None:
environ = os.environ.copy()
self.environ = environ
if script_path is None:
if sys.platform == 'win32':
script_path = environ.get('PATH', '').split(';')
else:
script_path = environ.get('PATH', '').split(':')
self.script_path = script_path
if cwd is None:
cwd = base_path
self.cwd = cwd
if start_clear:
self.clear()
elif not os.path.exists(base_path):
os.makedirs(base_path)
self.ignore_paths = ignore_paths or []
self.ignore_hidden = ignore_hidden
def run(self, script, *args, **kw):
"""
Run the command, with the given arguments. The ``script``
argument can have space-separated arguments, or you can use
the positional arguments.
Keywords allowed are:
``expect_error``: (default False)
Don't raise an exception in case of errors
``expect_stderr``: (default ``expect_error``)
Don't raise an exception if anything is printed to stderr
``stdin``: (default ``""``)
Input to the script
``printresult``: (default True)
Print the result after running
``cwd``: (default ``self.cwd``)
The working directory to run in
Returns a `ProcResponse
<class-paste.fixture.ProcResponse.html>`_ object.
"""
__tracebackhide__ = True
expect_error = _popget(kw, 'expect_error', False)
expect_stderr = _popget(kw, 'expect_stderr', expect_error)
cwd = _popget(kw, 'cwd', self.cwd)
stdin = _popget(kw, 'stdin', None)
printresult = _popget(kw, 'printresult', True)
        args = list(map(str, args))  # list() so concatenation works on Python 3
assert not kw, (
"Arguments not expected: %s" % ', '.join(kw.keys()))
if ' ' in script:
assert not args, (
"You cannot give a multi-argument script (%r) "
"and arguments (%s)" % (script, args))
script, args = script.split(None, 1)
args = shlex.split(args)
script = self._find_exe(script)
all = [script] + args
files_before = self._find_files()
proc = subprocess.Popen(all, stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
env=self.environ)
stdout, stderr = proc.communicate(stdin)
files_after = self._find_files()
result = ProcResult(
self, all, stdin, stdout, stderr,
returncode=proc.returncode,
files_before=files_before,
files_after=files_after)
if printresult:
print(result)
print('-'*40)
if not expect_error:
result.assert_no_error()
if not expect_stderr:
result.assert_no_stderr()
return result
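    # Illustrative usage sketch (assumes an ``echo`` executable on $PATH;
    # note stdout/stderr are bytes under Python 3):
    #   env = TestFileEnvironment('/tmp/test-env')
    #   result = env.run('echo', 'hello')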
def _find_exe(self, script_name):
if self.script_path is None:
script_name = os.path.join(self.cwd, script_name)
if not os.path.exists(script_name):
raise OSError(
"Script %s does not exist" % script_name)
return script_name
for path in self.script_path:
fn = os.path.join(path, script_name)
if os.path.exists(fn):
return fn
raise OSError(
"Script %s could not be found in %s"
% (script_name, ':'.join(self.script_path)))
def _find_files(self):
result = {}
for fn in os.listdir(self.base_path):
if self._ignore_file(fn):
continue
self._find_traverse(fn, result)
return result
def _ignore_file(self, fn):
if fn in self.ignore_paths:
return True
if self.ignore_hidden and os.path.basename(fn).startswith('.'):
return True
return False
def _find_traverse(self, path, result):
full = os.path.join(self.base_path, path)
if os.path.isdir(full):
result[path] = FoundDir(self.base_path, path)
for fn in os.listdir(full):
fn = os.path.join(path, fn)
if self._ignore_file(fn):
continue
self._find_traverse(fn, result)
else:
result[path] = FoundFile(self.base_path, path)
def clear(self):
"""
Delete all the files in the base directory.
"""
if os.path.exists(self.base_path):
shutil.rmtree(self.base_path)
os.mkdir(self.base_path)
def writefile(self, path, content=None,
frompath=None):
"""
Write a file to the given path. If ``content`` is given then
that text is written, otherwise the file in ``frompath`` is
used. ``frompath`` is relative to ``self.template_path``
"""
full = os.path.join(self.base_path, path)
if not os.path.exists(os.path.dirname(full)):
os.makedirs(os.path.dirname(full))
f = open(full, 'wb')
if content is not None:
f.write(content)
if frompath is not None:
if self.template_path:
frompath = os.path.join(self.template_path, frompath)
f2 = open(frompath, 'rb')
f.write(f2.read())
f2.close()
f.close()
return FoundFile(self.base_path, path)
class ProcResult(object):
"""
Represents the results of running a command in
`TestFileEnvironment
<class-paste.fixture.TestFileEnvironment.html>`_.
Attributes to pay particular attention to:
``stdout``, ``stderr``:
What is produced
``files_created``, ``files_deleted``, ``files_updated``:
Dictionaries mapping filenames (relative to the ``base_dir``)
to `FoundFile <class-paste.fixture.FoundFile.html>`_ or
`FoundDir <class-paste.fixture.FoundDir.html>`_ objects.
"""
def __init__(self, test_env, args, stdin, stdout, stderr,
returncode, files_before, files_after):
self.test_env = test_env
self.args = args
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.returncode = returncode
self.files_before = files_before
self.files_after = files_after
self.files_deleted = {}
self.files_updated = {}
self.files_created = files_after.copy()
for path, f in files_before.items():
if path not in files_after:
self.files_deleted[path] = f
continue
del self.files_created[path]
if f.mtime < files_after[path].mtime:
self.files_updated[path] = files_after[path]
def assert_no_error(self):
__tracebackhide__ = True
assert self.returncode == 0, (
"Script returned code: %s" % self.returncode)
def assert_no_stderr(self):
__tracebackhide__ = True
if self.stderr:
print('Error output:')
print(self.stderr)
raise AssertionError("stderr output not expected")
def __str__(self):
s = ['Script result: %s' % ' '.join(self.args)]
if self.returncode:
s.append(' return code: %s' % self.returncode)
if self.stderr:
s.append('-- stderr: --------------------')
s.append(self.stderr)
if self.stdout:
s.append('-- stdout: --------------------')
s.append(self.stdout)
for name, files, show_size in [
('created', self.files_created, True),
('deleted', self.files_deleted, True),
('updated', self.files_updated, True)]:
if files:
s.append('-- %s: -------------------' % name)
                files = sorted(files.items())
last = ''
for path, f in files:
t = ' %s' % _space_prefix(last, path, indent=4,
include_sep=False)
last = path
if show_size and f.size != 'N/A':
t += ' (%s bytes)' % f.size
s.append(t)
return '\n'.join(s)
class FoundFile(object):
"""
Represents a single file found as the result of a command.
Has attributes:
``path``:
The path of the file, relative to the ``base_path``
``full``:
The full path
``stat``:
The results of ``os.stat``. Also ``mtime`` and ``size``
        contain the ``.st_mtime`` and ``.st_size`` of the stat.
``bytes``:
The contents of the file.
You may use the ``in`` operator with these objects (tested against
the contents of the file), and the ``.mustcontain()`` method.
"""
file = True
dir = False
def __init__(self, base_path, path):
self.base_path = base_path
self.path = path
self.full = os.path.join(base_path, path)
self.stat = os.stat(self.full)
self.mtime = self.stat.st_mtime
self.size = self.stat.st_size
self._bytes = None
def bytes__get(self):
if self._bytes is None:
f = open(self.full, 'rb')
self._bytes = f.read()
f.close()
return self._bytes
bytes = property(bytes__get)
def __contains__(self, s):
return s in self.bytes
def mustcontain(self, s):
__tracebackhide__ = True
bytes_ = self.bytes
if s not in bytes_:
print('Could not find %r in:' % s)
print(bytes_)
assert s in bytes_
def __repr__(self):
return '<%s %s:%s>' % (
self.__class__.__name__,
self.base_path, self.path)
class FoundDir(object):
"""
Represents a directory created by a command.
"""
file = False
dir = True
def __init__(self, base_path, path):
self.base_path = base_path
self.path = path
self.full = os.path.join(base_path, path)
self.size = 'N/A'
self.mtime = 'N/A'
def __repr__(self):
return '<%s %s:%s>' % (
self.__class__.__name__,
self.base_path, self.path)
def _popget(d, key, default=None):
"""
Pop the key if found (else return default)
"""
if key in d:
return d.pop(key)
return default
def _space_prefix(pref, full, sep=None, indent=None, include_sep=True):
"""
Anything shared by pref and full will be replaced with spaces
in full, and full returned.
"""
if sep is None:
sep = os.path.sep
pref = pref.split(sep)
full = full.split(sep)
padding = []
while pref and full and pref[0] == full[0]:
if indent is None:
padding.append(' ' * (len(full[0]) + len(sep)))
else:
padding.append(' ' * indent)
full.pop(0)
pref.pop(0)
if padding:
if include_sep:
return ''.join(padding) + sep + sep.join(full)
else:
return ''.join(padding) + sep.join(full)
else:
return sep.join(full)
def _make_pattern(pat):
if pat is None:
return None
if isinstance(pat, (six.binary_type, six.text_type)):
pat = re.compile(pat)
if hasattr(pat, 'search'):
return pat.search
if callable(pat):
return pat
assert 0, (
"Cannot make callable pattern object out of %r" % pat)
def setup_module(module=None):
"""
This is used by py.test if it is in the module, so you can
import this directly.
Use like::
from paste.fixture import setup_module
"""
# Deprecated June 2008
import warnings
warnings.warn(
'setup_module is deprecated',
DeprecationWarning, 2)
if module is None:
# The module we were called from must be the module...
module = sys._getframe().f_back.f_globals['__name__']
if isinstance(module, (six.binary_type, six.text_type)):
module = sys.modules[module]
if hasattr(module, 'reset_state'):
module.reset_state()
def html_unquote(v):
"""
Unquote (some) entities in HTML. (incomplete)
"""
    for ent, repl in [('&nbsp;', ' '), ('&gt;', '>'),
                      ('&lt;', '<'), ('&quot;', '"'),
                      ('&amp;', '&')]:
v = v.replace(ent, repl)
return v
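# Illustrative end-to-end sketch (not part of the original module): exercise
# TestApp against a minimal inline WSGI application.
if __name__ == '__main__':
    def _demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html')])
        return [b'<html><body><a href="/next">next page</a></body></html>']
    _demo_res = TestApp(_demo_app).get('/')
    assert _demo_res.status == 200
    _demo_res.mustcontain('next page')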
|
ptdtan/Ragout
|
refs/heads/master
|
lib/networkx/algorithms/traversal/depth_first_search.py
|
22
|
"""
==================
Depth-first search
==================
Basic algorithms for depth-first searching.
Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py
by D. Eppstein, July 2004.
"""
__author__ = """\n""".join(['Aric Hagberg <hagberg@lanl.gov>'])
__all__ = ['dfs_edges', 'dfs_tree',
'dfs_predecessors', 'dfs_successors',
'dfs_preorder_nodes','dfs_postorder_nodes',
'dfs_labeled_edges']
import networkx as nx
from collections import defaultdict
def dfs_edges(G,source=None):
"""Produce edges in a depth-first-search starting at source."""
# Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py
# by D. Eppstein, July 2004.
if source is None:
# produce edges for all components
nodes=G
else:
# produce edges for components with source
nodes=[source]
visited=set()
for start in nodes:
if start in visited:
continue
visited.add(start)
stack = [(start,iter(G[start]))]
while stack:
parent,children = stack[-1]
try:
child = next(children)
if child not in visited:
yield parent,child
visited.add(child)
stack.append((child,iter(G[child])))
except StopIteration:
stack.pop()
def dfs_tree(G, source=None):
"""Return directed tree of depth-first-search from source."""
T = nx.DiGraph()
if source is None:
T.add_nodes_from(G)
else:
T.add_node(source)
T.add_edges_from(dfs_edges(G,source))
return T
def dfs_predecessors(G, source=None):
"""Return dictionary of predecessors in depth-first-search from source."""
return dict((t,s) for s,t in dfs_edges(G,source=source))
def dfs_successors(G, source=None):
"""Return dictionary of successors in depth-first-search from source."""
d=defaultdict(list)
for s,t in dfs_edges(G,source=source):
d[s].append(t)
return dict(d)
def dfs_postorder_nodes(G,source=None):
"""Produce nodes in a depth-first-search post-ordering starting
from source.
"""
post=(v for u,v,d in nx.dfs_labeled_edges(G,source=source)
if d['dir']=='reverse')
# chain source to end of pre-ordering
# return chain(post,[source])
return post
def dfs_preorder_nodes(G,source=None):
"""Produce nodes in a depth-first-search pre-ordering starting at source."""
pre=(v for u,v,d in nx.dfs_labeled_edges(G,source=source)
if d['dir']=='forward')
# chain source to beginning of pre-ordering
# return chain([source],pre)
return pre
def dfs_labeled_edges(G,source=None):
"""Produce edges in a depth-first-search starting at source and
labeled by direction type (forward, reverse, nontree).
"""
# Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py
# by D. Eppstein, July 2004.
if source is None:
# produce edges for all components
nodes=G
else:
# produce edges for components with source
nodes=[source]
visited=set()
for start in nodes:
if start in visited:
continue
yield start,start,{'dir':'forward'}
visited.add(start)
stack = [(start,iter(G[start]))]
while stack:
parent,children = stack[-1]
try:
child = next(children)
if child in visited:
yield parent,child,{'dir':'nontree'}
else:
yield parent,child,{'dir':'forward'}
visited.add(child)
stack.append((child,iter(G[child])))
except StopIteration:
stack.pop()
if stack:
yield stack[-1][0],parent,{'dir':'reverse'}
yield start,start,{'dir':'reverse'}
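# Illustrative usage sketch (not part of the original module):
#   >>> G = nx.Graph([(0, 1), (1, 2), (2, 3)])
#   >>> list(dfs_edges(G, source=0))
#   [(0, 1), (1, 2), (2, 3)]
#   >>> list(dfs_preorder_nodes(G, source=0))
#   [0, 1, 2, 3]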
|
jkonecny12/pykickstart
|
refs/heads/master
|
pykickstart/commands/multipath.py
|
8
|
#
# Chris Lumens <clumens@redhat.com>
# Peter Jones <pjones@redhat.com>
#
# Copyright 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import BaseData, KickstartCommand
from pykickstart.errors import KickstartValueError, formatErrorMsg
from pykickstart.options import KSOptionParser
from pykickstart.i18n import _
class FC6_MpPathData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.mpdev = kwargs.get("mpdev", "")
self.device = kwargs.get("device", "")
self.rule = kwargs.get("rule", "")
def __str__(self):
return " --device=%s --rule=\"%s\"" % (self.device, self.rule)
class FC6_MultiPathData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.paths = kwargs.get("paths", [])
def __str__(self):
retval = BaseData.__str__(self)
for path in self.paths:
retval += "multipath --mpdev=%s %s\n" % (self.name, path.__str__())
return retval
class FC6_MultiPath(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=50, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.mpaths = kwargs.get("mpaths", [])
def __str__(self):
retval = ""
for mpath in self.mpaths:
retval += mpath.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--name", dest="name", action="store", type="string",
required=1)
op.add_option("--device", dest="device", action="store", type="string",
required=1)
op.add_option("--rule", dest="rule", action="store", type="string",
required=1)
return op
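    # Illustrative kickstart input this command parses (sketch; device and
    # rule values are hypothetical):
    #   multipath --name=mpath0 --device=/dev/sda --rule="failover"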
def parse(self, args):
(opts, _extra) = self.op.parse_args(args=args, lineno=self.lineno)
dd = FC6_MpPathData()
self._setToObj(self.op, opts, dd)
dd.lineno = self.lineno
dd.mpdev = dd.mpdev.split('/')[-1]
parent = None
for x in range(0, len(self.mpaths)):
mpath = self.mpaths[x]
for path in mpath.paths:
if path.device == dd.device:
mapping = {"device": path.device, "multipathdev": path.mpdev}
raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("Device '%(device)s' is already used in multipath '%(multipathdev)s'") % mapping))
if mpath.name == dd.mpdev:
parent = x
        if parent is None:
            mpath = FC6_MultiPathData()
            mpath.name = dd.mpdev
            mpath.paths.append(dd)
            return mpath
        else:
            mpath = self.mpaths[parent]
            mpath.paths.append(dd)
        return dd
def dataList(self):
return self.mpaths
|
proevo/pythondotorg
|
refs/heads/master
|
jobs/feeds.py
|
3
|
from django.contrib.syndication.views import Feed
from django.urls import reverse_lazy
from .models import Job
class JobFeed(Feed):
""" Python.org Jobs RSS Feed """
title = "Python.org Jobs Feed"
description = "Python jobs from Python.org"
link = reverse_lazy('jobs:job_list')
def items(self):
return Job.objects.approved()[:20]
def item_title(self, item):
return item.display_name
def item_description(self, item):
""" Description """
return '\n'.join([
item.display_location,
item.description.rendered,
item.requirements.rendered,
])
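# Illustrative sketch (not part of the original file): the feed is typically
# wired into a URLconf, e.g.
#   path('feed/rss/', JobFeed(), name='jobs_rss')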
|
mohammed-alfatih/servo
|
refs/heads/master
|
tests/wpt/harness/docs/conf.py
|
207
|
# -*- coding: utf-8 -*-
#
# wptrunner documentation build configuration file, created by
# sphinx-quickstart on Mon May 19 18:14:20 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wptrunner'
copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wptrunnerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'wptrunner.tex', u'wptrunner Documentation',
u'James Graham', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wptrunner', u'wptrunner Documentation',
[u'James Graham'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wptrunner', u'wptrunner Documentation',
u'James Graham', 'wptrunner', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'mozlog': ('http://mozbase.readthedocs.org/en/latest/', None)}
|
HERA-Team/pyuvdata
|
refs/heads/multi_source
|
scripts/readwrite_uvfits.py
|
1
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Read in a uvfits file and write a new one out."""
import argparse
import os.path as op
from pyuvdata import UVData
parser = argparse.ArgumentParser()
parser.add_argument("uvfits_read", help="name of a uvfits file to read in")
parser.add_argument("uvfits_write", help="name of a uvfits file to write out")
args = parser.parse_args()
uvfits_file_in = args.uvfits_read
if not op.isfile(uvfits_file_in):
raise IOError("There is no file named {}".format(args.uvfits_file_in))
uvfits_file_out = args.uvfits_write
this_uv = UVData()
this_uv.read_uvfits(uvfits_file_in)
this_uv.write_uvfits(uvfits_file_out)
del this_uv
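# Hedged usage sketch (not part of the script; file names are hypothetical):
#   python readwrite_uvfits.py input.uvfits output.uvfits
# Both positional arguments are required; the input must be an existing file.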
|
Serag8/Bachelor
|
refs/heads/master
|
google_appengine/lib/webob-1.1.1/webob/__init__.py
|
21
|
from webob.datetime_utils import *
from webob.request import *
from webob.response import *
from webob.util import html_escape
__all__ = [
'Request', 'Response',
'UTC', 'day', 'week', 'hour', 'minute', 'second', 'month', 'year',
'html_escape'
]
BaseRequest.ResponseClass = Response
Response.RequestClass = Request
__version__ = '1.1'
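# Hedged usage sketch (illustrative, not shipped with the package): the names
# re-exported above cover a basic request/response round trip.
# req = Request.blank('/')
# res = Response(body='hello', content_type='text/plain')
# assert req.ResponseClass is Response and res.RequestClass is Request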
|
bejmy/backend
|
refs/heads/master
|
bejmy/transactions/admin.py
|
1
|
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.db.models import DecimalField, Sum
from django.db.models.functions import Coalesce
from django.utils.translation import ugettext as _
from mptt.admin import TreeRelatedFieldListFilter
from rangefilter.filter import DateRangeFilter
from bejmy.transactions.forms import TransactionForm
from bejmy.transactions.models import Transaction
from bejmy.categories.models import Category
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from bejmy.transactions.formats import MBankCSVFormat
class TransactionResource(resources.ModelResource):
    def skip_row(self, instance, original):
        # Skip rows that were already imported (existing pk) or are zero-amount.
        if original.pk or instance.amount == 0:
            return True
        return False
def before_import_row(self, row, **kwargs):
user_pk = kwargs['user'].pk
if row['user'] is None:
row['user'] = user_pk
if row['created_by'] is None:
row['created_by'] = user_pk
if row['modified_by'] is None:
row['modified_by'] = user_pk
from bejmy.accounts.models import Account
if row['source']:
try:
source = Account.objects.get(user_id=user_pk, account_number=row['source']) # noqa
except Account.DoesNotExist:
row['source'] = None
else:
row['source'] = source.pk
if row['destination']:
try:
destination = Account.objects.get(user_id=user_pk, account_number=row['destination']) # noqa
except Account.DoesNotExist:
row['destination'] = None
else:
row['destination'] = destination.pk
class Meta:
model = Transaction
import_id_fields = ['import_hash']
fields = [
'id',
'user',
'source',
'destination',
'amount',
'description',
'datetime',
'balanced',
'balanced_changed',
'transaction_type',
'category',
'created_by',
'created_at',
'modified_by',
'modified_at',
'status',
'tags',
'import_hash',
]
class CategoryFilter(TreeRelatedFieldListFilter):
mptt_level_indent = 3
template = 'admin/filter_dropdown.html'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Workaround: route lookups through the filter's tree-aware kwarg.
        self.lookup_kwarg = self.changed_lookup_kwarg
def field_choices(self, field, request, model_admin):
"""Use existing `padding_style` as prefix for select options."""
field.rel.limit_choices_to = {'user': request.user}
choices = []
original_choices = super().field_choices(field, request, model_admin)
        for pk, val, padding_style in original_choices:
            # padding_style looks like 'padding-left:6px'; convert the pixel
            # count into a dash prefix so option labels show nesting depth.
            a = padding_style.index(':') + 1
            b = padding_style.index('px')
            choices.append((pk, val, '-' * int(padding_style[a:b])))
return choices
class TransactionChangeList(ChangeList):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.get_summary()
self.get_categories()
def get_categories(self):
queryset = self.queryset._clone().filter(
transaction_type=Transaction.TRANSACTION_WITHDRAWAL)
queryset = queryset.values_list('category')
queryset = queryset.annotate(amount=Sum('amount'))
category_amount = queryset.order_by('-amount')
        if category_amount:
            # Bulk-fetch Category objects by pk (first element of each
            # (pk, amount) pair); None pks fall back to the 'Other' label.
            categories = Category.objects.in_bulk(
                filter(None, tuple(zip(*category_amount))[0]))
            self.categories = ((categories.get(pk, 'Other'), amount) for pk, amount in category_amount)  # noqa
else:
self.categories = ()
def _get_summary_entry(self, summary, key, **filter_kwargs):
queryset = self.queryset._clone().filter(**filter_kwargs)
field = DecimalField(max_digits=9, decimal_places=2)
aggregate_kwargs = {
key: Coalesce(Sum('amount', output_field=field), 0)
}
summary.update(queryset.aggregate(**aggregate_kwargs))
return summary
def get_summary(self):
queries = {
_('Planned'): {'status': Transaction.STATUS_PLANNED},
_('Registered'): {'status': Transaction.STATUS_REGISTERED},
_('Balanced'): {'status': Transaction.STATUS_BALANCED},
_('Total'): {},
}
self.summary = []
for name, filters in queries.items():
self.summary.append(self._get_single_summary(name, **filters))
def _get_single_summary(self, name, **extra_filters):
summary = {}
summary['name'] = name
query = {
'withdrawal': {
'transaction_type': Transaction.TRANSACTION_WITHDRAWAL
},
'deposit': {
'transaction_type': Transaction.TRANSACTION_DEPOSIT,
},
'transfer': {
'transaction_type': Transaction.TRANSACTION_TRANSFER,
},
}
        for key, filters in query.items():
            # extra_filters is always a dict here (possibly empty).
            filters.update(extra_filters)
            self._get_summary_entry(summary, key, **filters)
summary['total'] = summary['deposit'] - summary['withdrawal']
return summary
class TransactionAdminBase:
_form = TransactionForm
fieldset_base = (None, {
'fields': (
'source',
'destination',
'amount',
'description',
'datetime',
'balanced',
'category',
'tags',
)
})
def get_fieldsets(self, request, obj=None):
fieldsets = [self.fieldset_base]
return fieldsets
@admin.register(Transaction)
class TransactionAdmin(TransactionAdminBase, ImportExportModelAdmin):
change_list_template = 'admin/transactions/transaction/change_list.html'
resource_class = TransactionResource
formats = [MBankCSVFormat]
def get_changelist(self, request, **kwargs):
return TransactionChangeList
fieldset_info = ('Info', {
'fields': (
'status',
'transaction_type',
'user',
'balanced_changed',
'created_at',
'created_by',
'modified_at',
'modified_by',
),
})
def get_fieldsets(self, request, obj=None):
fieldsets = super().get_fieldsets(request, obj)
if obj is not None:
fieldsets.append(self.fieldset_info)
return fieldsets
def get_form(self, request, *args, **kwargs):
self._form.user = request.user
form = super().get_form(request, *args, form=self._form)
for field in ('source', 'destination', 'category'):
form.base_fields[field].widget.can_add_related = False
form.base_fields[field].widget.can_change_related = False
return form
list_display_links = (
'__str__',
)
list_display = (
'__str__',
'amount',
'description',
'category',
'tag_list',
'balanced',
'user',
'status',
'transaction_type',
'source',
'destination',
)
readonly_fields = (
'balanced_changed',
'created_at',
'created_by',
'modified_at',
'modified_by',
'status',
'transaction_type',
'user',
)
list_filter = (
('datetime', DateRangeFilter),
('category', CategoryFilter),
'tags',
'transaction_type',
'status',
'source',
'destination',
'user',
)
list_editable = (
'balanced',
)
search_fields = (
'description',
)
date_hierarchy = 'datetime'
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
def get_queryset(self, request, *args, **kwargs):
queryset = super().get_queryset(request, *args, **kwargs)
if not request.user.is_superuser:
queryset = queryset.filter(user=request.user)
queryset = queryset.prefetch_related('tags', 'category', 'user',
'source', 'destination')
return queryset
|
mjrulesamrat/django-rest-auth
|
refs/heads/master
|
test_settings.py
|
22
|
import django
import os
import sys
PROJECT_ROOT = os.path.abspath(os.path.split(os.path.split(__file__)[0])[0])
ROOT_URLCONF = 'urls'
STATIC_URL = '/static/'
STATIC_ROOT = '%s/staticserve' % PROJECT_ROOT
STATICFILES_DIRS = (
('global', '%s/static' % PROJECT_ROOT),
)
UPLOADS_DIR_NAME = 'uploads'
MEDIA_URL = '/%s/' % UPLOADS_DIR_NAME
MEDIA_ROOT = os.path.join(PROJECT_ROOT, '%s' % UPLOADS_DIR_NAME)
IS_DEV = False
IS_STAGING = False
IS_PROD = False
IS_TEST = 'test' in sys.argv or 'test_coverage' in sys.argv
if django.VERSION[:2] >= (1, 3):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
else:
DATABASE_ENGINE = 'sqlite3'
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.static',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
]
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.humanize',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration'
]
SECRET_KEY = "38dh*skf8sjfhs287dh&^hd8&3hdg*j2&sd"
ACCOUNT_ACTIVATION_DAYS = 1
SITE_ID = 1
MIGRATION_MODULES = {
'authtoken': 'authtoken.migrations',
}
|
ampax/edx-platform-backup
|
refs/heads/live
|
lms/djangoapps/instructor_task/tests/factories.py
|
124
|
import json
import factory
from factory.django import DjangoModelFactory
from student.tests.factories import UserFactory as StudentUserFactory
from instructor_task.models import InstructorTask
from celery.states import PENDING
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class InstructorTaskFactory(DjangoModelFactory):
FACTORY_FOR = InstructorTask
task_type = 'rescore_problem'
course_id = SlashSeparatedCourseKey("MITx", "999", "Robot_Super_Course")
task_input = json.dumps({})
task_key = None
task_id = None
task_state = PENDING
task_output = None
requester = factory.SubFactory(StudentUserFactory)
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/scipy/special/tests/test_pcf.py
|
53
|
"""Tests for parabolic cylinder functions.
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import scipy.special as sc
def test_pbwa_segfault():
# Regression test for https://github.com/scipy/scipy/issues/6208.
#
# Data generated by mpmath.
#
w = 1.02276567211316867161
wp = -0.48887053372346189882
assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0)
def test_pbwa_nan():
    # Check that NaNs are returned outside of the range in which the
    # implementation is accurate.
pts = [(-6, -6), (-6, 6), (6, -6), (6, 6)]
for p in pts:
assert_equal(sc.pbwa(*p), (np.nan, np.nan))
|
popazerty/openblackhole-SH4
|
refs/heads/master
|
lib/python/Components/Converter/EcmCryptoInfo.py
|
15
|
#
# EcmCryptoInfo Converter by mcbain // v0.1 // 20111109
#
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Poll import Poll
import os
ECM_INFO = '/tmp/ecm.info'
old_ecm_mtime = None
data = None
class EcmCryptoInfo(Poll, Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.active = False
self.visible = config.usage.show_cryptoinfo.value
self.textvalue = ''
self.poll_interval = 2*1000
if self.visible:
self.poll_enabled = True
else:
self.poll_enabled = False
@cached
def getText(self):
if not self.visible:
return ''
ecmdata = self.getEcmData()
return ecmdata
text = property(getText)
def getEcmData(self):
global old_ecm_mtime
global data
try:
ecm_mtime = os.stat(ECM_INFO).st_mtime
except:
ecm_mtime = None
if ecm_mtime != old_ecm_mtime:
old_ecm_mtime = ecm_mtime
data = self.getEcmInfo()
return data
def getEcmInfo(self):
        try:
            with open(ECM_INFO, 'rb') as f:
                ecm = f.readlines()
ecminfo = {}
for line in ecm:
d = line.split(':', 1)
if len(d) > 1:
ecminfo[d[0].strip()] = d[1].strip()
            # ecminfo is a dictionary; empty means nothing usable was parsed
            if not ecminfo:
                return 'No info from emu or FTA'
using = ecminfo.get('using', '')
if using:
# CCcam
if using == 'fta':
return 'Free to Air'
ecmInfoString=''
casys=''
state='Source: '
caid = ecminfo.get('caid', '')
address = ecminfo.get('address', '')
hops = ecminfo.get('hops', '')
ecmtime = ecminfo.get('ecm time', '')
if caid:
                if 'x' in caid:
idx = caid.index('x')
caid = caid[idx+1:]
if len(caid) == 3:
caid = '0%s' % caid
caid = caid.upper()
casys = 'Caid: '+caid
if address:
retaddress = '%s %s' % (_(' Source:'), address)
if address == ('/dev/sci0'):
state = (' Source: Lower slot')
if address == ('/dev/sci1'):
state = (' Source: Upper slot')
if address != ('/dev/sci0') and address != ('/dev/sci1'):
state = retaddress
if len(state) > 28:
state = ('%s...') % state[:25]
if hops:
hops = '%s %s' % (_(' Hops:'), hops)
if ecmtime:
ecmtime = '%s %ss' % (_(' Time:'), ecmtime)
if casys != '':
ecmInfoString = '%s ' % casys
if state != 'Source: ':
ecmInfoString = '%s%s ' % (ecmInfoString, state)
if state == 'Source: ':
ecmInfoString += state
ecmInfoString = '%s%s ' % (ecmInfoString, using)
if hops != '' and hops != ' Hops: 0':
ecmInfoString = '%s%s ' % (ecmInfoString, hops)
if ecmtime != '':
ecmInfoString = '%s%s ' % (ecmInfoString, ecmtime)
self.textvalue = ecmInfoString
else:
return 'No info from emu or unknown emu'
except:
self.textvalue = ''
return self.textvalue
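# Hedged format sketch (assumed from the parser above, values illustrative):
# /tmp/ecm.info is read line by line as 'key: value' pairs, e.g.
#   using: CCcam-s2s
#   caid: 0x0B00
#   address: /dev/sci0
#   hops: 1
#   ecm time: 0.251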
|
Decker108/MangaFeedr
|
refs/heads/master
|
src/model/Feed.py
|
1
|
'''
Created on Mar 24, 2013
@author: Admin
'''
class Feed(object):
def __init__(self, channelTitle=None, channelLink=None, latestUpdateDate=None, markedReadDate=None):
'''Constructor'''
self.id = None
self.markedReadDate = markedReadDate
self.latestUpdateDate = latestUpdateDate
self.channelTitle = channelTitle
self.channelLink = channelLink
self.brokenLink = 0
def __str__(self):
return "".join(["Title: ",
str(self.channelTitle),
" Link: ",
str(self.channelLink)])
|
Work4Labs/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/regressiontests/model_inheritance_regress/models.py
|
75
|
import datetime
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ('name',)
def __unicode__(self):
return u"%s the place" % self.name
class Restaurant(Place):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
def __unicode__(self):
return u"%s the restaurant" % self.name
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField()
def __unicode__(self):
return u"%s the italian restaurant" % self.name
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
capacity = models.IntegerField()
def __unicode__(self):
return u"%s the parking lot" % self.name
class ParkingLot2(Place):
# In lieu of any other connector, an existing OneToOneField will be
# promoted to the primary key.
parent = models.OneToOneField(Place)
class ParkingLot3(Place):
# The parent_link connector need not be the pk on the model.
primary_key = models.AutoField(primary_key=True)
parent = models.OneToOneField(Place, parent_link=True)
class Supplier(models.Model):
restaurant = models.ForeignKey(Restaurant)
class Wholesaler(Supplier):
retailer = models.ForeignKey(Supplier,related_name='wholesale_supplier')
class Parent(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
parent_data = models.IntegerField()
self_data = models.ForeignKey('self', null=True)
class SelfRefChild(SelfRefParent):
child_data = models.IntegerField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __unicode__(self):
return self.headline
class ArticleWithAuthor(Article):
author = models.CharField(max_length=100)
class M2MBase(models.Model):
articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
name = models.CharField(max_length=50)
class Evaluation(Article):
quality = models.IntegerField()
class Meta:
abstract = True
class QualityControl(Evaluation):
assignee = models.CharField(max_length=50)
class BaseM(models.Model):
base_name = models.CharField(max_length=100)
def __unicode__(self):
return self.base_name
class DerivedM(BaseM):
customPK = models.IntegerField(primary_key=True)
derived_name = models.CharField(max_length=100)
def __unicode__(self):
return "PK = %d, base_name = %s, derived_name = %s" \
% (self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
planned_date = models.DateField()
class Meta:
abstract = True
verbose_name_plural = u'Audits'
class CertificationAudit(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
class InternalCertificationAudit(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Check that abstract classes don't get m2m tables autocreated.
class Person(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class AbstractEvent(models.Model):
name = models.CharField(max_length=100)
attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
class Meta:
abstract = True
ordering = ('name',)
def __unicode__(self):
return self.name
class BirthdayParty(AbstractEvent):
pass
class BachelorParty(AbstractEvent):
pass
class MessyBachelorParty(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CommaSeparatedIntegerField(max_length=128)
inbound = models.BooleanField()
class TrainStation(Station):
zone = models.IntegerField()
|
EliotBerriot/django
|
refs/heads/master
|
django/views/generic/detail.py
|
306
|
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
class SingleObjectMixin(ContextMixin):
"""
Provides the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
context_object_name = None
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
query_pk_and_slug = False
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg)
slug = self.kwargs.get(self.slug_url_kwarg)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
if slug is not None and (pk is None or self.query_pk_and_slug):
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
if pk is None and slug is None:
raise AttributeError("Generic detail view %s must be called with "
"either an object pk or a slug."
% self.__class__.__name__)
try:
# Get the single item from the filtered queryset
obj = queryset.get()
except queryset.model.DoesNotExist:
raise Http404(_("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
def get_queryset(self):
"""
Return the `QuerySet` that will be used to look up the object.
Note that this method is called by the default implementation of
`get_object` and may not be called if `get_object` is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
return self.queryset.all()
def get_slug_field(self):
"""
Get the name of a slug field to be used to look up by slug.
"""
return self.slug_field
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
if self.context_object_name:
return self.context_object_name
elif isinstance(obj, models.Model):
            if obj._deferred:
obj = obj._meta.proxy_for_model
return obj._meta.model_name
else:
return None
def get_context_data(self, **kwargs):
"""
Insert the single object into the context dict.
"""
context = {}
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
context.update(kwargs)
return super(SingleObjectMixin, self).get_context_data(**context)
class BaseDetailView(SingleObjectMixin, View):
"""
A base view for displaying a single object
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_field = None
template_name_suffix = '_detail'
def get_template_names(self):
"""
Return a list of template names to be used for the request. May not be
called if render_to_response is overridden. Returns the following list:
* the value of ``template_name`` on the view (if provided)
* the contents of the ``template_name_field`` field on the
object instance that the view is operating upon (if available)
* ``<app_label>/<model_name><template_name_suffix>.html``
"""
try:
names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If self.template_name_field is set, grab the value of the field
# of that name from the object; this is the most specific template
# name, if given.
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
# The least-specific option is the default <app>/<model>_detail.html;
# only use this if the object in question is a model.
if isinstance(self.object, models.Model):
object_meta = self.object._meta
if self.object._deferred:
object_meta = self.object._meta.proxy_for_model._meta
names.append("%s/%s%s.html" % (
object_meta.app_label,
object_meta.model_name,
self.template_name_suffix
))
elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
names.append("%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.model_name,
self.template_name_suffix
))
# If we still haven't managed to find any template names, we should
# re-raise the ImproperlyConfigured to alert the user.
if not names:
raise
return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
"""
Render a "detail" view of an object.
By default this is a model instance looked up from `self.queryset`, but the
view will support display of *any* object by overriding `self.get_object()`.
"""
|
smaato/django-ses
|
refs/heads/master
|
example/views.py
|
6
|
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.mail import send_mail, EmailMessage
from django.shortcuts import render_to_response
def index(request):
return render_to_response('index.html')
def send_email(request):
if request.method == 'POST':
try:
subject = request.POST['subject']
message = request.POST['message']
from_email = request.POST['from']
html_message = bool(request.POST.get('html-message', False))
recipient_list = [request.POST['to']]
email = EmailMessage(subject, message, from_email, recipient_list)
if html_message:
email.content_subtype = 'html'
email.send()
except KeyError:
return HttpResponse('Please fill in all fields')
return HttpResponse('Email sent :)')
else:
return render_to_response('send-email.html')
|
ares/robottelo
|
refs/heads/master
|
robottelo/ui/products.py
|
3
|
"""Implements Products UI"""
from robottelo.ui.base import Base
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class Products(Base):
"""Manipulates Products from UI"""
is_katello = True
def navigate_to_entity(self):
"""Navigate to Product entity page"""
Navigator(self.browser).go_to_products()
def _search_locator(self):
"""Specify locator for Product key entity search procedure"""
return locators['prd.select']
def create(self, name, description=None, sync_plan=None, startdate=None,
create_sync_plan=False, gpg_key=None, sync_interval=None):
"""Creates new product from UI"""
self.click(locators['prd.new'])
self.assign_value(common_locators['name'], name)
if sync_plan and not create_sync_plan:
self.select(locators['prd.sync_plan'], sync_plan)
elif sync_plan and create_sync_plan:
self.click(locators['prd.new_sync_plan'])
self.assign_value(common_locators['name'], name)
if sync_interval:
self.select(locators['prd.sync_interval'], sync_interval)
self.assign_value(locators['prd.sync_startdate'], startdate)
self.click(common_locators['create'])
if gpg_key:
self.select(common_locators['gpg_key'], gpg_key)
if description:
self.assign_value(common_locators['description'], description)
self.click(common_locators['create'])
def update(self, name, new_name=None, new_desc=None,
new_sync_plan=None, new_gpg_key=None):
"""Updates product from UI"""
self.search_and_click(name)
self.click(tab_locators['prd.tab_details'])
if new_name:
self.click(locators['prd.name_edit'])
self.assign_value(locators['prd.name_update'], new_name)
self.click(common_locators['save'])
if new_desc:
self.click(locators['prd.desc_edit'])
            self.assign_value(locators['prd.desc_update'], new_desc)
self.click(common_locators['save'])
if new_gpg_key:
self.click(locators['prd.gpg_key_edit'])
self.select(locators['prd.gpg_key_update'], new_gpg_key)
self.click(common_locators['save'])
if new_sync_plan:
self.click(locators['prd.sync_plan_edit'])
self.select(locators['prd.sync_plan_update'], new_sync_plan)
self.click(common_locators['save'])
|
codester2/devide
|
refs/heads/master
|
modules/vtk_basic/vtkXMLPImageDataWriter.py
|
7
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkXMLPImageDataWriter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkXMLPImageDataWriter(), 'Writing vtkXMLPImageData.',
('vtkXMLPImageData',), (),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
DevBrush/latchbox
|
refs/heads/master
|
vendor/src/github.com/nsf/termbox-go/collect_terminfo.py
|
184
|
#!/usr/bin/env python
import sys, os, subprocess
def escaped(s):
return repr(s)[1:-1]
def tput(term, name):
try:
return subprocess.check_output(['tput', '-T%s' % term, name]).decode()
except subprocess.CalledProcessError as e:
return e.output.decode()
def w(s):
    if s is None:
return
sys.stdout.write(s)
terminals = {
'xterm' : 'xterm',
'rxvt-256color' : 'rxvt_256color',
'rxvt-unicode' : 'rxvt_unicode',
'linux' : 'linux',
'Eterm' : 'eterm',
'screen' : 'screen'
}
keys = [
"F1", "kf1",
"F2", "kf2",
"F3", "kf3",
"F4", "kf4",
"F5", "kf5",
"F6", "kf6",
"F7", "kf7",
"F8", "kf8",
"F9", "kf9",
"F10", "kf10",
"F11", "kf11",
"F12", "kf12",
"INSERT", "kich1",
"DELETE", "kdch1",
"HOME", "khome",
"END", "kend",
"PGUP", "kpp",
"PGDN", "knp",
"KEY_UP", "kcuu1",
"KEY_DOWN", "kcud1",
"KEY_LEFT", "kcub1",
"KEY_RIGHT", "kcuf1"
]
funcs = [
"T_ENTER_CA", "smcup",
"T_EXIT_CA", "rmcup",
"T_SHOW_CURSOR", "cnorm",
"T_HIDE_CURSOR", "civis",
"T_CLEAR_SCREEN", "clear",
"T_SGR0", "sgr0",
"T_UNDERLINE", "smul",
"T_BOLD", "bold",
"T_BLINK", "blink",
"T_REVERSE", "rev",
"T_ENTER_KEYPAD", "smkx",
"T_EXIT_KEYPAD", "rmkx"
]
def iter_pairs(iterable):
    iterable = iter(iterable)
    while True:
        try:
            yield (next(iterable), next(iterable))
        except StopIteration:
            # PEP 479: a generator must return instead of leaking StopIteration.
            return
def do_term(term, nick):
w("// %s\n" % term)
w("var %s_keys = []string{\n\t" % nick)
for k, v in iter_pairs(keys):
w('"')
w(escaped(tput(term, v)))
w('",')
w("\n}\n")
w("var %s_funcs = []string{\n\t" % nick)
for k,v in iter_pairs(funcs):
w('"')
if v == "sgr":
w("\\033[3%d;4%dm")
elif v == "cup":
w("\\033[%d;%dH")
else:
w(escaped(tput(term, v)))
w('", ')
w("\n}\n\n")
def do_terms(d):
w("var terms = []struct {\n")
w("\tname string\n")
w("\tkeys []string\n")
w("\tfuncs []string\n")
w("}{\n")
for k, v in d.items():
w('\t{"%s", %s_keys, %s_funcs},\n' % (k, v, v))
w("}\n\n")
w("// +build !windows\n\npackage termbox\n\n")
for k,v in terminals.items():
do_term(k, v)
do_terms(terminals)
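# Hedged usage sketch (output file name is an assumption): regenerate the Go
# tables by redirecting stdout, e.g.
#   python collect_terminfo.py > terminfo.go
# The script emits the build tag, the package clause, one keys/funcs string
# array pair per terminal, and the final `terms` table.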
|
Zeken/audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg-sampler.lv2/waflib/Tools/bison.py
|
332
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Task
from waflib.TaskGen import extension
class bison(Task.Task):
color='BLUE'
run_str='${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}'
ext_out=['.h']
@extension('.y','.yc','.yy')
def big_bison(self,node):
has_h='-d'in self.env['BISONFLAGS']
outs=[]
if node.name.endswith('.yc'):
outs.append(node.change_ext('.tab.cc'))
if has_h:
outs.append(node.change_ext('.tab.hh'))
else:
outs.append(node.change_ext('.tab.c'))
if has_h:
outs.append(node.change_ext('.tab.h'))
tsk=self.create_task('bison',node,outs)
tsk.cwd=node.parent.get_bld().abspath()
self.source.append(outs[0])
def configure(conf):
conf.find_program('bison',var='BISON')
conf.env.BISONFLAGS=['-d']
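# Hedged usage sketch (assumed wscript, not part of this tool; also assumes a
# C compiler tool is loaded):
# def configure(conf):
#     conf.load('compiler_c')
#     conf.load('bison')   # runs configure() above and finds the program
# def build(bld):
#     bld.program(source='parser.y main.c', target='app')
#     # '.y'/'.yc'/'.yy' sources are routed through big_bison via @extension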
|
leggitta/mne-python
|
refs/heads/master
|
mne/stats/parametric.py
|
3
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import numpy as np
from functools import reduce
from string import ascii_uppercase
from ..externals.six import string_types
from ..utils import deprecated
from ..fixes import matrix_rank
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def _f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test
p-value : float
The associated p-value from the F-distribution
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
    3. The population standard deviations of the groups are all equal. This
       property is known as homoscedasticity.
    If these assumptions are not true for a given set of data, it may still be
    possible to use the Kruskal-Wallis H-test (`stats.kruskal`_), although
    with some loss of power.
    The algorithm is from Heiman[2], pp.394-7.
    See scipy.stats.f_oneway, which should give the same results while
    being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
from scipy import stats
sf = stats.f.sf
n_classes = len(args)
n_samples_per_class = np.array([len(a) for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = reduce(lambda x, y: x + y,
[np.sum(a ** 2, axis=0) for a in args])
sums_args = [np.sum(a, axis=0) for a in args]
square_of_sums_alldata = reduce(lambda x, y: x + y, sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
    prob = sf(f, dfbn, dfwn)  # stats.f.sf expects (x, dfn, dfd)
return f, prob
def f_oneway(*args):
"""Call scipy.stats.f_oneway, but return only f-value"""
return _f_oneway(*args)[0]
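# Hedged usage sketch (illustrative data): three hypothetical groups.
# rng = np.random.RandomState(0)
# groups = [rng.randn(10) + shift for shift in (0., 0., 1.)]
# fval = f_oneway(*groups)   # scalar F-value across the three samples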
def _map_effects(n_factors, effects):
"""Map effects to indices"""
if n_factors > len(ascii_uppercase):
raise ValueError('Maximum number of factors supported is 26')
factor_names = list(ascii_uppercase[:n_factors])
if isinstance(effects, string_types):
        if '*' in effects and ':' in effects:
            raise ValueError('"*" and ":" cannot be combined in effects')
        elif '+' in effects and ':' in effects:
            raise ValueError('"+" and ":" cannot be combined in effects')
elif effects == 'all':
effects = None
elif len(effects) == 1 or ':' in effects:
effects = [effects]
elif '+' in effects:
# all main effects
effects = effects.split('+')
elif '*' in effects:
pass # handle later
else:
raise ValueError('"{0}" is not a valid option for "effects"'
.format(effects))
if isinstance(effects, list):
        bad_names = [e for e in effects if e not in factor_names]
        if len(bad_names) > 0:
            raise ValueError('Effect names: {0} are not valid. They should '
                             'be the first `n_factors` ({1}) characters '
                             'from the alphabet'.format(bad_names, n_factors))
indices = list(np.arange(2 ** n_factors - 1))
names = list()
for this_effect in indices:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
this_code = (n_factors - 1) - np.where(contrast_idx == 1)[0]
this_name = [factor_names[e] for e in this_code]
this_name.sort()
names.append(':'.join(this_name))
if effects is None or isinstance(effects, string_types):
effects_ = names
else:
effects_ = effects
selection = [names.index(sel) for sel in effects_]
names = [names[sel] for sel in selection]
if isinstance(effects, string_types):
if '*' in effects:
# hierarchical order of effects
# the * based effect can be used as stop index
sel_ind = names.index(effects.replace('*', ':')) + 1
names = names[:sel_ind]
selection = selection[:sel_ind]
return selection, names
def _get_contrast_indices(effect_idx, n_factors):
"""Henson's factor coding, see num2binvec"""
binrepr = np.binary_repr(effect_idx, n_factors)
return np.array([int(i) for i in binrepr], dtype=int)
def _iter_contrasts(n_subjects, factor_levels, effect_picks):
""" Aux Function: Setup contrasts """
from scipy.signal import detrend
sc = []
n_factors = len(factor_levels)
# prepare computation of Kronecker products
for n_levels in factor_levels:
# for each factor append
# 1) column vector of length == number of levels,
# 2) square matrix with diagonal == number of levels
# main + interaction effects for contrasts
sc.append([np.ones([n_levels, 1]),
detrend(np.eye(n_levels), type='constant')])
for this_effect in effect_picks:
contrast_idx = _get_contrast_indices(this_effect + 1, n_factors)
c_ = sc[0][contrast_idx[n_factors - 1]]
for i_contrast in range(1, n_factors):
this_contrast = contrast_idx[(n_factors - 1) - i_contrast]
c_ = np.kron(c_, sc[i_contrast][this_contrast])
df1 = matrix_rank(c_)
df2 = df1 * (n_subjects - 1)
yield c_, df1, df2
@deprecated('"f_threshold_twoway_rm" is deprecated and will be removed in'
'MNE-0.11. Please use f_threshold_mway_rm instead')
def f_threshold_twoway_rm(n_subjects, factor_levels, effects='A*B',
pvalue=0.05):
return f_threshold_mway_rm(
n_subjects=n_subjects, factor_levels=factor_levels,
effects=effects, pvalue=pvalue)
def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B',
pvalue=0.05):
""" Compute f-value thesholds for a two-way ANOVA
Parameters
----------
n_subjects : int
The number of subjects to be analyzed.
factor_levels : list-like
The number of levels per factor.
effects : str
A string denoting the effect to be returned. The following
mapping is currently supported:
'A': main effect of A
'B': main effect of B
'A:B': interaction effect
'A+B': both main effects
'A*B': all three effects
pvalue : float
The p-value to be thresholded.
Returns
-------
f_threshold : list | float
list of f-values for each effect if the number of effects
requested > 2, else float.
See Also
--------
f_oneway
f_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
effect_picks, _ = _map_effects(len(factor_levels), effects)
f_threshold = []
for _, df1, df2 in _iter_contrasts(n_subjects, factor_levels,
effect_picks):
f_threshold.append(f(df1, df2).isf(pvalue))
return f_threshold if len(f_threshold) > 1 else f_threshold[0]
# The following functions based on MATLAB code by Rik Henson
# and Python code from the pvttble toolbox by Roger Lew.
@deprecated('"f_twoway_rm" is deprecated and will be removed in MNE 0.11."'
" Please use f_mway_rm instead")
def f_twoway_rm(data, factor_levels, effects='A*B', alpha=0.05,
correction=False, return_pvals=True):
"""This function is deprecated, use `f_mway_rm` instead"""
return f_mway_rm(data=data, factor_levels=factor_levels, effects=effects,
alpha=alpha, correction=correction,
return_pvals=return_pvals)
def f_mway_rm(data, factor_levels, effects='all', alpha=0.05,
correction=False, return_pvals=True):
"""M-way repeated measures ANOVA for fully balanced designs
Parameters
----------
data : ndarray
3D array where the first two dimensions are compliant
with a subjects X conditions scheme where the first
factor repeats slowest::
A1B1 A1B2 A2B1 A2B2
subject 1 1.34 2.53 0.97 1.74
subject ... .... .... .... ....
subject k 2.45 7.90 3.09 4.76
The last dimensions is thought to carry the observations
for mass univariate analysis.
factor_levels : list-like
The number of levels per factor.
effects : str | list
A string denoting the effect to be returned. The following
mapping is currently supported (example with 2 factors):
* ``'A'``: main effect of A
* ``'B'``: main effect of B
* ``'A:B'``: interaction effect
* ``'A+B'``: both main effects
* ``'A*B'``: all three effects
* ``'all'``: all effects (equals 'A*B' in a 2 way design)
If list, effect names are used: ``['A', 'B', 'A:B']``.
alpha : float
The significance threshold.
correction : bool
The correction method to be employed if one factor has more than two
levels. If True, sphericity correction using the Greenhouse-Geisser
method will be applied.
return_pvals : bool
If True, return p values corresponding to f values.
Returns
-------
f_vals : ndarray
An array of f values with length corresponding to the number
of effects estimated. The shape depends on the number of effects
estimated.
p_vals : ndarray
If not requested via return_pvals, defaults to an empty array.
See Also
--------
f_oneway
f_threshold_mway_rm
Notes
-----
.. versionadded:: 0.10
"""
from scipy.stats import f
if data.ndim == 2: # general purpose support, e.g. behavioural data
data = data[:, :, np.newaxis]
elif data.ndim > 3: # let's allow for some magic here.
data = data.reshape(
data.shape[0], data.shape[1], np.prod(data.shape[2:]))
effect_picks, _ = _map_effects(len(factor_levels), effects)
n_obs = data.shape[2]
n_replications = data.shape[0]
    # put the last axis in front to 'iterate' over mass-univariate instances
data = np.rollaxis(data, 2)
fvalues, pvalues = [], []
for c_, df1, df2 in _iter_contrasts(n_replications, factor_levels,
effect_picks):
y = np.dot(data, c_)
b = np.mean(y, axis=1)[:, np.newaxis, :]
ss = np.sum(np.sum(y * b, axis=2), axis=1)
mse = (np.sum(np.sum(y * y, axis=2), axis=1) - ss) / (df2 / df1)
fvals = ss / mse
fvalues.append(fvals)
if correction:
# sample covariances, leave off "/ (y.shape[1] - 1)" norm because
# it falls out.
v = np.array([np.dot(y_.T, y_) for y_ in y])
v = (np.array([np.trace(vv) for vv in v]) ** 2 /
(df1 * np.sum(np.sum(v * v, axis=2), axis=1)))
eps = v
df1, df2 = np.zeros(n_obs) + df1, np.zeros(n_obs) + df2
if correction:
df1, df2 = [d[None, :] * eps for d in (df1, df2)]
if return_pvals:
pvals = f(df1, df2).sf(fvals)
else:
pvals = np.empty(0)
pvalues.append(pvals)
# handle single effect returns
return [np.squeeze(np.asarray(vv)) for vv in (fvalues, pvalues)]
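# Hedged usage sketch (illustrative data): a 2x2 fully balanced design with
# 20 subjects and 5 mass-univariate observations, effects ordered A, B, A:B.
# rng = np.random.RandomState(0)
# data = rng.randn(20, 4, 5)   # subjects x (A1B1 A1B2 A2B1 A2B2) x observations
# fvals, pvals = f_mway_rm(data, factor_levels=[2, 2], effects='A*B')
# thresholds = f_threshold_mway_rm(20, [2, 2], effects='A*B', pvalue=0.05)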
|
nirmeshk/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/views/app3/__init__.py
|
9480
|
#
|
mhahn/troposphere
|
refs/heads/master
|
tests/test_rds.py
|
7
|
import unittest
import troposphere.rds as rds
from troposphere import If, Parameter, Ref
class TestRDS(unittest.TestCase):
def test_it_allows_an_rds_instance_created_from_a_snapshot(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=100,
DBInstanceClass='db.m1.small',
Engine='MySQL',
DBSnapshotIdentifier='SomeSnapshotIdentifier'
)
rds_instance.JSONrepr()
def test_it_allows_an_rds_instance_with_master_username_and_password(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
MasterUsername='SomeUsername',
MasterUserPassword='SomePassword'
)
rds_instance.JSONrepr()
def test_it_rds_instances_require_either_a_snapshot_or_credentials(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL'
)
with self.assertRaisesRegexp(
ValueError,
'Either \(MasterUsername and MasterUserPassword\) or'
' DBSnapshotIdentifier are required'
):
rds_instance.JSONrepr()
def test_it_allows_an_rds_replica(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
SourceDBInstanceIdentifier='SomeSourceDBInstanceIdentifier'
)
rds_instance.JSONrepr()
def test_replica_settings_are_inherited(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
SourceDBInstanceIdentifier='SomeSourceDBInstanceIdentifier',
BackupRetentionPeriod="1",
DBName="SomeName",
MasterUsername="SomeUsername",
MasterUserPassword="SomePassword",
PreferredBackupWindow="10:00-11:00",
MultiAZ=True,
DBSnapshotIdentifier="SomeDBSnapshotIdentifier",
DBSubnetGroupName="SomeDBSubnetGroupName",
)
with self.assertRaisesRegexp(
ValueError,
'BackupRetentionPeriod, DBName, DBSnapshotIdentifier, '
'DBSubnetGroupName, MasterUserPassword, MasterUsername, '
'MultiAZ, PreferredBackupWindow '
'properties can\'t be provided when '
'SourceDBInstanceIdentifier is present '
'AWS::RDS::DBInstance.'
):
rds_instance.JSONrepr()
def test_it_rds_instances_require_encryption_if_kms_key_provided(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
MasterUsername='SomeUsername',
MasterUserPassword='SomePassword',
KmsKeyId='arn:aws:kms:us-east-1:123456789012:key/'
'12345678-1234-1234-1234-123456789012'
)
with self.assertRaisesRegexp(
ValueError,
'If KmsKeyId is provided, StorageEncrypted is required'
):
rds_instance.JSONrepr()
def test_it_allows_an_rds_instance_with_iops(self):
# ensure troposphere works with longs and ints
try:
long_number = long(2000)
except NameError:
# Python 3 doesn't have 'long' anymore
long_number = 2000
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=200,
DBInstanceClass='db.m1.small',
Engine='MySQL',
MasterUsername='SomeUsername',
MasterUserPassword='SomePassword',
StorageType='io1',
Iops=long_number,
)
rds_instance.JSONrepr()
def test_optiongroup(self):
rds_optiongroup = rds.OptionGroup(
"OracleOptionGroup",
EngineName="oracle-ee",
MajorEngineVersion="12.1",
OptionGroupDescription="A test option group",
OptionConfigurations=[
rds.OptionConfiguration(
DBSecurityGroupMemberships=["default"],
OptionName="OEM",
Port="5500",
),
rds.OptionConfiguration(
OptionName="APEX",
),
]
)
rds_optiongroup.JSONrepr()
def test_fail_az_and_multiaz(self):
i = rds.DBInstance(
"NoAZAndMultiAZ",
MasterUsername="myuser",
MasterUserPassword="mypassword",
AllocatedStorage=10,
DBInstanceClass="db.m1.small",
Engine="postgres",
AvailabilityZone="us-east-1",
MultiAZ=True)
with self.assertRaisesRegexp(ValueError, "if MultiAZ is set to "):
i.JSONrepr()
def test_az_and_multiaz_funcs(self):
AWS_NO_VALUE = "AWS::NoValue"
db_az = "us-east-1"
db_multi_az = Parameter("dbmultiaz", Type="String")
i = rds.DBInstance(
"NoAZAndMultiAZ",
MasterUsername="myuser",
MasterUserPassword="mypassword",
AllocatedStorage=10,
DBInstanceClass="db.m1.small",
Engine="postgres",
AvailabilityZone=If("db_az", Ref(db_az), Ref(AWS_NO_VALUE)),
MultiAZ=Ref(db_multi_az),
)
i.validate()
def test_io1_storage_type_and_iops(self):
i = rds.DBInstance(
"NoAZAndMultiAZ",
MasterUsername="myuser",
MasterUserPassword="mypassword",
AllocatedStorage=10,
DBInstanceClass="db.m1.small",
Engine="postgres",
StorageType='io1')
with self.assertRaisesRegexp(ValueError,
"Must specify Iops if "):
i.JSONrepr()
def test_storage_to_iops_ratio(self):
i = rds.DBInstance(
"NoAZAndMultiAZ",
MasterUsername="myuser",
MasterUserPassword="mypassword",
DBInstanceClass="db.m1.small",
Engine="postgres",
StorageType='io1',
Iops=4000,
AllocatedStorage=10)
with self.assertRaisesRegexp(ValueError,
" must be at least 100 "):
i.JSONrepr()
i.AllocatedStorage = 100
with self.assertRaisesRegexp(ValueError,
" must be no less than 1/10th "):
i.JSONrepr()
i.AllocatedStorage = 400
i.JSONrepr()
class TestRDSValidators(unittest.TestCase):
def test_validate_iops(self):
with self.assertRaises(ValueError):
rds.validate_iops(500)
with self.assertRaises(ValueError):
rds.validate_iops(20000)
rds.validate_iops(2000)
def test_validate_storage_type(self):
for t in rds.VALID_STORAGE_TYPES:
rds.validate_storage_type(t)
with self.assertRaises(ValueError):
rds.validate_storage_type("bad_storage_type")
def test_validate_engine(self):
for e in rds.VALID_DB_ENGINES:
rds.validate_engine(e)
with self.assertRaises(ValueError):
rds.validate_engine("bad_engine")
def test_validate_license_model(self):
for lm in rds.VALID_LICENSE_MODELS:
rds.validate_license_model(lm)
with self.assertRaises(ValueError):
rds.validate_license_model("bad_license_model")
def test_validate_backup_window(self):
good_windows = ("10:00-11:00", "22:00-06:00")
for w in good_windows:
rds.validate_backup_window(w)
bad_format = ("bad_backup_window", "28:11-10:00", "10:00-28:11")
for w in bad_format:
with self.assertRaisesRegexp(ValueError, "must be in the format"):
rds.validate_backup_window(w)
with self.assertRaisesRegexp(ValueError, "must be at least 30 "):
rds.validate_backup_window("10:00-10:10")
def test_validate_maintenance_window(self):
good_windows = ("Mon:10:00-Mon:16:30", "Mon:10:00-Wed:10:00",
"Sun:16:00-Mon:11:00")
for w in good_windows:
rds.validate_maintenance_window(w)
bad_format = ("bad_mainteance", "Mon:10:00-Tue:28:00", "10:00-22:00")
for w in bad_format:
with self.assertRaisesRegexp(ValueError, "must be in the format"):
rds.validate_maintenance_window(w)
bad_days = ("Boo:10:00-Woo:10:30", "Boo:10:00-Tue:10:30",
"Mon:10:00-Boo:10:30")
for w in bad_days:
with self.assertRaisesRegexp(ValueError, " day part of ranges "):
rds.validate_maintenance_window(w)
with self.assertRaisesRegexp(ValueError, "must be at least 30 "):
rds.validate_maintenance_window("Mon:10:00-Mon:10:10")
def test_validate_backup_retention_period(self):
for d in (1, 10, 15, 35):
rds.validate_backup_retention_period(d)
with self.assertRaisesRegexp(ValueError,
" cannot be larger than 35 "):
rds.validate_backup_retention_period(40)
rds.validate_backup_retention_period(10)
|