code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
"""AFF4 Objects to enforce ACL policies."""
import email
import re
import urllib
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import email_alerts
from grr.lib import events
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import users as aff4_users
from grr.lib.authorization import client_approval_auth
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import flows_pb2
class Error(Exception):
  """Base class for all errors raised by this ACL/approval module."""
class ErrorClientDoesNotExist(Error):
  """Signals that an approval check targeted a client that does not exist."""
class Approval(aff4.AFF4Object):
  """An abstract approval request object.

  This object normally lives within the namespace:
  aff4:/ACL/...

  The aff4:/ACL namespace is not writable by users, hence all manipulation of
  this object must be done via dedicated flows. These flows use the server's
  access credentials for manipulating this object.
  """

  class SchemaCls(aff4.AFF4Object.SchemaCls):
    """The Schema for the Approval class."""

    REQUESTOR = aff4.Attribute("aff4:approval/requestor", rdfvalue.RDFString,
                               "Requestor of the approval.")

    # Versioned attribute: each grant adds one value, so the full approver
    # list is recovered by reading all versions (age=aff4.ALL_TIMES).
    APPROVER = aff4.Attribute("aff4:approval/approver", rdfvalue.RDFString,
                              "An approver for the request.", "approver")

    SUBJECT = aff4.Attribute("aff4:approval/subject", rdfvalue.RDFURN,
                             "Subject of the approval. I.e. the resource that "
                             "requires approved access.")

    REASON = aff4.Attribute("aff4:approval/reason",
                            rdfvalue.RDFString,
                            "The reason for requesting access to this client.")

    EMAIL_MSG_ID = aff4.Attribute("aff4:approval/email_msg_id",
                                  rdfvalue.RDFString,
                                  "The email thread message ID for this"
                                  "approval. Storing this allows for "
                                  "conversation threading.")

    EMAIL_CC = aff4.Attribute("aff4:approval/email_cc", rdfvalue.RDFString,
                              "Comma separated list of email addresses to "
                              "CC on approval emails.")

    NOTIFIED_USERS = aff4.Attribute("aff4:approval/notified_users",
                                    rdfvalue.RDFString,
                                    "Comma-separated list of GRR users "
                                    "notified about this approval.")

  def CheckAccess(self, token):
    """Check that this approval applies to the given token.

    Args:
      token: User's credentials token.

    Returns:
      True if access is granted, raises access_control.UnauthorizedAccess
      otherwise.

    Raises:
      access_control.UnauthorizedAccess: if access is rejected.
    """
    _ = token
    raise NotImplementedError()

  @staticmethod
  def GetApprovalForObject(object_urn, token=None, username=""):
    """Looks for approvals for an object and returns available valid tokens.

    Args:
      object_urn: Urn of the object we want access to.
      token: The token to use to lookup the ACLs.
      username: The user to get the approval for, if "" we get it from the
        token.

    Returns:
      A token for access to the object on success, otherwise raises.

    Raises:
      UnauthorizedAccess: If there are no valid approvals available.
    """
    if token is None:
      raise access_control.UnauthorizedAccess(
          "No token given, cannot authenticate.")

    if not username:
      username = token.username

    # All approvals for this (subject, user) pair live under
    # aff4:/ACL/<subject path>/<username>/.
    approvals_root_urn = aff4.ROOT_URN.Add("ACL").Add(object_urn.Path()).Add(
        username)

    children_urns = list(
        aff4.FACTORY.ListChildren(
            approvals_root_urn, token=token))
    if not children_urns:
      raise access_control.UnauthorizedAccess(
          "No approval found for user %s" % utils.SmartStr(username),
          subject=object_urn)

    last_error = None
    # age=aff4.ALL_TIMES is required: approvers are stored as attribute
    # versions, so a single-version read would only expose the newest one.
    approvals = aff4.FACTORY.MultiOpen(
        children_urns,
        mode="r",
        aff4_type=Approval,
        age=aff4.ALL_TIMES,
        token=token)
    for approval in approvals:
      try:
        # Build a candidate token using the approval's stored reason and
        # validate it; the first approval that validates wins.
        test_token = access_control.ACLToken(
            username=username, reason=approval.Get(approval.Schema.REASON))
        approval.CheckAccess(test_token)

        return test_token
      except access_control.UnauthorizedAccess as e:
        last_error = e

    if last_error:
      # We tried all possible approvals, but got no usable results.
      raise access_control.UnauthorizedAccess(last_error, subject=object_urn)
    else:
      # If last error is None, means that none of the URNs in children_urns
      # could be opened. This shouldn't really happen ever, but we have
      # to make sure to provide a meaningful error message.
      raise access_control.UnauthorizedAccess(
          "Couldn't open any of %d approvals "
          "for user %s" % (len(children_urns), utils.SmartStr(username)),
          subject=object_urn)
class ApprovalWithApproversAndReason(Approval):
  """Generic all-purpose base approval class.

  This object normally lives within the aff4:/ACL namespace. Username is
  encoded into this object's urn. Subject's urn (i.e. urn of the object
  which this approval corresponds for) can also be inferred from this
  approval's urn.

  This class provides following functionality:

  * Number of approvers configured by ACL.approvers_required configuration
    parameter is required for this approval's CheckAccess() to succeed.

  * Optional checked_approvers_label attribute may be specified. Then
    at least min_approvers_with_label number of approvers will have to
    have checked_approvers_label label in order for CheckAccess to
    succeed.

  * Break-glass functionality. If this approval's BREAK_GLASS attribute is
    set, user's token is marked as emergency token and CheckAccess() returns
    True.

  The aff4:/ACL namespace is not writable by users, hence all manipulation of
  this object must be done via dedicated flows.
  """

  # If set, at least min_approvers_with_label of the approvers must carry
  # this user label for CheckAccess() to succeed.
  checked_approvers_label = None
  min_approvers_with_label = 1

  class SchemaCls(Approval.SchemaCls):
    """The Schema for the ClientAccessApproval class."""

    LIFETIME = aff4.Attribute(
        "aff4:approval/lifetime",
        rdfvalue.RDFInteger,
        "The number of seconds an approval is valid for.",
        default=0)
    BREAK_GLASS = aff4.Attribute(
        "aff4:approval/breakglass", rdfvalue.RDFDatetime,
        "The date when this break glass approval will expire.")

  def InferUserAndSubjectFromUrn(self):
    """Infers user name and subject urn from self.urn.

    Returns:
      (username, subject_urn) tuple.
    """
    raise NotImplementedError()

  def GetApprovers(self, now):
    """Returns the set of approvers whose grants are still valid at `now`.

    Args:
      now: RDFDatetime against which approval expiry is checked.

    Returns:
      Set of approver usernames (plain strings).
    """
    # A zero/unset LIFETIME falls back to the ACL.token_expiry config value.
    lifetime = rdfvalue.Duration(
        self.Get(self.Schema.LIFETIME) or config_lib.CONFIG["ACL.token_expiry"])

    approvers = set()
    # APPROVER is a versioned attribute: each stored value carries the age
    # (timestamp) at which that user granted approval; a grant expires
    # `lifetime` after that timestamp.
    for approver in self.GetValuesForAttribute(self.Schema.APPROVER):
      if approver.age + lifetime > now:
        approvers.add(utils.SmartStr(approver))
    return approvers

  def CheckAccess(self, token):
    """Enforce a dual approver policy for access."""
    namespace, _ = self.urn.Split(2)

    if namespace != "ACL":
      raise access_control.UnauthorizedAccess(
          "Approval object has invalid urn %s." % self.urn,
          subject=self.urn,
          requested_access=token.requested_access)

    user, subject_urn = self.InferUserAndSubjectFromUrn()
    if user != token.username:
      raise access_control.UnauthorizedAccess(
          "Approval object is not for user %s." % token.username,
          subject=self.urn,
          requested_access=token.requested_access)

    now = rdfvalue.RDFDatetime.Now()

    # Is this an emergency access?
    break_glass = self.Get(self.Schema.BREAK_GLASS)
    if break_glass and now < break_glass:
      # This tags the token as an emergency token.
      token.is_emergency = True
      return True

    # Check that there are enough approvers.
    approvers = self.GetNonExpiredApprovers()
    if len(approvers) < config_lib.CONFIG["ACL.approvers_required"]:
      msg = ("Requires %s approvers for access." %
             config_lib.CONFIG["ACL.approvers_required"])
      raise access_control.UnauthorizedAccess(
          msg, subject=subject_urn, requested_access=token.requested_access)

    # Check User labels
    if self.checked_approvers_label:
      approvers_with_label = []

      # We need to check labels with high privilege since normal users can
      # inspect other user's labels.
      for approver in approvers:
        try:
          user = aff4.FACTORY.Open(
              "aff4:/users/%s" % approver,
              aff4_type=aff4_users.GRRUser,
              token=token.SetUID())
          if self.checked_approvers_label in user.GetLabelsNames():
            approvers_with_label.append(approver)
        except IOError:
          # A missing user object simply doesn't count towards the quota.
          pass

      if len(approvers_with_label) < self.min_approvers_with_label:
        raise access_control.UnauthorizedAccess(
            "At least %d approver(s) should have '%s' label." %
            (self.min_approvers_with_label, self.checked_approvers_label),
            subject=subject_urn,
            requested_access=token.requested_access)

    return True

  def GetNonExpiredApprovers(self):
    """Returns a list of usernames of approvers who approved this approval."""
    # TODO(user): approvals have to be opened with
    # age=aff4.ALL_TIMES because versioning is used to store lists
    # of approvers. This doesn't seem right and has to be fixed.
    #
    # Delegates to GetApprovers() so the expiry logic lives in exactly one
    # place (previously this method duplicated it verbatim).
    return list(self.GetApprovers(rdfvalue.RDFDatetime.Now()))
class ClientApproval(ApprovalWithApproversAndReason):
  """An approval request for access to a specific client.

  This object normally lives within the namespace:
  aff4:/ACL/client_id/user/approval:<id>

  Hence the client_id and user which is granted access are inferred from this
  object's URN.

  The aff4:/ACL namespace is not writable by users, hence all manipulation of
  this object must be done via dedicated flows. These flows use the server's
  access credentials for manipulating this object:

   - RequestClientApprovalFlow()
   - GrantClientApprovalFlow()
   - BreakGlassGrantClientApprovalFlow()
  """

  def InferUserAndSubjectFromUrn(self):
    """Infers user name and subject urn from self.urn."""
    # URN layout: aff4:/ACL/<client_id>/<user>/approval:<id>
    _, client_id, user, _ = self.urn.Split(4)
    return (user, rdf_client.ClientURN(client_id))

  def CheckAccess(self, token):
    """Checks base approvals, then per-label client approval rules.

    Args:
      token: User's credentials token.

    Returns:
      True if access is granted.

    Raises:
      access_control.UnauthorizedAccess: if access is rejected.
      ErrorClientDoesNotExist: if the client being checked does not exist.
    """
    super(ClientApproval, self).CheckAccess(token)
    # If approvers isn't set and super-class checking passed, we're done.
    if not client_approval_auth.CLIENT_APPROVAL_AUTH_MGR.IsActive():
      return True

    now = rdfvalue.RDFDatetime.Now()
    approvers = self.GetApprovers(now)
    requester, client_urn = self.InferUserAndSubjectFromUrn()
    # Open the client object with superuser privs so we can get the list of
    # labels
    try:
      client_object = aff4.FACTORY.Open(
          client_urn,
          mode="r",
          aff4_type=aff4_grr.VFSGRRClient,
          token=token.SetUID())
    except aff4.InstantiationError:
      raise ErrorClientDoesNotExist("Can't check label approvals on client %s "
                                    "that doesn't exist" % client_urn)

    client_labels = client_object.Get(client_object.Schema.LABELS, [])

    # Every label on the client must pass its configured approver rule;
    # CheckApproversForLabel raises on failure.
    for label in client_labels:
      client_approval_auth.CLIENT_APPROVAL_AUTH_MGR.CheckApproversForLabel(
          token, client_urn, requester, approvers, label.name)

    return True
class HuntApproval(ApprovalWithApproversAndReason):
  """An approval request for running a specific hunt.

  This object normally lives within the namespace:
  aff4:/ACL/hunts/hunt_id/user_id/approval:<id>

  Hence the hunt_id and user_id are inferred from this object's URN.

  The aff4:/ACL namespace is not writable by users, hence all manipulation of
  this object must be done via dedicated flows. These flows use the server's
  access credentials for manipulating this object:

   - RequestHuntApprovalFlow()
   - GrantHuntApprovalFlow()
  """

  # At least min_approvers_with_label approvers must carry the "admin" label.
  checked_approvers_label = "admin"

  def InferUserAndSubjectFromUrn(self):
    """Infers user name and subject urn from self.urn."""
    # URN layout: aff4:/ACL/hunts/<hunt_id>/<user>/approval:<id>
    _, hunts_str, hunt_id, user, _ = self.urn.Split(5)

    if hunts_str != "hunts":
      raise access_control.UnauthorizedAccess(
          "Approval object has invalid urn %s." % self.urn,
          requested_access=self.token.requested_access)

    return (user, aff4.ROOT_URN.Add("hunts").Add(hunt_id))
class CronJobApproval(ApprovalWithApproversAndReason):
  """An approval request for managing a specific cron job.

  This object normally lives within the namespace:
  aff4:/ACL/cron/cron_job_id/user_id/approval:<id>

  Hence the cron_job_id and user_id are inferred from this object's URN.

  The aff4:/ACL namespace is not writable by users, hence all manipulation of
  this object must be done via dedicated flows. These flows use the server's
  access credentials for manipulating this object:

   - RequestCronJobApprovalFlow()
   - GrantCronJobApprovalFlow()
  """

  # At least min_approvers_with_label approvers must carry the "admin" label.
  checked_approvers_label = "admin"

  def InferUserAndSubjectFromUrn(self):
    """Infers user name and subject urn from self.urn."""
    # URN layout: aff4:/ACL/cron/<cron_job_name>/<user>/approval:<id>
    _, cron_str, cron_job_name, user, _ = self.urn.Split(5)

    if cron_str != "cron":
      raise access_control.UnauthorizedAccess(
          "Approval object has invalid urn %s." % self.urn,
          requested_access=self.token.requested_access)

    return (user, aff4.ROOT_URN.Add("cron").Add(cron_job_name))
class AbstractApprovalWithReasonMixin(object):
  """Abstract class for approval requests/grants."""

  # Subclasses set this to the Approval subclass they create/check.
  approval_type = None

  def BuildApprovalUrn(self, approval_id):
    """Builds approval object urn."""
    raise NotImplementedError()

  def BuildApprovalSymlinksUrns(self, unused_approval_id):
    """Builds a list of symlinks to the approval object."""
    return []

  def BuildSubjectTitle(self):
    """Returns the string with subject's title."""
    raise NotImplementedError()

  def CreateReasonHTML(self, reason):
    """Creates clickable links in the reason where appropriate.

    Each pattern in the Email.link_regex_list config option is applied to
    the reason. A pattern marks the text to linkify with a regex named
    group called "link":
      (?P<link>sometext)

    Args:
      reason: reason string.

    Returns:
      Reason string with HTML hrefs as appropriate.
    """
    for link_re in config_lib.CONFIG.Get("Email.link_regex_list"):
      reason = re.sub(link_re, r"""<a href="\g<link>">\g<link></a>""", reason)
    return reason

  @staticmethod
  def ApprovalUrnBuilder(subject, user, approval_id):
    """Encode an approval URN: aff4:/ACL/<subject>/<user>/<approval_id>."""
    return aff4.ROOT_URN.Add("ACL").Add(subject).Add(user).Add(approval_id)

  @staticmethod
  def ApprovalSymlinkUrnBuilder(approval_type, subject_id, user, approval_id):
    """Build an approval symlink URN under the requesting user's namespace."""
    return aff4.ROOT_URN.Add("users").Add(user).Add("approvals").Add(
        approval_type).Add(subject_id).Add(approval_id)
class RequestApprovalWithReasonFlowArgs(rdf_structs.RDFProtoStruct):
  """RDF wrapper for RequestApprovalWithReasonFlow's args protobuf."""
  protobuf = flows_pb2.RequestApprovalWithReasonFlowArgs
class RequestApprovalWithReasonFlow(AbstractApprovalWithReasonMixin,
                                    flow.GRRFlow):
  """Base flow class for flows that request approval of a certain type."""
  args_type = RequestApprovalWithReasonFlowArgs

  def BuildApprovalReviewUrlPath(self, approval_id):
    """Build the url path to the approval review page."""
    raise NotImplementedError()

  @flow.StateHandler()
  def Start(self):
    """Create the Approval object and notify the Approval Granter."""
    # Random id keeps concurrent requests for the same subject distinct.
    approval_id = "approval:%X" % utils.PRNG.GetULong()
    self.state.approval_id = approval_id

    approval_urn = self.BuildApprovalUrn(approval_id)
    self.state.approval_urn = approval_urn

    subject_title = self.BuildSubjectTitle()

    # The message id is stored on the approval so the later grant email can
    # be threaded onto this request email.
    email_msg_id = email.utils.make_msgid()

    with aff4.FACTORY.Create(
        approval_urn, self.approval_type, mode="w",
        token=self.token) as approval_request:
      approval_request.Set(
          approval_request.Schema.SUBJECT(self.args.subject_urn))
      approval_request.Set(
          approval_request.Schema.REQUESTOR(self.token.username))
      approval_request.Set(approval_request.Schema.REASON(self.args.reason))
      approval_request.Set(approval_request.Schema.EMAIL_MSG_ID(email_msg_id))

      cc_addresses = (self.args.email_cc_address,
                      config_lib.CONFIG.Get("Email.approval_cc_address"))
      email_cc = ",".join(filter(None, cc_addresses))

      # When we reply with the approval we want to cc all the people to whom
      # the original approval was sent, to avoid people approving stuff that
      # was already approved.
      if email_cc:
        reply_cc = ",".join((self.args.approver, email_cc))
      else:
        reply_cc = self.args.approver

      approval_request.Set(approval_request.Schema.EMAIL_CC(reply_cc))

      approval_request.Set(
          approval_request.Schema.NOTIFIED_USERS(self.args.approver))

      # We add ourselves as an approver as well (The requirement is that we
      # have 2 approvers, so the requester is automatically an approver).
      approval_request.AddAttribute(
          approval_request.Schema.APPROVER(self.token.username))

    # Symlinks make the approval discoverable under the user's namespace.
    approval_link_urns = self.BuildApprovalSymlinksUrns(approval_id)
    for link_urn in approval_link_urns:
      with aff4.FACTORY.Create(
          link_urn, aff4.AFF4Symlink, mode="w", token=self.token) as link:
        link.Set(link.Schema.SYMLINK_TARGET(approval_urn))

    # Notify each requested approver via the GRR UI notification mechanism.
    for user in self.args.approver.split(","):
      user = user.strip()
      try:
        fd = aff4.FACTORY.Open(
            aff4.ROOT_URN.Add("users").Add(user),
            aff4_type=aff4_users.GRRUser,
            mode="rw",
            token=self.token)
      except aff4.InstantiationError:
        # Unknown approver user: skip silently rather than fail the request.
        continue

      fd.Notify("GrantAccess", approval_urn,
                "Please grant access to %s" % subject_title, self.session_id)
      fd.Close()

    if not config_lib.CONFIG.Get("Email.send_approval_emails"):
      return

    reason = self.CreateReasonHTML(self.args.reason)

    template = u"""
<html><body><h1>Approval to access
<a href='%(admin_ui)s#%(approval_url)s'>%(subject_title)s</a> requested.</h1>
The user "%(username)s" has requested access to
<a href='%(admin_ui)s#%(approval_url)s'>%(subject_title)s</a>
for the purpose of "%(reason)s".
Please click <a href='%(admin_ui)s#%(approval_url)s'>
here
</a> to review this request and then grant access.
<p>Thanks,</p>
<p>%(signature)s</p>
<p>%(image)s</p>
</body></html>"""

    # If you feel like it, add a funny cat picture here :)
    image = config_lib.CONFIG["Email.approval_signature"]

    body = template % dict(
        username=self.token.username,
        reason=reason,
        admin_ui=config_lib.CONFIG["AdminUI.url"],
        subject_title=subject_title,
        approval_url=self.BuildApprovalReviewUrlPath(approval_id),
        image=image,
        signature=config_lib.CONFIG["Email.signature"])

    email_alerts.EMAIL_ALERTER.SendEmail(
        self.args.approver,
        utils.SmartStr(self.token.username),
        u"Approval for %s to access %s." % (self.token.username, subject_title),
        utils.SmartStr(body),
        is_html=True,
        cc_addresses=email_cc,
        message_id=email_msg_id)
class GrantApprovalWithReasonFlowArgs(rdf_structs.RDFProtoStruct):
  """RDF wrapper for GrantApprovalWithReasonFlow's args protobuf."""
  protobuf = flows_pb2.GrantApprovalWithReasonFlowArgs
class GrantApprovalWithReasonFlow(AbstractApprovalWithReasonMixin,
                                  flow.GRRFlow):
  """Base flows class for flows that grant approval of a certain type."""
  args_type = GrantApprovalWithReasonFlowArgs

  @flow.StateHandler()
  def Start(self):
    """Create the Approval object and notify the Approval Granter."""
    if not self.args.delegate:
      raise ValueError("Delegate can't be empty.")

    if not self.args.reason:
      raise ValueError("Reason can't be empty.")

    # All approvals for (subject, delegate) live under this URN.
    approvals_root_urn = aff4.ROOT_URN.Add("ACL").Add(
        self.args.subject_urn.Path()).Add(self.args.delegate)

    children_urns = list(
        aff4.FACTORY.ListChildren(
            approvals_root_urn, token=self.token))
    if not children_urns:
      raise access_control.UnauthorizedAccess(
          "No approval found for user %s" % utils.SmartStr(self.token.username),
          subject=self.args.subject_urn)

    # Find the most recently requested approval whose REASON matches the
    # reason being granted.
    approvals = aff4.FACTORY.MultiOpen(
        children_urns, mode="r", aff4_type=Approval, token=self.token)

    found_approval_urn = None
    for approval in approvals:
      approval_reason = approval.Get(approval.Schema.REASON)
      if (utils.SmartUnicode(approval_reason) ==
          utils.SmartUnicode(self.args.reason) and
          (not found_approval_urn or
           approval_reason.age > found_approval_urn.age)):
        found_approval_urn = approval.urn
        # The reason's timestamp is stashed on the urn so the comparison
        # above can prefer the newest matching approval on later iterations.
        found_approval_urn.age = approval_reason.age

    if not found_approval_urn:
      raise access_control.UnauthorizedAccess(
          "No approval with reason '%s' found for user %s" %
          (utils.SmartStr(self.args.reason),
           utils.SmartStr(self.token.username)),
          subject=self.args.subject_urn)

    subject_title = self.BuildSubjectTitle()
    # BuildAccessUrl() is provided by the concrete subclass.
    access_url = self.BuildAccessUrl()

    # This object must already exist.
    try:
      approval_request = aff4.FACTORY.Open(
          found_approval_urn,
          mode="rw",
          aff4_type=self.approval_type,
          token=self.token)
    except IOError:
      raise access_control.UnauthorizedAccess(
          "Approval object does not exist.", requested_access="rw")

    # We are now an approver for this request.
    approval_request.AddAttribute(
        approval_request.Schema.APPROVER(self.token.username))

    # Saved so the grant email below can be threaded onto the original
    # request email and keep the same CC list.
    email_msg_id = utils.SmartStr(
        approval_request.Get(approval_request.Schema.EMAIL_MSG_ID))
    email_cc = utils.SmartStr(
        approval_request.Get(approval_request.Schema.EMAIL_CC))

    approval_request.Close(sync=True)

    # Notify the delegate via the GRR UI notification mechanism.
    fd = aff4.FACTORY.Create(
        aff4.ROOT_URN.Add("users").Add(self.args.delegate),
        aff4_users.GRRUser,
        mode="rw",
        token=self.token)

    fd.Notify("ViewObject", self.args.subject_urn,
              "%s has granted you access to %s." %
              (self.token.username, subject_title), self.session_id)
    fd.Close()

    if not config_lib.CONFIG.Get("Email.send_approval_emails"):
      return

    reason = self.CreateReasonHTML(self.args.reason)

    template = u"""
<html><body><h1>Access to
<a href='%(admin_ui)s#%(subject_url)s'>%(subject_title)s</a> granted.</h1>
The user %(username)s has granted access to
<a href='%(admin_ui)s#%(subject_url)s'>%(subject_title)s</a> for the
purpose of "%(reason)s".
Please click <a href='%(admin_ui)s#%(subject_url)s'>here</a> to access it.
<p>Thanks,</p>
<p>%(signature)s</p>
</body></html>"""

    body = template % dict(
        subject_title=subject_title,
        username=self.token.username,
        reason=reason,
        admin_ui=config_lib.CONFIG["AdminUI.url"],
        subject_url=access_url,
        signature=config_lib.CONFIG["Email.signature"])

    # Email subject should match approval request, and we add message id
    # references so they are grouped together in a thread by gmail.
    subject = u"Approval for %s to access %s." % (
        utils.SmartStr(self.args.delegate), subject_title)
    headers = {"In-Reply-To": email_msg_id, "References": email_msg_id}
    email_alerts.EMAIL_ALERTER.SendEmail(
        utils.SmartStr(self.args.delegate),
        utils.SmartStr(self.token.username),
        subject,
        utils.SmartStr(body),
        is_html=True,
        cc_addresses=email_cc,
        headers=headers)
class BreakGlassGrantApprovalWithReasonFlow(GrantApprovalWithReasonFlow):
  """Grant an approval in an emergency."""

  @flow.StateHandler()
  def Start(self):
    """Create the Approval object and notify the Approval Granter."""
    approval_id = "approval:%X" % utils.PRNG.GetULong()
    approval_urn = self.BuildApprovalUrn(approval_id)

    subject_title = self.BuildSubjectTitle()

    # Create a new Approval object.
    approval_request = aff4.FACTORY.Create(
        approval_urn, aff4_type=self.approval_type, token=self.token)

    approval_request.Set(approval_request.Schema.REASON(self.args.reason))
    approval_request.AddAttribute(
        approval_request.Schema.APPROVER(self.token.username))

    # This is a break glass approval.
    break_glass = rdfvalue.RDFDatetime.Now()

    # By default a break_glass approval only lasts 24 hours.
    break_glass += rdfvalue.Duration("24h")
    approval_request.Set(approval_request.Schema.BREAK_GLASS, break_glass)
    approval_request.Close(sync=True)

    # Notify the user that the emergency access was granted.
    fd = aff4.FACTORY.Create(
        aff4.ROOT_URN.Add("users").Add(self.token.username),
        aff4_users.GRRUser,
        mode="rw",
        token=self.token)

    fd.Notify("ViewObject", self.args.subject_urn,
              "An Emergency Approval has been granted to access "
              "%s." % subject_title, self.session_id)
    fd.Close()

    template = u"""
<html><body><h1>Emergency Access Granted.</h1>
The user %(username)s has requested emergency access to %(subject_title)s.
for the purpose of: "%(reason)s".
This access has been logged and granted for 24 hours.
<p>Thanks,</p>
<p>%(signature)s</p>
</body></html>"""

    # BUG FIX: the original assignment ended with a stray trailing comma,
    # which made `body` a 1-tuple instead of a string, so the notification
    # email contained the repr of a tuple. The unused client_id template
    # parameter is also dropped: the template never references it, and
    # self.client_id is meaningless for non-client subjects.
    body = template % dict(
        username=self.token.username,
        subject_title=subject_title,
        reason=self.args.reason,
        signature=config_lib.CONFIG["Email.signature"])

    email_alerts.EMAIL_ALERTER.SendEmail(
        config_lib.CONFIG["Monitoring.emergency_access_email"],
        self.token.username,
        u"Emergency approval granted for %s." % subject_title,
        utils.SmartStr(body),
        is_html=True,
        cc_addresses=config_lib.CONFIG["Email.approval_cc_address"])
class RequestClientApprovalFlow(RequestApprovalWithReasonFlow):
  """A flow to request approval to access a client."""

  # This flow can run on any client without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  approval_type = ClientApproval

  @property
  def subject_urn(self):
    # client_id is set when the flow runs against a client; otherwise the
    # subject urn is taken from the flow args.
    return self.client_id or rdf_client.ClientURN(self.args.subject_urn)

  def BuildApprovalUrn(self, approval_id):
    """Builds approval object urn."""
    # Requesting client approval is an auditable event.
    event = events.AuditEvent(
        user=self.token.username,
        action="CLIENT_APPROVAL_REQUEST",
        client=self.subject_urn,
        description=self.args.reason)
    events.Events.PublishEvent("Audit", event, token=self.token)

    return self.ApprovalUrnBuilder(self.subject_urn.Path(), self.token.username,
                                   approval_id)

  def BuildApprovalSymlinksUrns(self, approval_id):
    """Builds list of symlinks URNs for the approval object."""
    return [
        self.ApprovalSymlinkUrnBuilder("client",
                                       self.subject_urn.Basename(),
                                       self.token.username, approval_id)
    ]

  def BuildSubjectTitle(self):
    """Returns the string with subject's title."""
    client = aff4.FACTORY.Open(self.subject_urn, token=self.token)
    hostname = client.Get(client.Schema.HOSTNAME)
    return u"GRR client %s (%s)" % (self.subject_urn.Basename(), hostname)

  def BuildApprovalReviewUrlPath(self, approval_id):
    """Builds the AdminUI path to this approval's review page."""
    return "/".join([
        "users", self.token.username, "approvals", "client",
        self.subject_urn.Basename(), approval_id
    ])
class GrantClientApprovalFlow(GrantApprovalWithReasonFlow):
  """Grant the approval requested."""

  # This flow can run on any client without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  approval_type = ClientApproval

  @property
  def subject_urn(self):
    # client_id is set when the flow runs against a client; otherwise the
    # subject urn is taken from the flow args.
    return self.client_id or rdf_client.ClientURN(self.args.subject_urn)

  def BuildApprovalUrn(self, approval_id):
    """Builds approval object urn."""
    # Granting client approval is an auditable event.
    events.Events.PublishEvent(
        "Audit",
        events.AuditEvent(
            user=self.token.username,
            action="CLIENT_APPROVAL_GRANT",
            client=self.subject_urn,
            description=self.args.reason),
        token=self.token)

    # The approval lives under the delegate (the user being granted access),
    # not under the granting user.
    return self.ApprovalUrnBuilder(self.subject_urn.Path(), self.args.delegate,
                                   approval_id)

  def BuildAccessUrl(self):
    """Builds the urn to access this object."""
    # Query-string fragment consumed by the AdminUI (client host info page).
    return urllib.urlencode((("c", self.subject_urn),
                             ("main", "HostInformation")))

  def BuildSubjectTitle(self):
    """Returns the string with subject's title."""
    client = aff4.FACTORY.Open(self.subject_urn, token=self.token)
    hostname = client.Get(client.Schema.HOSTNAME)
    return u"GRR client %s (%s)" % (self.subject_urn.Basename(), hostname)
class BreakGlassGrantClientApprovalFlow(BreakGlassGrantApprovalWithReasonFlow):
  """Grant an approval in an emergency."""

  # This flow can run on any client without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  approval_type = ClientApproval

  def BuildApprovalUrn(self, approval_id):
    """Builds approval object urn."""
    # Break-glass access is an auditable event.
    event = events.AuditEvent(
        user=self.token.username,
        action="CLIENT_APPROVAL_BREAK_GLASS_REQUEST",
        client=self.client_id,
        description=self.args.reason)
    events.Events.PublishEvent("Audit", event, token=self.token)

    return self.ApprovalUrnBuilder(self.client_id.Path(), self.token.username,
                                   approval_id)

  def BuildSubjectTitle(self):
    """Returns the string with subject's title."""
    client = aff4.FACTORY.Open(self.client_id, token=self.token)
    hostname = client.Get(client.Schema.HOSTNAME)
    return u"GRR client %s (%s)" % (self.client_id.Basename(), hostname)
class RequestHuntApprovalFlow(RequestApprovalWithReasonFlow):
  """A flow to request approval to run a hunt."""

  # This flow can run on any client without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  approval_type = HuntApproval

  def BuildApprovalUrn(self, approval_id):
    """Builds approval object URN."""
    # In this case subject_urn is hunt's URN.
    events.Events.PublishEvent(
        "Audit",
        events.AuditEvent(
            user=self.token.username,
            action="HUNT_APPROVAL_REQUEST",
            urn=self.args.subject_urn,
            description=self.args.reason),
        token=self.token)

    return self.ApprovalUrnBuilder(self.args.subject_urn.Path(),
                                   self.token.username, approval_id)

  def BuildApprovalSymlinksUrns(self, approval_id):
    """Builds list of symlinks URNs for the approval object."""
    return [
        self.ApprovalSymlinkUrnBuilder("hunt",
                                       self.args.subject_urn.Basename(),
                                       self.token.username, approval_id)
    ]

  def BuildSubjectTitle(self):
    """Returns the string with subject's title."""
    return u"hunt %s" % self.args.subject_urn.Basename()

  def BuildApprovalReviewUrlPath(self, approval_id):
    """Builds the AdminUI path to this approval's review page."""
    return "/".join([
        "users", self.token.username, "approvals", "hunt",
        self.args.subject_urn.Basename(), approval_id
    ])
class GrantHuntApprovalFlow(GrantApprovalWithReasonFlow):
  """Grant the approval requested."""

  # This flow can run on any client without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  approval_type = HuntApproval

  def BuildApprovalUrn(self, approval_id):
    """Builds approval object URN."""
    # In this case subject_urn is hunt's URN.
    events.Events.PublishEvent(
        "Audit",
        events.AuditEvent(
            user=self.token.username,
            action="HUNT_APPROVAL_GRANT",
            urn=self.args.subject_urn,
            description=self.args.reason),
        token=self.token)

    # The approval lives under the delegate (the user being granted access),
    # not under the granting user.
    return self.ApprovalUrnBuilder(self.args.subject_urn.Path(),
                                   self.args.delegate, approval_id)

  def BuildSubjectTitle(self):
    """Returns the string with subject's title."""
    return u"hunt %s" % self.args.subject_urn.Basename()

  def BuildAccessUrl(self):
    """Builds the urn to access this object."""
    # Query-string fragment consumed by the AdminUI (hunt management page).
    return urllib.urlencode((("main", "ManageHunts"), ("hunt",
                                                       self.args.subject_urn)))
class RequestCronJobApprovalFlow(RequestApprovalWithReasonFlow):
  """A flow to request approval to manage a cron job."""

  # This flow can run on any client without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  approval_type = CronJobApproval

  def BuildApprovalUrn(self, approval_id):
    """Builds approval object URN."""
    # In this case subject_urn is a cron job's URN.
    events.Events.PublishEvent(
        "Audit",
        events.AuditEvent(
            user=self.token.username,
            action="CRON_APPROVAL_REQUEST",
            urn=self.args.subject_urn,
            description=self.args.reason),
        token=self.token)

    return self.ApprovalUrnBuilder(self.args.subject_urn.Path(),
                                   self.token.username, approval_id)

  def BuildApprovalSymlinksUrns(self, approval_id):
    """Builds list of symlinks URNs for the approval object."""
    return [
        self.ApprovalSymlinkUrnBuilder("cron",
                                       self.args.subject_urn.Basename(),
                                       self.token.username, approval_id)
    ]

  def BuildSubjectTitle(self):
    """Returns the string with subject's title."""
    return u"a cron job"

  def BuildApprovalReviewUrlPath(self, approval_id):
    """Builds the AdminUI path to this approval's review page."""
    return "/".join([
        "users", self.token.username, "approvals", "cron-job",
        self.args.subject_urn.Basename(), approval_id
    ])
class GrantCronJobApprovalFlow(GrantApprovalWithReasonFlow):
  """Grant approval to manage a cron job."""

  # This flow can run on any client without ACL enforcement (an SUID flow).
  ACL_ENFORCED = False

  approval_type = CronJobApproval

  def BuildApprovalUrn(self, approval_id):
    """Builds approval object URN.

    Args:
      approval_id: String id ("approval:...") of the approval being granted.

    Returns:
      RDFURN of the approval object under aff4:/ACL.
    """
    # In this case subject_urn is a cron job's URN.
    events.Events.PublishEvent(
        "Audit",
        events.AuditEvent(
            user=self.token.username,
            action="CRON_APPROVAL_GRANT",
            urn=self.args.subject_urn,
            description=self.args.reason),
        token=self.token)

    # BUG FIX: this method previously took no approval_id argument
    # (breaking the BuildApprovalUrn(approval_id) contract every sibling
    # Grant*ApprovalFlow implements, and raising TypeError for callers such
    # as BreakGlassGrantApprovalWithReasonFlow.Start) and passed
    # self.args.reason as the final URN component, so the built URN could
    # never match the approval created by RequestCronJobApprovalFlow. Use
    # the real approval id, consistent with the other Grant flows.
    return self.ApprovalUrnBuilder(self.args.subject_urn.Path(),
                                   self.args.delegate, approval_id)

  def BuildSubjectTitle(self):
    """Returns the string with subject's title."""
    return u"a cron job"

  def BuildAccessUrl(self):
    """Builds the urn to access this object."""
    # Query-string fragment consumed by the AdminUI (cron management page).
    return urllib.urlencode({"main": "ManageCron"})
| pidydx/grr | grr/lib/aff4_objects/security.py | Python | apache-2.0 | 35,585 |
from armor.graphics import specContour
from armor.initialise import *
import pickle, os
import numpy as np
import matplotlib.pyplot as plt
# COMPREF
# ---------------------------------------------------------------------------
# Section 1: average the per-frame power spectra (total and max variants) for
# the COMPREF radar data, plot each running average, and finally plot the
# total-vs-max comparison contour.
# NOTE(review): relies on names imported by `from armor.initialise import *`
# above (e.g. dp) -- confirm against armor.initialise.
# ---------------------------------------------------------------------------
#inputFolder = dp.root + 'labLogs2/powerSpec3/1404707377.16COMPREF_Rainband_March_2014/'
inputFolder = "C:/yau/1404716726.06COMPREF_Rainband_March_2014/"
outputFolder = inputFolder+ 'meanSpecs/'
testName = 'Contour_Spec_COMPREF_Rainband_March_2014'
L = os.listdir(inputFolder)
# Split the dumped spectra into the "total" and "max" families by filename.
Ltotal = [v for v in L if 'XYZ.pydump' in v]
Lmax = [v for v in L if 'XYZmax.pydump' in v]
print Ltotal
print Lmax
########
####
##
#
L0=L
vmins = [0,0]
vmaxs = [0,0]
labels =['total', 'max']
XYZs = [0,0]
count = 0
XYZouts = [0,0]
for count , L in enumerate([Ltotal, Lmax]):
    plt.close()
    # Accumulator for the running sum of Z (spectrum grid is 13x8).
    Z = np.zeros((13,8))
    for frameCount, fileName in enumerate(L):
        XYZ = pickle.load(open(inputFolder+fileName,'r'))
        #X = XYZ['X']
        #Y = XYZ['Y']
        Z1 = XYZ['Z']
        Z += Z1
        # Replace Z with the running mean over the frames seen so far.
        XYZ['Z'] = Z/ (frameCount+1)
        #vmins[count] = (np.log10(XYZ["Z"])* (Z>0)).min()
        #vmaxs[count] = (np.log10(XYZ["Z"])* (Z>0)).max()
        X = XYZ['X']
        Y = XYZ['Y']
        XYZouts[count] = specContour.specContour(XYZ, display=True, outputFolder=outputFolder,
                            vmin=-1.0, vmax=3.6,
                            fileName = testName+ labels[count] + "_average_of_" + str(frameCount+1) +'images.png')
        plt.close()
    # Keep copies of the summed (not averaged) grids for the comparison plots.
    XYZs[count] = {'X': X.copy(), 'Y': Y.copy(), 'Z': Z.copy()}
specContour.specContour(XYZs[0], XYZs[1], outputFolder=outputFolder, fileName=testName+"total-max.png")
#specContour.specContour(XYZs[0], XYZs[1], outputFolder=outputFolder, fileName=testName+"total-max.png", vmin=-.8, vmax=3.17)
print testName, "number of frames", frameCount+1
#
##
####
########
## set the "setMin/setMax" - edit here
# ---------------------------------------------------------------------------
# Section 2: re-plot the final averaged spectra with a fixed (hand-tuned)
# colour scale so the total and max plots share matching colourbars.
# ---------------------------------------------------------------------------
#vmax = max(vmaxs)
#vmin = min(vmins)
vmax = 3.17
vmin = -0.944
print vmin, vmax
for count , L in enumerate([Ltotal, Lmax]):
    plt.close()
    # Reuse the XYZ dict left over from the previous loop; only Z changes.
    XYZ['Z'] = XYZs[count]['Z']
    #specContour.specContour(XYZ, display=True, outputFolder=outputFolder, fileName = testName+ labels[count] + "_average_of_" + str(frameCount+1) +'images.png')
    specContour.specContour(XYZ, display=True, vmin = vmin, vmax=vmax,
                outputFolder=outputFolder, fileName = testName+ labels[count] + "_average_of_" + str(frameCount+1) +'images_colourbars_matched_.png')
"""
#=============================================================================================
# WRFs all together
#inputFolder = dp.root + 'labLogs2/powerSpec3/1404707377.16COMPREF_Rainband_March_2014/'
inputFolder = "C:/yau/1404716726.08WRF_Rainband_March_2014/"
testName = 'Contour_Spec_WRF_Rainband_March_2014'
outputFolder = inputFolder+ 'meanSpecs/'
L = os.listdir(inputFolder)
L = [v for v in L if not ('0313.0300' in v or '0313.0600' in v or '0313.0900' in v) ]
Ltotal = [v for v in L if 'XYZ.pydump' in v]
Lmax = [v for v in L if 'XYZmax.pydump' in v]
L0=L
labels =['total', 'max']
XYZwrfs = [0,0]
count = 0
for count , L in enumerate([Ltotal, Lmax]):
Z = np.zeros((13,8))
plt.close()
for frameCount, fileName in enumerate(L):
XYZ = pickle.load(open(inputFolder+fileName,'r'))
#X = XYZ['X']
#Y = XYZ['Y']
Z1 = XYZ['Z']
Z += Z1
XYZ['Z'] = Z/ (frameCount+1)
specContour.specContour(XYZ, display=True, outputFolder=outputFolder, fileName = testName+ labels[count] + "_average_of_" + str(frameCount+1) +'images.png')
XYZwrfs[count] = XYZ
plt.close()
specContour.specContour(XYZs[0], XYZs[1], outputFolder=outputFolder, fileName=testName+"total-max.png")
print testName, "number of frames", frameCount+1
######################################################################################################
#outputFolder= 'testing/'
plt.close()
specContour.specContour(XYZs[0], XYZwrfs[0], outputFolder=outputFolder, fileName="Rainband_march_2014_COMPREF-versus-WRF-total-Spec.png")
plt.close()
specContour.specContour(XYZs[1], XYZwrfs[1], outputFolder=outputFolder, fileName="Rainband_march_2014_COMPREF-versus-WRF-max-Spec.png")
plt.close()
"""
#############################################################################################################################################
# individual WRFs
# ---------------------------------------------------------------------------
# Section 3: for each of the 20 WRF model runs, average its spectra, plot the
# running averages, compare against the COMPREF spectra computed above, and
# log the squared-difference scores to a timestamped log file.
# NOTE(review): `time` is assumed to come from `from armor.initialise import *`
# -- confirm, it is not imported explicitly in this file.
# ---------------------------------------------------------------------------
wrfLabels = [ 'WRF' + ("00"+str(v))[-2:] for v in range(1,21)]
inputFolder = "C:/yau/1404716726.08WRF_Rainband_March_2014/"
logFile = open (inputFolder + 'meanSpecs/' + str(time.time()) + 'log.txt','w')
logFile.write('Contour_Spec_WRF_Rainband_March_2014\n')
logFile.write('InputFolder:\t' + inputFolder + '\n')
logFile.write('WRF, Number of Frames, Diff in TotalSpec, Diff in MaxSpec\n')
for i in range(20):
    #try:
        testName = 'Contour_Spec_WRF_Rainband_March_2014_' + wrfLabels[i]
        outputFolder = inputFolder+ 'meanSpecs/' + wrfLabels[i] + '/'
        if not os.path.exists(outputFolder):
            os.makedirs(outputFolder)
        L = os.listdir(inputFolder)
        # Drop the three timestamps that are known to be bad/missing.
        L = [v for v in L if not ('0313.0300' in v or '0313.0600' in v or '0313.0900' in v) ]
        L = [v for v in L if wrfLabels[i] in v] # pick out the outputs with from the WRF specified
        Ltotal = [v for v in L if 'XYZ.pydump' in v]
        Lmax = [v for v in L if 'XYZmax.pydump' in v]
        L0=L
        print '\n'.join([str(v) for v in Ltotal])
        print "<-- Ltotal\n\n"
        time.sleep(2)
        print '\n'.join([str(v) for v in Lmax])
        print "<-- Lmax\n\n"
        time.sleep(3)
        labels =['total', 'max']
        XYZwrfs = [0,0]
        count = 0
        for count , L in enumerate([Ltotal, Lmax]):
            Z = np.zeros((13,8))
            plt.close()
            for frameCount, fileName in enumerate(L):
                XYZ = pickle.load(open(inputFolder+fileName,'r'))
                #X = XYZ['X']
                #Y = XYZ['Y']
                Z1 = XYZ['Z']
                Z += Z1
                # Running mean over the frames processed so far.
                XYZ['Z'] = Z/ (frameCount+1)
                XYZout = specContour.specContour(XYZ, display=True, outputFolder=outputFolder, fileName = testName+ labels[count] + "_average_of_" + str(frameCount+1) +'images.png')
            XYZwrfs[count] = XYZ
            plt.close()
        # Compare this WRF's averaged spectra against the COMPREF results
        # (XYZs) computed in Section 1 above.
        XYZout, XYZwrfout = specContour.specContour(XYZs[0], XYZwrfs[0], outputFolder=outputFolder, fileName= "Total_Spec_COMPREF-versus-" + wrfLabels[i] +".png")
        plt.close()
        XYZout2, XYZwrfout2=specContour.specContour(XYZs[1], XYZwrfs[1], outputFolder=outputFolder, fileName= "Max_Spec_COMPREF-versus-" + wrfLabels[i] +".png")
        plt.close()
        print testName, "number of frames", frameCount+1
        # Log: WRF label, frame count, sum-of-squares difference for total/max.
        logString = wrfLabels[i] + '\t' + str(frameCount+1) + '\t' + str(((XYZout['Z']-XYZwrfout['Z'])**2).sum()) + '\t' + str(((XYZout2['Z']-XYZwrfout2['Z'])**2).sum())
        logString +='\n'
        logFile.write(logString)
        logFile.flush()
    #except:
    # print "Error!!!", wrfLabels[i], "count", count
logFile.close()
| yaukwankiu/armor | tests/contourPlotTest.py | Python | cc0-1.0 | 7,007 |
from django.conf import settings
from django.forms import widgets
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.forms.util import flatatt
class GoogleMapsAddressWidget(widgets.TextInput):
    "a widget that will place a google map right after the #id_address field"

    class Media:
        css = {'all': (settings.STATIC_URL + 'django_google_maps/css/google-maps-admin.css',),}
        js = (
            'https://ajax.googleapis.com/ajax/libs/jquery/1.4.4/jquery.min.js',
            'http://maps.google.com/maps/api/js?sensor=false',
            settings.STATIC_URL + 'django_google_maps/js/google-maps-admin.js',
        )

    def render(self, name, value, attrs=None):
        """Render the text input followed by the map-canvas placeholder div."""
        attrs_for_input = self.build_attrs(attrs, type=self.input_type, name=name)
        # Only emit a 'value' attribute for a non-empty, non-None value.
        if value is not None and value != '':
            attrs_for_input['value'] = force_unicode(self._format_value(value))
        markup = (u'<input%s /><div class="map_canvas_wrapper">'
                  u'<div id="map_canvas"></div></div>' % flatatt(attrs_for_input))
        return mark_safe(markup)
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from datetime import date, datetime
from vistrails.core.system import strftime, time_strptime
from vistrails.db import VistrailsDBException
class SQLDAO:
    """Builds and executes parameterized SQL statements for VisTrails.

    All statement builders return a ``(sql_string, values_tuple)`` pair ready
    to be passed to :meth:`executeSQL`; values are bound with ``%s``
    placeholders by the DB-API driver, never interpolated into the SQL text.
    Note: this is Python 2 code (``long``, ``iteritems``, old except syntax).
    """

    def __init__(self):
        pass

    def convertFromDB(self, value, type, db_type):
        """Convert a raw DB value to the Python type named by ``type``.

        ``db_type`` tells whether the driver already returned a native
        date/datetime; otherwise the string form is parsed. Returns None for
        a None input or an unrecognized type name.
        """
        if value is not None:
            if type == 'str':
                return str(value)
            elif type == 'long':
                return long(value)
            elif type == 'float':
                return float(value)
            elif type == 'int':
                return int(value)
            elif type == 'date':
                if db_type == 'date':
                    return value
                else:
                    return date(*time_strptime(str(value), '%Y-%m-%d')[0:3])
            elif type == 'datetime':
                if db_type == 'datetime':
                    return value
                else:
                    return datetime(*time_strptime(str(value),
                                                   '%Y-%m-%d %H:%M:%S')[0:6])
        return None

    def convertToDB(self, value, type, db_type):
        """Convert a Python value to the string form sent to the database.

        Returns None if ``value`` is None so callers can distinguish SQL NULL.
        """
        if value is not None:
            if type == 'str':
                # return "'" + str(value).replace("'", "''") + "'"
                return str(value)
            elif type == 'long':
                return str(value)
            elif type == 'float':
                return str(value)
            elif type == 'int':
                return str(value)
            elif type == 'date':
                return value.isoformat()
            elif type == 'datetime':
                return strftime(value, '%Y-%m-%d %H:%M:%S')
            else:
                return str(value)

        return None

    def createSQLSelect(self, table, columns, whereMap, orderBy=None,
                        forUpdate=False):
        """Build a SELECT with AND-joined equality conditions from whereMap."""
        columnStr = ', '.join(columns)
        whereStr = ''
        whereClause = ''
        values = []
        for column, value in whereMap.iteritems():
            whereStr += '%s%s = %%s' % \
                        (whereClause, column)
            values.append(value)
            whereClause = ' AND '
        dbCommand = """SELECT %s FROM %s WHERE %s""" % \
                    (columnStr, table, whereStr)
        if orderBy is not None:
            dbCommand += " ORDER BY " + orderBy
        if forUpdate:
            # Row-level locking until the surrounding transaction ends.
            dbCommand += " FOR UPDATE"
        dbCommand += ";"
        return (dbCommand, tuple(values))

    def createSQLInsert(self, table, columnMap):
        """Build an INSERT from a column->value map.

        NOTE(review): a None value is replaced by the *string* 'NULL', which
        is then bound as a parameter -- it is stored as the text 'NULL', not
        SQL NULL. Looks intentional for this schema; confirm before changing.
        """
        columns = []
        values = []
        for column, value in columnMap.iteritems():
            if value is None:
                value = 'NULL'
            columns.append(column)
            values.append(value)
        columnStr = ', '.join(columns)
        # valueStr = '%s, '.join(values)
        valueStr = ''
        if len(values) > 1:
            valueStr = '%s,' * (len(values) - 1) + '%s'
        dbCommand = """INSERT INTO %s(%s) VALUES (%s);""" % \
                    (table, columnStr, valueStr)
        return (dbCommand, tuple(values))

    def createSQLUpdate(self, table, columnMap, whereMap):
        """Build an UPDATE; SET values first, then WHERE values, in order."""
        setStr = ''
        comma = ''
        values = []
        for column, value in columnMap.iteritems():
#             if value is None:
#                 value = 'NULL'
            setStr += '%s%s = %%s' % (comma, column)
            comma = ', '
            values.append(value)
        whereStr = ''
        whereClause = ''
        for column, value in whereMap.iteritems():
            whereStr += '%s%s = %%s' % (whereClause, column)
            values.append(value)
            whereClause = ' AND '
        dbCommand = """UPDATE %s SET %s WHERE %s;""" % \
                    (table, setStr, whereStr)
        return (dbCommand, tuple(values))

    def createSQLDelete(self, table, whereMap):
        """Build a DELETE with AND-joined equality conditions."""
        whereStr = ''
        whereClause = ''
        values = []
        for column, value in whereMap.iteritems():
            whereStr += '%s %s = %%s' % (whereClause, column)
            values.append(value)
            whereClause = ' AND '
        dbCommand = """DELETE FROM %s WHERE %s;""" % \
                    (table, whereStr)
        return (dbCommand, tuple(values))

    def executeSQL(self, db, cmd_tuple, isFetch):
        """Execute a (sql, values) pair built by the methods above.

        Returns fetched rows when ``isFetch`` is true, otherwise the last
        inserted row id. Wraps any driver error in VistrailsDBException and
        always closes the cursor.
        """
        dbCommand, values = cmd_tuple
        # print 'db: %s' % dbCommand
        # print 'values:', values
        data = None
        cursor = db.cursor()
        try:
            cursor.execute(dbCommand, values)
            if isFetch:
                data = cursor.fetchall()
            else:
                data = cursor.lastrowid
        except Exception, e:
            raise VistrailsDBException('Command "%s" with values "%s" '
                                       'failed: %s' % (dbCommand, values, e))
        finally:
            cursor.close()
        return data

    def start_transaction(self, db):
        """Begin a transaction on the given connection."""
        db.begin()

    def commit_transaction(self, db):
        """Commit the current transaction."""
        db.commit()

    def rollback_transaction(self, db):
        """Roll back the current transaction."""
        db.rollback()
| VisTrails/VisTrails | vistrails/db/versions/v1_0_1/persistence/sql/sql_dao.py | Python | bsd-3-clause | 6,946 |
# lint-amnesty, pylint: disable=missing-module-docstring
from .outlines import (
get_content_errors,
get_course_keys_with_outlines,
get_course_outline,
get_user_course_outline,
get_user_course_outline_details,
key_supports_outlines,
replace_course_outline,
)
| eduNEXT/edx-platform | openedx/core/djangoapps/content/learning_sequences/api/__init__.py | Python | agpl-3.0 | 287 |
"""Support to interact with a Music Player Daemon."""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "MPD"
DEFAULT_PORT = 6600
PLAYLIST_UPDATE_INTERVAL = timedelta(seconds=120)
SUPPORT_MPD = (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_SHUFFLE_SET
| SUPPORT_SEEK
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the MPD platform from a YAML configuration entry."""
    add_entities(
        [
            MpdDevice(
                config.get(CONF_HOST),
                config.get(CONF_PORT),
                config.get(CONF_PASSWORD),
                config.get(CONF_NAME),
            )
        ],
        True,
    )
class MpdDevice(MediaPlayerDevice):
    """Representation of a MPD server."""

    # pylint: disable=no-member
    def __init__(self, server, port, password, name):
        """Initialize the MPD device."""
        import mpd

        self.server = server
        self.port = port
        self._name = name
        self.password = password

        self._status = None
        self._currentsong = None
        self._playlists = None
        self._currentplaylist = None
        self._is_connected = False
        self._muted = False
        self._muted_volume = 0

        # set up MPD client
        self._client = mpd.MPDClient()
        self._client.timeout = 30
        self._client.idletimeout = None

    def _connect(self):
        """Connect to MPD."""
        import mpd

        try:
            self._client.connect(self.server, self.port)

            if self.password is not None:
                self._client.password(self.password)
        except mpd.ConnectionError:
            return

        self._is_connected = True

    def _disconnect(self):
        """Disconnect from MPD."""
        import mpd

        try:
            self._client.disconnect()
        except mpd.ConnectionError:
            pass
        self._is_connected = False
        self._status = None

    def _fetch_status(self):
        """Fetch status from MPD."""
        self._status = self._client.status()
        self._currentsong = self._client.currentsong()
        self._update_playlists()

    @property
    def available(self):
        """Return true if MPD is available and connected."""
        return self._is_connected

    def update(self):
        """Get the latest data and update the state."""
        import mpd

        try:
            if not self._is_connected:
                self._connect()

            self._fetch_status()
        except (mpd.ConnectionError, OSError, BrokenPipeError, ValueError):
            # Cleanly disconnect in case connection is not in valid state
            self._disconnect()

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the media state."""
        if self._status is None:
            return STATE_OFF
        if self._status["state"] == "play":
            return STATE_PLAYING
        if self._status["state"] == "pause":
            return STATE_PAUSED
        if self._status["state"] == "stop":
            return STATE_OFF

        return STATE_OFF

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._muted

    @property
    def media_content_id(self):
        """Return the content ID of current playing media."""
        return self._currentsong.get("file")

    @property
    def media_content_type(self):
        """Return the content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_duration(self):
        """Return the duration of current playing media in seconds."""
        # Time does not exist for streams
        return self._currentsong.get("time")

    @property
    def media_title(self):
        """Return the title of current playing media."""
        name = self._currentsong.get("name", None)
        title = self._currentsong.get("title", None)
        file_name = self._currentsong.get("file", None)

        if name is None and title is None:
            if file_name is None:
                return "None"
            return os.path.basename(file_name)
        if name is None:
            return title
        if title is None:
            return name

        return "{}: {}".format(name, title)

    @property
    def media_artist(self):
        """Return the artist of current playing media (Music track only)."""
        return self._currentsong.get("artist")

    @property
    def media_album_name(self):
        """Return the album of current playing media (Music track only)."""
        return self._currentsong.get("album")

    @property
    def volume_level(self):
        """Return the volume level."""
        if "volume" in self._status:
            return int(self._status["volume"]) / 100
        return None

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        if self._status is None:
            return None

        supported = SUPPORT_MPD
        if "volume" in self._status:
            supported |= SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE
        if self._playlists is not None:
            supported |= SUPPORT_SELECT_SOURCE

        return supported

    @property
    def source(self):
        """Name of the current input source."""
        return self._currentplaylist

    @property
    def source_list(self):
        """Return the list of available input sources."""
        return self._playlists

    def select_source(self, source):
        """Choose a different available playlist and play it."""
        self.play_media(MEDIA_TYPE_PLAYLIST, source)

    @Throttle(PLAYLIST_UPDATE_INTERVAL)
    def _update_playlists(self, **kwargs):
        """Update available MPD playlists."""
        import mpd

        try:
            self._playlists = []
            for playlist_data in self._client.listplaylists():
                self._playlists.append(playlist_data["playlist"])
        except mpd.CommandError as error:
            self._playlists = None
            _LOGGER.warning("Playlists could not be updated: %s:", error)

    def set_volume_level(self, volume):
        """Set volume of media player."""
        if "volume" in self._status:
            self._client.setvol(int(volume * 100))

    def volume_up(self):
        """Service to send the MPD the command for volume up."""
        if "volume" in self._status:
            current_volume = int(self._status["volume"])

            if current_volume < 100:
                # Clamp to 100: MPD's setvol only accepts 0-100, and the old
                # `<= 100` check allowed requests like setvol(103).
                self._client.setvol(min(current_volume + 5, 100))

    def volume_down(self):
        """Service to send the MPD the command for volume down."""
        if "volume" in self._status:
            current_volume = int(self._status["volume"])

            if current_volume > 0:
                # Clamp to 0: the old `>= 0` check allowed setvol(-5).
                self._client.setvol(max(current_volume - 5, 0))

    def media_play(self):
        """Service to send the MPD the command for play/pause."""
        self._client.pause(0)

    def media_pause(self):
        """Service to send the MPD the command for play/pause."""
        self._client.pause(1)

    def media_stop(self):
        """Service to send the MPD the command for stop."""
        self._client.stop()

    def media_next_track(self):
        """Service to send the MPD the command for next track."""
        self._client.next()

    def media_previous_track(self):
        """Service to send the MPD the command for previous track."""
        self._client.previous()

    def mute_volume(self, mute):
        """Mute. Emulated with set_volume_level."""
        if "volume" in self._status:
            if mute:
                # Remember the pre-mute level so unmute can restore it.
                self._muted_volume = self.volume_level
                self.set_volume_level(0)
            else:
                self.set_volume_level(self._muted_volume)
            self._muted = mute

    def play_media(self, media_type, media_id, **kwargs):
        """Send the media player the command for playing a playlist."""
        _LOGGER.debug("Playing playlist: %s", media_id)
        if media_type == MEDIA_TYPE_PLAYLIST:
            if media_id in self._playlists:
                self._currentplaylist = media_id
            else:
                self._currentplaylist = None
                _LOGGER.warning("Unknown playlist name %s", media_id)
            self._client.clear()
            self._client.load(media_id)
            self._client.play()
        else:
            self._client.clear()
            self._client.add(media_id)
            self._client.play()

    @property
    def shuffle(self):
        """Boolean if shuffle is enabled."""
        return bool(int(self._status["random"]))

    def set_shuffle(self, shuffle):
        """Enable/disable shuffle mode."""
        self._client.random(int(shuffle))

    def turn_off(self):
        """Service to send the MPD the command to stop playing."""
        self._client.stop()

    def turn_on(self):
        """Service to send the MPD the command to start playing."""
        self._client.play()
        self._update_playlists(no_throttle=True)

    def clear_playlist(self):
        """Clear players playlist."""
        self._client.clear()

    def media_seek(self, position):
        """Send seek command."""
        self._client.seekcur(position)
| fbradyirl/home-assistant | homeassistant/components/mpd/media_player.py | Python | apache-2.0 | 10,629 |
"""
Messages - file ``/var/log/messages``
=====================================
Reads the ``/var/log/messages`` file as a standard LogFileOutput class parser.
The important function is ``get(s)``, which finds all lines with the string
**s** and parses them into dictionaries with the following keys:
* ``timestamp`` - the time the log line was written
* ``procname`` - the process or facility that wrote the line
* ``hostname`` - the host that generated the log line
* ``message`` - the rest of the message (after the process name)
* ``raw_message`` - the raw message before being split.
It is best to use filters and/or scanners with the messages log, to speed up
parsing. These work on the raw message, before being parsed.
Sample log lines::
May 18 15:13:34 lxc-rhel68-sat56 jabberd/sm[11057]: session started: jid=rhn-dispatcher-sat@lxc-rhel6-sat56.redhat.com/superclient
May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon
May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: Launching a JVM...
May 18 15:24:28 lxc-rhel68-sat56 yum[11597]: Installed: lynx-2.8.6-27.el6.x86_64
May 18 15:36:19 lxc-rhel68-sat56 yum[11954]: Updated: sos-3.2-40.el6.noarch
Examples:
>>> Messages.filters.append('wrapper')
>>> Messages.token_scan('daemon_start', 'Wrapper Started as Daemon')
>>> msgs = shared[Messages]
>>> len(msgs.lines)
>>> wrapper_msgs = msgs.get('wrapper') # Can only rely on lines filtered being present
>>> wrapper_msgs[0]
{'timestamp': 'May 18 15:13:36', 'hostname': 'lxc-rhel68-sat56',
'procname': wrapper[11375]', 'message': '--> Wrapper Started as Daemon',
'raw_message': 'May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon'
}
>>> msgs.daemon_start # Token set if matching lines present in logs
True
"""
from .. import LogFileOutput, parser
@parser('messages')
class Messages(LogFileOutput):
    """
    Read the ``/var/log/messages`` file using the LogFileOutput parser class.
    """
    def get(self, s):
        """
        Parameters:
            s (str): String to search for

        Returns:
            ([dicts]): all lines that contain 's' as a list of dictionaries

        Examples::

            [
             {'timestamp':'May 18 14:24:14',
              'procname': 'kernel',
              'hostname':'lxc-rhel68-sat56',
              'message': '...',
              'raw_message': '...: ...'
             }, ...
            ]
        """
        r = []
        for l in self.lines:
            if s in l:
                info, sep, msg = l.partition(': ')
                if not sep:
                    # Malformed line with no 'info: message' separator.
                    # The old two-target unpack raised ValueError here;
                    # a log parser should skip such lines instead.
                    continue
                msg_info = {
                    'message': msg.strip(),
                    'raw_message': l
                }

                info_splits = info.strip().split()
                # A well-formed syslog prefix has exactly 5 fields:
                # month, day, time, hostname, procname.
                if len(info_splits) == 5:
                    msg_info['timestamp'] = ' '.join(info_splits[:3])
                    msg_info['hostname'] = info_splits[3]
                    msg_info['procname'] = info_splits[4]
                r.append(msg_info)
        return r
| PaulWay/insights-core | insights/parsers/messages.py | Python | apache-2.0 | 3,079 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tool to generate api_docs for TensorFlow2.
```
python generate2.py --output_dir=/tmp/out
```
Requires a local installation of `tensorflow_docs`:
```
pip install git+https://github.com/tensorflow/docs
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import textwrap
from absl import app
from absl import flags
from distutils.version import LooseVersion
import tensorflow as tf
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import doc_generator_visitor
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import parser
import tensorboard
import tensorflow_estimator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
# Use tensorflow's `tf_inspect`, which is aware of `tf_decorator`.
parser.tf_inspect = tf_inspect
# `tf` has an `__all__` that doesn't list important things like `keras`.
# The doc generator recognizes `__all__` as the list of public symbols.
# So patch `tf.__all__` to list everything.
tf.__all__ = [item_name for item_name, value in tf_inspect.getmembers(tf)]
FLAGS = flags.FLAGS
flags.DEFINE_string(
"code_url_prefix",
"/code/stable/tensorflow",
"A url to prepend to code paths when creating links to defining code")
flags.DEFINE_string(
"output_dir", "/tmp/out",
"A directory, where the docs will be output to.")
flags.DEFINE_bool("search_hints", True,
"Include meta-data search hints at the top of each file.")
flags.DEFINE_string("site_path", "",
"The prefix ({site-path}/api_docs/python/...) used in the "
"`_toc.yaml` and `_redirects.yaml` files")
if tf.__version__.startswith('1'):
PRIVATE_MAP = {
'tf.contrib.autograph': ['utils', 'operators'],
'tf.test': ['mock'],
'tf.contrib.estimator': ['python'],
'tf': ['python', 'core', 'compiler', 'examples', 'tools'],
# There's some aliasing between the compats and v1/2s, so it's easier to
# block by name and location than by deleting, or hiding objects.
'tf.compat.v1.compat': ['v1', 'v2'],
'tf.compat.v2.compat': ['v1', 'v2']
}
DO_NOT_DESCEND_MAP = {
'tf': ['cli', 'lib', 'wrappers'],
'tf.contrib': [
'compiler',
'grid_rnn',
# Block contrib.keras to de-clutter the docs
'keras',
'labeled_tensor',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'tfprof',
],
'tf.contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'tf.contrib.ffmpeg': ['ffmpeg_ops'],
'tf.contrib.graph_editor': [
'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
],
'tf.contrib.keras': ['api', 'python'],
'tf.contrib.layers': ['feature_column', 'summaries'],
'tf.contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'tf.contrib.util': ['loader'],
}
else:
PRIVATE_MAP = {
'tf': ['python', 'core', 'compiler', 'examples', 'tools'],
# There's some aliasing between the compats and v1/2s, so it's easier to
# block by name and location than by deleting, or hiding objects.
'tf.compat.v1.compat': ['v1', 'v2'],
'tf.compat.v2.compat': ['v1', 'v2']
}
DO_NOT_DESCEND_MAP = {}
tf.__doc__ = """
## TensorFlow
```
pip install tensorflow
```
"""
_raw_ops_doc = textwrap.dedent("""\n
Note: `tf.raw_ops` provides direct/low level access to all TensorFlow ops. See \
[the RFC](https://github.com/tensorflow/community/blob/master/rfcs/20181225-tf-raw-ops.md)
for details. Unless you are library writer, you likely do not need to use these
ops directly.""")
if LooseVersion(tf.__version__) < LooseVersion('2'):
tf.raw_ops.__doc__ = _raw_ops_doc
tf.contrib.__doc__ = """
Contrib module containing volatile or experimental code.
Warning: The `tf.contrib` module will not be included in TensorFlow 2.0. Many
of its submodules have been integrated into TensorFlow core, or spun-off into
other projects like [`tensorflow_io`](https://github.com/tensorflow/io), or
[`tensorflow_addons`](https://github.com/tensorflow/addons). For instructions
on how to upgrade see the
[Migration guide](https://www.tensorflow.org/guide/migrate).
"""
else:
tf.raw_ops.__doc__ += _raw_ops_doc
# The doc generator isn't aware of tf_export.
# So prefix the score tuples with -1 when this is the canonical name, +1
# otherwise. The generator chooses the name with the lowest score.
class TfExportAwareDocGeneratorVisitor(
    doc_generator_visitor.DocGeneratorVisitor):
  """A `tf_export` aware doc_visitor."""

  def _score_name(self, name):
    """Prefix the parent's score tuple with -1 for canonical names, +1 else."""
    canonical = tf_export.get_canonical_name_for_symbol(self._index[name])
    if canonical is not None and name == "tf." + canonical:
      canonical_rank = -1
    else:
      canonical_rank = 1
    parent_scores = super(TfExportAwareDocGeneratorVisitor,
                          self)._score_name(name)
    return (canonical_rank,) + parent_scores
def _hide_layer_and_module_methods():
  """Hide methods and properties defined in the base classes of keras layers."""
  # __dict__ only sees attributes defined in *this* class, not on parent classes
  combined = (list(tf.Module.__dict__.items()) +
              list(tf.keras.layers.Layer.__dict__.items()))

  for name, obj in combined:
    if name == "__init__":
      continue

    # Unwrap descriptors down to the underlying function object.
    if isinstance(obj, property):
      obj = obj.fget
    if isinstance(obj, (staticmethod, classmethod)):
      obj = obj.__func__

    try:
      doc_controls.do_not_doc_in_subclasses(obj)
    except AttributeError:
      pass
def build_docs(output_dir, code_url_prefix, search_hints=True):
  """Build api docs for tensorflow v2.

  Args:
    output_dir: A string path, where to put the files.
    code_url_prefix: prefix for "Defined in" links.
    search_hints: Bool. Include meta-data search hints at the top of each file.
  """
  _hide_layer_and_module_methods()

  # These attributes may be absent depending on the TF version/build, so each
  # suppression is wrapped individually.
  try:
    doc_controls.do_not_generate_docs(tf.tools)
  except AttributeError:
    pass

  try:
    doc_controls.do_not_generate_docs(tf.compat.v1.pywrap_tensorflow)
  except AttributeError:
    pass

  try:
    doc_controls.do_not_generate_docs(tf.pywrap_tensorflow)
  except AttributeError:
    pass

  try:
    doc_controls.do_not_generate_docs(tf.flags)
  except AttributeError:
    pass

  base_dir = path.normpath(path.join(tf.__file__, "../.."))

  # Source trees the generator strips from file paths; kept in the same order
  # as code_url_prefixes below (the two tuples are parallel).
  base_dirs = (
      path.join(base_dir, "tensorflow_core"),
      # External packages base directories
      path.dirname(tensorboard.__file__),
      path.dirname(tensorflow_estimator.__file__),
  )

  code_url_prefixes = (
      code_url_prefix,
      # External packages source repositories,
      "https://github.com/tensorflow/tensorboard/tree/master/tensorboard",
      "https://github.com/tensorflow/estimator/tree/master/tensorflow_estimator",
  )

  if LooseVersion(tf.__version__) < LooseVersion('2'):
    root_title = 'TensorFlow'
  elif LooseVersion(tf.__version__) >= LooseVersion('2'):
    root_title = 'TensorFlow 2.0'

  doc_generator = generate_lib.DocGenerator(
      root_title=root_title,
      py_modules=[("tf", tf)],
      base_dir=base_dirs,
      search_hints=search_hints,
      code_url_prefix=code_url_prefixes,
      site_path=FLAGS.site_path,
      visitor_cls=TfExportAwareDocGeneratorVisitor,
      private_map=PRIVATE_MAP,
      do_not_descend_map=DO_NOT_DESCEND_MAP)

  doc_generator.build(output_dir)
def main(argv):
  """Entry point: build the docs from the parsed command-line flags."""
  del argv  # Unused.
  build_docs(
      output_dir=FLAGS.output_dir,
      code_url_prefix=FLAGS.code_url_prefix,
      search_hints=FLAGS.search_hints)
if __name__ == "__main__":
app.run(main)
| ppwwyyxx/tensorflow | tensorflow/tools/docs/generate2.py | Python | apache-2.0 | 8,840 |
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
import config
LEADER_1_1 = 1
BBR_1 = 2
BBR_2 = 3
WAIT_ATTACH = 5
WAIT_REDUNDANCE = 3
ROUTER_SELECTION_JITTER = 1
BBR_REGISTRATION_JITTER = 5
"""
Topology
LEADER_1_1 --- BBR_1
\ |
\ |
\ |
BBR_2
1) Bring up Leader_1_1 and then BBR_1, BBR_1 becomes Primary Backbone Router.
2) Reset BBR_1, if bring back soon, it could restore the Backbone Router Service
from the network, after increasing sequence number, it will reregister its
Backbone Router Service to the Leader and become Primary.
3) Reset BBR_1, if bring back after it is released in the network, BBR_1 will
choose a random sequence number, register its Backbone Router Service to
Leader and become Primary.
4) Configure BBR_2 with highest sequence number and explicitly trigger SRV_DATA.ntf.
BBR_2 would become Primary and BBR_1 would change to Secondary with sequence
number increased by 1.
a) Check communication via DUA.
5) Stop BBR_2, BBR_1 would become Primary after detecting there is no available
Backbone Router Service in Thread Network.
6) Bring back BBR_2, and it would become Secondary.
a) Check the uniqueness of DUA by comparing the one in above 4a).
b) Check communication via DUA.
"""
class TestBackboneRouterService(thread_cert.TestCase):
    """Certification test for Backbone Router Service registration and fail-over.

    The scenario (steps 1-6) is described in the module docstring above.
    """

    TOPOLOGY = {
        LEADER_1_1: {
            'version': '1.1',
            'whitelist': [BBR_1, BBR_2],
        },
        BBR_1: {
            'version': '1.2',
            'whitelist': [LEADER_1_1, BBR_2],
            'is_bbr': True
        },
        BBR_2: {
            'version': '1.2',
            'whitelist': [LEADER_1_1, BBR_1],
            'is_bbr': True
        },
    }
    """All nodes are created with default configurations"""

    def test(self):
        # Form the network with the 1.1 Leader first.
        self.nodes[LEADER_1_1].start()
        WAIT_TIME = WAIT_ATTACH
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[LEADER_1_1].get_state(), 'leader')
        self.simulator.set_lowpan_context(1, config.DOMAIN_PREFIX)

        # 1) First Backbone Router would become the Primary.
        self.nodes[BBR_1].set_router_selection_jitter(ROUTER_SELECTION_JITTER)
        self.nodes[BBR_1].set_bbr_registration_jitter(BBR_REGISTRATION_JITTER)
        self.nodes[BBR_1].set_backbone_router(seqno=1)
        self.nodes[BBR_1].start()
        WAIT_TIME = WAIT_ATTACH + ROUTER_SELECTION_JITTER
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_1].get_state(), 'router')
        self.nodes[BBR_1].enable_backbone_router()
        WAIT_TIME = BBR_REGISTRATION_JITTER + WAIT_REDUNDANCE
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_1].get_backbone_router_state(),
                         'Primary')
        # The Primary joins the network-BBRs multicast group; the domain-BBRs
        # group is only joined once a Domain Prefix is configured.
        assert self.nodes[BBR_1].has_ipmaddr(config.ALL_NETWORK_BBRS_ADDRESS)
        assert not self.nodes[BBR_1].has_ipmaddr(config.ALL_DOMAIN_BBRS_ADDRESS)
        self.nodes[BBR_1].set_domain_prefix(config.DOMAIN_PREFIX)
        WAIT_TIME = WAIT_REDUNDANCE
        self.simulator.go(WAIT_TIME)
        assert self.nodes[BBR_1].has_ipmaddr(config.ALL_DOMAIN_BBRS_ADDRESS)

        # 2) Reset BBR_1 and bring it back soon.
        #    Verify that it restores Primary State with sequence number
        #    increased by 1.
        self.nodes[BBR_1].reset()
        self.nodes[BBR_1].set_bbr_registration_jitter(BBR_REGISTRATION_JITTER)
        self.nodes[BBR_1].set_router_selection_jitter(ROUTER_SELECTION_JITTER)
        self.nodes[BBR_1].set_domain_prefix(config.DOMAIN_PREFIX)
        self.nodes[BBR_1].enable_backbone_router()
        self.nodes[BBR_1].start()
        WAIT_TIME = WAIT_ATTACH + ROUTER_SELECTION_JITTER
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_1].get_state(), 'router')
        WAIT_TIME = BBR_REGISTRATION_JITTER + WAIT_REDUNDANCE
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_1].get_backbone_router_state(),
                         'Primary')
        assert self.nodes[BBR_1].get_backbone_router()['seqno'] == 2

        # 3) Reset BBR_1 and bring it back after its original router id is released
        #    200s (100s MaxNeighborAge + 90s InfiniteCost + 10s redundance)
        #    Verify it becomes Primary again.
        # Note: To ensure test in next step, here Step 3) will repeat until
        #       the random sequence number is not the highest value 255.
        while True:
            self.nodes[BBR_1].reset()
            WAIT_TIME = 200
            self.simulator.go(WAIT_TIME)
            self.nodes[BBR_1].set_router_selection_jitter(
                ROUTER_SELECTION_JITTER)
            self.nodes[BBR_1].set_bbr_registration_jitter(
                BBR_REGISTRATION_JITTER)
            self.nodes[BBR_1].set_domain_prefix(config.DOMAIN_PREFIX)
            self.nodes[BBR_1].enable_backbone_router()
            self.nodes[BBR_1].start()
            WAIT_TIME = WAIT_ATTACH + ROUTER_SELECTION_JITTER
            self.simulator.go(WAIT_TIME)
            self.assertEqual(self.nodes[BBR_1].get_state(), 'router')
            WAIT_TIME = BBR_REGISTRATION_JITTER + WAIT_REDUNDANCE
            self.simulator.go(WAIT_TIME)
            self.assertEqual(self.nodes[BBR_1].get_backbone_router_state(),
                             'Primary')
            BBR_1_SEQNO = self.nodes[BBR_1].get_backbone_router()['seqno']
            if (BBR_1_SEQNO != 255):
                break

        # 4) Configure BBR_2 with highest sequence number (255) and
        #    explicitly trigger SRV_DATA.ntf.
        #    Verify BBR_2 would become Primary and BBR_1 would change to
        #    Secondary with sequence number increased by 1.

        # Bring up BBR_2, it becomes Router with backbone function disabled
        # by default.
        self.nodes[BBR_2].set_router_selection_jitter(ROUTER_SELECTION_JITTER)
        self.nodes[BBR_2].set_bbr_registration_jitter(BBR_REGISTRATION_JITTER)
        self.nodes[BBR_2].set_domain_prefix(config.DOMAIN_PREFIX)
        self.nodes[BBR_2].start()
        WAIT_TIME = WAIT_ATTACH + ROUTER_SELECTION_JITTER
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_2].get_state(), 'router')
        WAIT_TIME = BBR_REGISTRATION_JITTER + WAIT_REDUNDANCE
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_2].get_backbone_router_state(),
                         'Disabled')
        assert not self.nodes[BBR_2].has_ipmaddr(
            config.ALL_NETWORK_BBRS_ADDRESS)
        assert not self.nodes[BBR_2].has_ipmaddr(config.ALL_DOMAIN_BBRS_ADDRESS)

        # Enable Backbone function, it will stay at Secondary state as
        # there is Primary Backbone Router already.
        # Here removes the Domain Prefix before enabling backbone function
        # intentionally to avoid SRV_DATA.ntf due to prefix inconsistency.
        self.nodes[BBR_2].remove_domain_prefix(config.DOMAIN_PREFIX)
        self.nodes[BBR_2].enable_backbone_router()
        self.nodes[BBR_2].set_backbone_router(seqno=255)
        WAIT_TIME = BBR_REGISTRATION_JITTER + WAIT_REDUNDANCE
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_2].get_backbone_router_state(),
                         'Secondary')
        # Check no SRV_DATA.ntf.
        messages = self.simulator.get_messages_sent_by(BBR_2)
        msg = messages.next_coap_message('0.02', '/a/sd', False)
        # Bug fix: the original message contained a bare "%d" placeholder
        # that was never formatted; substitute the node id explicitly.
        assert (
            msg is None
        ), "Error: node {} sent unexpected SRV_DATA.ntf when there is PBbr already".format(
            BBR_2)

        # Flush relative message queue.
        self.flush_nodes([BBR_1])
        # BBR_2 registers SRV_DATA.ntf explicitly.
        self.nodes[BBR_2].register_backbone_router()
        WAIT_TIME = WAIT_REDUNDANCE
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_2].get_backbone_router_state(),
                         'Primary')
        # Verify BBR_1 becomes Secondary and sends SRV_DATA.ntf to deregister
        # its service.
        messages = self.simulator.get_messages_sent_by(BBR_1)
        messages.next_coap_message('0.02', '/a/sd', True)
        self.assertEqual(self.nodes[BBR_1].get_backbone_router_state(),
                         'Secondary')
        # Verify Sequence number increases when become Secondary from Primary.
        assert self.nodes[BBR_1].get_backbone_router()['seqno'] == (
            BBR_1_SEQNO + 1)

        # 4a) Check communication via DUA.
        bbr2_dua = self.nodes[BBR_2].get_addr(config.DOMAIN_PREFIX)
        self.assertTrue(self.nodes[BBR_1].ping(bbr2_dua))

        # 5) Stop BBR_2, BBR_1 becomes Primary after detecting there is no
        #    available Backbone Router Service.
        self.nodes[BBR_2].reset()
        self.nodes[LEADER_1_1].release_router_id(
            self.nodes[BBR_2].get_router_id())
        # Wait for the dissemination of Network Data without Backbone Router service
        self.simulator.go(10)
        # BBR_1 becomes Primary.
        self.assertEqual(self.nodes[BBR_1].get_backbone_router_state(),
                         'Primary')
        messages = self.simulator.get_messages_sent_by(BBR_1)
        messages.next_coap_message('0.02', '/a/sd', True)

        # 6) Bring back BBR_2.
        #    Verify that BBR_2 stays at Secondary.
        self.nodes[BBR_2].set_router_selection_jitter(ROUTER_SELECTION_JITTER)
        self.nodes[BBR_2].set_bbr_registration_jitter(BBR_REGISTRATION_JITTER)
        # NOTE(review): this line configures BBR_1, although step 6 restores
        # BBR_2 (which lost its configuration on reset). It looks like it
        # should be BBR_2 -- confirm against upstream before changing.
        self.nodes[BBR_1].set_domain_prefix(config.DOMAIN_PREFIX)
        self.nodes[BBR_2].enable_backbone_router()
        self.nodes[BBR_2].interface_up()
        self.nodes[BBR_2].thread_start()
        WAIT_TIME = WAIT_ATTACH + ROUTER_SELECTION_JITTER
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_2].get_state(), 'router')
        WAIT_TIME = BBR_REGISTRATION_JITTER + WAIT_REDUNDANCE
        self.simulator.go(WAIT_TIME)
        self.assertEqual(self.nodes[BBR_2].get_backbone_router_state(),
                         'Secondary')
        assert self.nodes[BBR_1].has_ipmaddr(config.ALL_NETWORK_BBRS_ADDRESS)
        assert self.nodes[BBR_1].has_ipmaddr(config.ALL_DOMAIN_BBRS_ADDRESS)

        # 6a) Check the uniqueness of DUA by comparing the one in above 4a).
        bbr2_dua2 = self.nodes[BBR_2].get_addr(config.DOMAIN_PREFIX)
        assert bbr2_dua == bbr2_dua2, 'Error: Unexpected different DUA ({} v.s. {})'.format(
            bbr2_dua, bbr2_dua2)
        # 6b) Check communication via DUA
        self.assertTrue(self.nodes[BBR_1].ping(bbr2_dua))
# Run the certification test when executed directly.
if __name__ == '__main__':
    unittest.main()
| lanyuwen/openthread | tests/scripts/thread-cert/v1_2_test_backbone_router_service.py | Python | bsd-3-clause | 12,234 |
import asyncio
import json
import os
import pytest
import shakedown
import shlex
import time
import uuid
import sys
import retrying
from datetime import timedelta
from dcos import http, mesos
from dcos.errors import DCOSException, DCOSHTTPException
from distutils.version import LooseVersion
from json.decoder import JSONDecodeError
from shakedown import marathon
from urllib.parse import urljoin
# pytest skip markers gating tests on the running Marathon version. The
# condition strings are evaluated lazily by pytest in the test module's
# namespace; the "marthon" spelling intentionally matches the (misspelled)
# marthon_version_less_than() helper defined later in this module.
marathon_1_3 = pytest.mark.skipif('marthon_version_less_than("1.3")')
marathon_1_4 = pytest.mark.skipif('marthon_version_less_than("1.4")')
marathon_1_5 = pytest.mark.skipif('marthon_version_less_than("1.5")')
marathon_1_6 = pytest.mark.skipif('marthon_version_less_than("1.6")')
def ignore_exception(exc):
    """Predicate for @retrying.retry's retry_on_exception: retry on any Exception.

    ex. @retrying.retry( retry_on_exception=ignore_exception)

    Returns True only when the passed object really is an exception instance.
    """
    return isinstance(exc, Exception)
def constraints(name, operator, value=None):
    """Build a Marathon app constraints list: [[name, operator(, value)]]."""
    entry = [name, operator] if value is None else [name, operator, value]
    return [entry]
def pod_constraints(name, operator, value=None):
    """Build a single pod placement-constraint dict."""
    return {
        'fieldName': name,
        'operator': operator,
        'value': value,
    }
def unique_host_constraint():
    """Constraint placing each app instance on a distinct agent host."""
    return constraints('hostname', 'UNIQUE')
def assert_http_code(url, http_code='200'):
    """curl the url from the master node and assert the HTTP status code."""
    command = r'curl -s -o /dev/null -w "%{http_code}"' + ' {}'.format(url)
    status, output = shakedown.run_command_on_master(command)
    assert status, "{} failed".format(command)
    assert output == http_code, "Got {} status code".format(output)
def add_role_constraint_to_app_def(app_def, roles=None):
    """Set 'acceptedResourceRoles' on an app definition (in place).

    Roles are a list. Acceptable roles include:
        '*'
        'slave_public'
        '*, slave_public'

    :param app_def: app definition dict, modified in place
    :param roles: list of resource roles; defaults to ['*']
    :return: the same app definition, for chaining
    """
    # Avoid the mutable-default-argument pitfall: the original default
    # roles=['*'] was a single shared list that callers could mutate.
    app_def['acceptedResourceRoles'] = ['*'] if roles is None else roles
    return app_def
def pin_to_host(app_def, host):
    """Constrain every instance of the app to the given agent host."""
    app_def['constraints'] = constraints('hostname', 'LIKE', host)
def pin_pod_to_host(app_def, host):
    """Append a placement constraint tying the pod to the given host."""
    placement = app_def['scheduling']['placement']
    placement['constraints'].append(pod_constraints('hostname', 'LIKE', host))
def health_check(path='/', protocol='HTTP', port_index=0, failures=1, timeout=2):
    """Build a Marathon health-check definition with a 1s check interval."""
    check = {
        'protocol': protocol,
        'path': path,
        'portIndex': port_index,
        'timeoutSeconds': timeout,
        'intervalSeconds': 1,
        'maxConsecutiveFailures': failures,
    }
    return check
def external_volume_mesos_app(volume_name=None):
    """Return an app definition that uses an external volume.

    NOTE(review): the body ends with a bare ``return`` and therefore always
    returns None -- the app-definition dict appears to have been lost.
    Confirm against the original source before relying on this helper.

    :param volume_name: external volume name; a random unique one is
        generated when None
    """
    if volume_name is None:
        volume_name = 'marathon-si-test-vol-{}'.format(uuid.uuid4().hex)
    return
def command_health_check(command='true', failures=1, timeout=2):
    """Build a COMMAND-protocol Marathon health check (2s interval)."""
    return {
        'protocol': 'COMMAND',
        'command': {'value': command},
        'intervalSeconds': 2,
        'timeoutSeconds': timeout,
        'maxConsecutiveFailures': failures,
    }
def cluster_info(mom_name='marathon-user'):
    """Print a cluster summary: DC/OS version/mode, private agent count, and
    the versions of the root Marathon and (when present) Marathon-on-Marathon.
    """
    print("DC/OS: {}, in {} mode".format(shakedown.dcos_version(), shakedown.ee_version()))
    agents = shakedown.get_private_agents()
    print("Agents: {}".format(len(agents)))
    client = marathon.create_client()
    about = client.get_about()
    print("Marathon version: {}".format(about.get("version")))
    # MoM runs as a service; only query it when its endpoint is reachable.
    if shakedown.service_available_predicate(mom_name):
        with shakedown.marathon_on_marathon(mom_name):
            try:
                client = marathon.create_client()
                about = client.get_about()
                print("Marathon MoM version: {}".format(about.get("version")))
            except Exception:
                # Endpoint disappeared between the predicate and the query.
                print("Marathon MoM not present")
    else:
        print("Marathon MoM not present")
def delete_all_apps():
    """Remove every Marathon app except the special /marathon-user app."""
    client = marathon.create_client()
    for app in client.get_apps():
        app_id = app['id']
        if app_id == '/marathon-user':
            # This app hosts Marathon-on-Marathon itself; keep it alive.
            print('WARNING: not removing marathon-user, because it is special')
        else:
            client.remove_app(app_id, True)
def stop_all_deployments(noisy=False):
    """Force-stop every active Marathon deployment, swallowing errors.

    :param noisy: when True, print any error raised while stopping
    """
    client = marathon.create_client()
    for deployment in client.get_deployments():
        try:
            client.stop_deployment(deployment['id'])
        except Exception as e:
            if noisy:
                print(e)
def delete_all_apps_wait():
    """Remove all apps, then block (up to 5 min) for the deployments to finish."""
    delete_all_apps()
    shakedown.deployment_wait(timedelta(minutes=5).total_seconds())
def delete_all_groups():
    """Remove every Marathon group."""
    client = marathon.create_client()
    for group in client.get_groups():
        client.remove_group(group["id"])
def clean_up_marathon():
    """Best-effort reset of Marathon state: deployments, pods, apps, groups."""
    try:
        stop_all_deployments()
        clear_pods()
        delete_all_apps_wait()
        delete_all_groups()
    except Exception as e:
        # Cleanup is best-effort between tests; report but never fail.
        print(e)
def ip_other_than_mom():
    """Return the IP of any private agent not running MoM, or None."""
    mom_ip = ip_of_mom()
    candidates = (agent for agent in shakedown.get_private_agents()
                  if agent != mom_ip)
    return next(candidates, None)
def ip_of_mom():
    """Return the first service IP of marathon-user (MoM), or None."""
    service_ips = shakedown.get_service_ips('marathon', 'marathon-user')
    return next(iter(service_ips), None)
def ensure_mom():
    """Install Marathon-on-Marathon if missing and wait for its endpoint."""
    if not is_mom_installed():
        # if there is an active deployment... wait for it.
        # it is possible that mom is currently in the process of being uninstalled
        # in which case it will not report as installed however install will fail
        # until the deployment is finished.
        shakedown.deployment_wait()

        try:
            shakedown.install_package_and_wait('marathon')
            shakedown.deployment_wait()
        except Exception:
            # Best-effort: install may race an in-flight uninstall; the
            # endpoint check below reports the actual outcome.
            pass

        if not shakedown.wait_for_service_endpoint('marathon-user'):
            print('ERROR: Timeout waiting for endpoint')
def is_mom_installed():
    """True when the Marathon-on-Marathon package is installed."""
    return shakedown.package_installed('marathon')
def restart_master_node():
    """Reboot the mesos master node via SSH."""
    shakedown.run_command_on_master("sudo /sbin/shutdown -r now")
def cpus_on_agent(hostname):
    """Return the number of processor cores on the given agent."""
    count_cmd = "cat /proc/cpuinfo | grep processor | wc -l"
    _, output = shakedown.run_command_on_agent(hostname, count_cmd, noisy=False)
    return int(output)
def systemctl_master(command='restart'):
    """Run `systemctl <command>` on the dcos-mesos-master unit of the master."""
    shakedown.run_command_on_master(
        'sudo systemctl {} dcos-mesos-master'.format(command))
def block_iptable_rules_for_seconds(host, port_number, sleep_seconds, block_input=True, block_output=True):
    """ For testing network partitions we alter iptables rules to block ports for some time.
    We do that as a single SSH command because otherwise it makes it hard to ensure that iptable rules are restored.
    """
    # Unique backup file name so concurrent invocations cannot clobber each
    # other's saved rule sets.
    filename = 'iptables-{}.rules'.format(uuid.uuid4().hex)
    # One shell command: save rules (only once), apply DROP rules, sleep,
    # then restore from the backup and delete it -- so the rules are restored
    # even without a second SSH round-trip.
    cmd = """
if [ ! -e {backup} ] ; then sudo iptables-save > {backup} ; fi;
{block}
sleep {seconds};
if [ -e {backup} ]; then sudo iptables-restore < {backup} && sudo rm {backup} ; fi
""".format(backup=filename, seconds=sleep_seconds,
           block=iptables_block_string(block_input, block_output, port_number))
    shakedown.run_command_on_agent(host, cmd)
def iptables_block_string(block_input, block_output, port):
    """Compose iptables commands dropping TCP traffic on the given port."""
    rules = []
    if block_input:
        rules.append("sudo iptables -I INPUT -p tcp --dport {} -j DROP;".format(port))
    if block_output:
        rules.append("sudo iptables -I OUTPUT -p tcp --dport {} -j DROP;".format(port))
    return "".join(rules)
def wait_for_task(service, task, timeout_sec=120):
    """Poll until the named task of the service reaches TASK_RUNNING.

    Polls every 5 seconds. Returns the task status dict, or None when
    timeout_sec elapses first.
    """
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        try:
            response = shakedown.get_service_task(service, task)
        except Exception:
            response = None
        if response is not None and response['state'] == 'TASK_RUNNING':
            return response
        time.sleep(5)
    return None
def clear_pods():
    """Best-effort removal of all Marathon pods, waiting for the deployment."""
    try:
        client = marathon.create_client()
        for pod in client.list_pod():
            client.remove_pod(pod["id"], True)
        shakedown.deployment_wait()
    except Exception:
        # Cleanup helper between tests; failures here are not interesting.
        pass
def get_pod_tasks(pod_id):
    """Return all mesos tasks whose discovery name matches the pod id."""
    name = pod_id.lstrip('/')
    return [task for task in shakedown.get_marathon_tasks()
            if task['discovery']['name'] == name]
def marathon_version():
    """Return the running Marathon version as a LooseVersion.

    Examples of raw values: 1.3.9 or 1.4.0-RC8.
    """
    about = marathon.create_client().get_about()
    return LooseVersion(about.get("version"))
def marthon_version_less_than(version):
    """True when the running Marathon is older than `version`.

    NOTE: the misspelled name ("marthon") is load-bearing -- the pytest
    skipif marker strings at the top of this module reference it verbatim.
    """
    return marathon_version() < LooseVersion(version)
# pytest skip markers gating tests on the DC/OS version; the condition
# strings are evaluated lazily against dcos_version_less_than() below.
dcos_1_10 = pytest.mark.skipif('dcos_version_less_than("1.10")')
dcos_1_9 = pytest.mark.skipif('dcos_version_less_than("1.9")')
dcos_1_8 = pytest.mark.skipif('dcos_version_less_than("1.8")')
dcos_1_7 = pytest.mark.skipif('dcos_version_less_than("1.7")')
def dcos_canonical_version():
    """DC/OS version with any '-dev' suffix stripped, as a LooseVersion."""
    return LooseVersion(shakedown.dcos_version().replace('-dev', ''))
def dcos_version_less_than(version):
    """True when the cluster's DC/OS version is older than `version`."""
    return dcos_canonical_version() < LooseVersion(version)
def assert_app_tasks_running(client, app_def):
    """Assert the app's running-task count equals its configured instances."""
    app = client.get_app(app_def['id'])
    assert app['tasksRunning'] == app_def['instances']
def assert_app_tasks_healthy(client, app_def):
    """Assert the app's healthy-task count equals its configured instances."""
    app = client.get_app(app_def['id'])
    assert app['tasksHealthy'] == app_def['instances']
def get_marathon_leader_not_on_master_leader_node():
    """Return the Marathon leader IP, forcing a re-election first whenever the
    Marathon leader currently shares a node with the mesos master leader."""
    marathon_leader = shakedown.marathon_leader_ip()
    master_leader = shakedown.master_leader_ip()
    print('marathon leader: {}'.format(marathon_leader))
    print('mesos leader: {}'.format(master_leader))
    if marathon_leader == master_leader:
        # DELETE v2/leader makes the current Marathon leader abdicate.
        delete_marathon_path('v2/leader')
        shakedown.wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds())
        marathon_leader = assert_marathon_leadership_changed(marathon_leader)
        print('switched leader to: {}'.format(marathon_leader))
    return marathon_leader
def docker_env_not_set():
    """True unless both DOCKER_HUB_USERNAME and DOCKER_HUB_PASSWORD are set."""
    required = ('DOCKER_HUB_USERNAME', 'DOCKER_HUB_PASSWORD')
    return not all(var in os.environ for var in required)
#############
# moving to shakedown START
#############
def install_enterprise_cli_package():
    """Install `dcos-enterprise-cli` package. It is required by the `dcos security`
    command to create secrets, manage service accounts etc.
    """
    print('Installing dcos-enterprise-cli package')
    shakedown.run_dcos_command(
        'package install dcos-enterprise-cli --cli --yes', raise_on_error=True)
def is_enterprise_cli_package_installed():
    """Return True if the `dcos-enterprise-cli` package is installed.

    :raises DCOSException: when `dcos package list --json` output is not JSON
    """
    stdout, stderr, return_code = shakedown.run_dcos_command('package list --json')
    print('package list command returned code:{}, stderr:{}, stdout: {}'.format(return_code, stderr, stdout))
    try:
        result_json = json.loads(stdout)
    except JSONDecodeError as error:
        # Bug fix: the original code called the freshly-built exception
        # instance with `(error)`, raising a TypeError instead of the
        # intended DCOSException. `raise ... from ...` chains the cause.
        raise DCOSException('Could not parse: "{}"'.format(stdout)) from error
    return any(cmd['name'] == 'dcos-enterprise-cli' for cmd in result_json)
def create_docker_pull_config_json(username, password):
    """Build the content of a Docker config.json for the given credentials.

    :param username: username for a private Docker registry
    :param password: password for a private Docker registry
    :return: dict matching Docker's config.json layout
    """
    print('Creating a config.json content for dockerhub username {}'.format(username))

    import base64
    credentials = '{}:{}'.format(username, password).encode()
    auth_hash = base64.b64encode(credentials).decode()

    return {"auths": {"https://index.docker.io/v1/": {"auth": auth_hash}}}
def create_docker_credentials_file(username, password, file_name='docker.tar.gz'):
    """Create a `{file_name}` tarball containing `.docker/config.json` with
    the given Docker credentials.

    :param file_name: credentials file name `docker.tar.gz` by default
    :type command: str
    """
    print('Creating a tarball {} with json credentials for dockerhub username {}'.format(file_name, username))
    config_json_filename = 'config.json'

    # Write the config.json that will be packed into the tarball.
    with open(config_json_filename, 'w') as f:
        json.dump(create_docker_pull_config_json(username, password), f, indent=4)

    try:
        import tarfile
        with tarfile.open(file_name, 'w:gz') as tar:
            tar.add(config_json_filename, arcname='.docker/config.json')
    except Exception as e:
        print('Failed to create a docker credentils file {}'.format(e))
        raise e
    finally:
        # The loose config.json is only an intermediate artifact.
        os.remove(config_json_filename)
def copy_docker_credentials_file(agents, file_name='docker.tar.gz'):
    """Copy the docker credentials `file_name` to all passed `{agents}`,
    then delete the local copy. Used to access private docker repositories.

    :param agents: list of agent IPs to copy the file to
    :type agents: list
    """
    assert os.path.isfile(file_name), "Failed to upload credentials: file {} not found".format(file_name)

    try:
        print('Uploading tarball with docker credentials to all private agents...')
        for agent in agents:
            print("Copying docker credentials to {}".format(agent))
            shakedown.copy_file_to_agent(agent, file_name)
    except Exception as e:
        print('Failed to upload {} to agent: {}'.format(file_name, agent))
        raise e
    finally:
        # The local copy is never needed after the upload attempt.
        os.remove(file_name)
def has_secret(secret_name):
    """Return True when the named secret exists in the vault.

    Uses the `dcos security secrets` command; requires the
    `dcos-enterprise-cli` package.

    :param secret_name: secret name
    :type secret_name: str
    """
    stdout, _, _ = shakedown.run_dcos_command('security secrets list / --json')
    if not stdout:
        return False
    return secret_name in json.loads(stdout)
def delete_secret(secret_name):
    """Delete the named secret from the vault.

    Uses the `dcos security secrets` command; requires the
    `dcos-enterprise-cli` package.

    :param secret_name: secret name
    :type secret_name: str
    """
    print('Removing existing secret {}'.format(secret_name))
    _, _, return_code = shakedown.run_dcos_command(
        'security secrets delete {}'.format(secret_name))
    assert return_code == 0, "Failed to remove existing secret"
def create_secret(name, value=None, description=None):
    """Create a secret `name`, optionally with a value and a description.

    Uses the `dcos security secrets` command; requires the
    `dcos-enterprise-cli` package.

    :param name: secret name
    :param value: optional secret value
    :param description: optional secret description
    """
    print('Creating new secret {}:{}'.format(name, value))

    value_opt = '-v {}'.format(shlex.quote(value)) if value else ''
    description_opt = '-d "{}"'.format(description) if description else ''
    command = 'security secrets create {} {} "{}"'.format(value_opt, description_opt, name)

    _, _, return_code = shakedown.run_dcos_command(command, print_output=True)
    assert return_code == 0, "Failed to create a secret"
def create_sa_secret(secret_name, service_account, strict=False, private_key_filename='private-key.pem'):
    """Create an sa-secret for `service_account` from a private key file.

    Both the service account and the secret should share the same key pair.
    The private key file is deleted after the secret is created. Pass
    strict=True on a strict-mode secure cluster. Requires dcos-enterprise-cli.

    :param secret_name: secret name
    :param service_account: service account name
    :param strict: True when this is a `strict` secure cluster
    :param private_key_filename: private key file name
    """
    assert os.path.isfile(private_key_filename), "Failed to create secret: private key not found"

    print('Creating new sa-secret {} for service-account: {}'.format(secret_name, service_account))
    command = 'security secrets create-sa-secret {} {} {} {}'.format(
        '--strict' if strict else '',
        private_key_filename,
        service_account,
        secret_name)
    _, _, return_code = shakedown.run_dcos_command(command)

    os.remove(private_key_filename)
    assert return_code == 0, "Failed to create a secret"
def has_service_account(service_account):
    """Return True when a service account with the given name exists.

    Uses the `dcos security org` command; requires dcos-enterprise-cli.

    :param service_account: service account name
    """
    stdout, _, _ = shakedown.run_dcos_command('security org service-accounts show --json')
    return service_account in json.loads(stdout)
def delete_service_account(service_account):
    """Remove an existing service account. This method uses `dcos security org`
    command and assumes that `dcos-enterprise-cli` package is installed.

    :param service_account: service account name
    :type service_account: str
    """
    print('Removing existing service account {}'.format(service_account))
    stdout, stderr, return_code = \
        shakedown.run_dcos_command('security org service-accounts delete {}'.format(service_account))
    # Bug fix: the failure message previously said "create" although this
    # function deletes the account.
    assert return_code == 0, "Failed to delete a service account"
def create_service_account(service_account, private_key_filename='private-key.pem',
                           public_key_filename='public-key.pem', account_description='SI test account'):
    """Generate a key pair and create a service account with the public key.

    The public key file is removed afterwards; the private key file is kept
    because it is usually needed to create a secret next -- remove it manually
    if you don't plan to. Requires dcos-enterprise-cli.

    :param service_account: service account name
    :param private_key_filename: optional private key file name
    :param public_key_filename: optional public key file name
    :param account_description: service account description
    """
    print('Creating a key pair for the service account')
    shakedown.run_dcos_command('security org service-accounts keypair {} {}'.format(
        private_key_filename, public_key_filename))
    assert os.path.isfile(private_key_filename), "Private key of the service account key pair not found"
    assert os.path.isfile(public_key_filename), "Public key of the service account key pair not found"

    print('Creating {} service account'.format(service_account))
    _, _, return_code = shakedown.run_dcos_command(
        'security org service-accounts create -p {} -d "{}" {}'.format(
            public_key_filename, account_description, service_account))

    os.remove(public_key_filename)
    assert return_code == 0
def set_service_account_permissions(service_account, resource='dcos:superuser', action='full'):
    """Set permissions for given `{service_account}` for passed `{resource}` with
    `{action}`. For more information consult the DC/OS documentation:
    https://docs.mesosphere.com/1.9/administration/id-and-access-mgt/permissions/user-service-perms/
    """
    try:
        print('Granting {} permissions to {}/users/{}'.format(action, resource, service_account))
        url = urljoin(shakedown.dcos_url(), 'acs/api/v1/acls/{}/users/{}/{}'.format(resource, service_account, action))
        req = http.put(url)
        # ACS answers 204 No Content on a successful grant.
        msg = 'Failed to grant permissions to the service account: {}, {}'.format(req, req.text)
        assert req.status_code == 204, msg
    except DCOSHTTPException as e:
        # 409 Conflict means the permission already exists -- treat as success.
        if (e.response.status_code == 409):
            print('Service account {} already has {} permissions set'.format(service_account, resource))
        else:
            print("Unexpected HTTP error: {}".format(e.response))
            raise
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
def add_acs_resource(resource):
    """Create given ACS `{resource}`. For more information consult the DC/OS documentation:
    https://docs.mesosphere.com/1.9/administration/id-and-access-mgt/permissions/user-service-perms/
    """
    import json  # local import shadows the module-level one; harmless
    try:
        print('Adding ACS resource: {}'.format(resource))
        url = urljoin(shakedown.dcos_url(), 'acs/api/v1/acls/{}'.format(resource))
        extra_args = {'headers': {'Content-Type': 'application/json'}}
        req = http.put(url, data=json.dumps({'description': resource}), **extra_args)
        # ACS answers 201 Created on success.
        assert req.status_code == 201, 'Failed create ACS resource: {}, {}'.format(req, req.text)
    except DCOSHTTPException as e:
        # 409 Conflict means the resource already exists -- treat as success.
        if (e.response.status_code == 409):
            print('ACS resource {} already exists'.format(resource))
        else:
            print("Unexpected HTTP error: {}, {}".format(e.response, e.response.text))
            raise
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
def add_dcos_marathon_user_acls(user='root'):
    """Grant the dcos_marathon service account ACLs to run tasks as `user`."""
    add_service_account_user_acls(service_account='dcos_marathon', user=user)
def add_service_account_user_acls(service_account, user='root'):
    """Allow `service_account` to launch mesos tasks as the given OS user."""
    resource = 'dcos:mesos:master:task:user:{}'.format(user)
    add_acs_resource(resource)
    set_service_account_permissions(service_account, resource, action='create')
def get_marathon_endpoint(path, marathon_name='marathon'):
    """Return the full DC/OS url for a marathon endpoint path."""
    return shakedown.dcos_url_path('service/{}/{}'.format(marathon_name, path))
def http_get_marathon_path(name, marathon_name='marathon'):
    """HTTP GET {dcos_url}/service/{marathon_name}/{name} with Accept: */*.

    For example, name='ping': http GET {dcos_url}/service/marathon/ping
    """
    url = get_marathon_endpoint(name, marathon_name)
    return http.get(url, headers={'Accept': '*/*'})
# PR added to dcos-cli (however it takes weeks)
# https://github.com/dcos/dcos-cli/pull/974
def delete_marathon_path(name, marathon_name='marathon'):
    """HTTP DELETE {dcos_url}/service/{marathon_name}/{name}.

    For example, name='v2/leader' makes the current leader abdicate.
    """
    return http.delete(get_marathon_endpoint(name, marathon_name))
def multi_master():
    """True on a multi-master cluster.

    Useful with `pytest.mark.skipif('multi_master')` to skip tests that only
    make sense on single-master clusters (reverse logic: skip when multi).
    """
    return len(shakedown.get_all_masters()) > 1
def __get_all_agents():
    """Return the raw agent summaries from the mesos state endpoint."""
    return mesos.DCOSClient().get_state_summary()['slaves']
def agent_hostname_by_id(agent_id):
    """Return the hostname (ip) of the agent with the given id, or None."""
    matches = (agent['hostname'] for agent in __get_all_agents()
               if agent['id'] == agent_id)
    return next(matches, None)
def deployment_predicate(service_id=None):
    """True when no deployment is active (optionally: none affecting service_id)."""
    deployments = marathon.create_client().get_deployments()
    if service_id is None:
        return len(deployments) == 0
    return not any(
        service_id in d['affectedApps'] or service_id in d['affectedPods']
        for d in deployments)
def deployment_wait(timeout=120, service_id=None):
    """Wait until marathon has no active deployment for service_id (or any).

    Overrides shakedown's default so that pods are considered in addition to
    apps; can be removed once the dcos-cli handles pods.
    """
    shakedown.time_wait(lambda: deployment_predicate(service_id), timeout)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=60, retry_on_exception=ignore_exception)
def __marathon_leadership_changed_in_mesosDNS(original_leader):
    """ This method uses mesosDNS to verify that the leadership changed.
    We have to retry because mesosDNS checks for changes only every 30s.
    """
    # Up to 60 attempts, 1s apart: comfortably outlasts MesosDNS's 30s poll.
    current_leader = shakedown.marathon_leader_ip()
    print(f'leader according to MesosDNS: {current_leader}, original leader: {original_leader}') # NOQA E999
    assert current_leader, "MesosDNS returned empty string for Marathon leader ip."
    error = f'Current leader did not change: original={original_leader}, current={current_leader}' # NOQA E999
    assert original_leader != current_leader, error
    return current_leader
@retrying.retry(wait_exponential_multiplier=1000, wait_exponential_max=30000, retry_on_exception=ignore_exception)
def __marathon_leadership_changed_in_marathon_api(original_leader):
    """ This method uses Marathon API to figure out that leadership changed.
    We have to retry here because leader election takes time and what might happen is that some nodes might
    not be aware of the new leader being elected resulting in HTTP 502.
    """
    # Leader is returned like this 10.0.6.88:8080 - we want just the IP
    current_leader = marathon.create_client().get_leader().split(':', 1)[0]
    print('leader according to marathon API: {}'.format(current_leader))
    assert original_leader != current_leader
    return current_leader
def assert_marathon_leadership_changed(original_leader):
    """Assert that leadership changed according to both the Marathon API
    (``/v2/leader``) and MesosDNS, and that they agree on the new leader.

    :param original_leader: leader IP observed before the expected change
    :return: the new leader IP
    """
    api_leader = __marathon_leadership_changed_in_marathon_api(original_leader)
    dns_leader = __marathon_leadership_changed_in_mesosDNS(original_leader)
    assert api_leader == dns_leader, "Different leader IPs returned by Marathon ({}) and MesosDNS ({})."\
        .format(api_leader, dns_leader)
    return dns_leader
def running_status_network_info(task_statuses):
    """Return the first network info entry of the running task status found
    in ``task_statuses`` (a list of statuses from the Mesos API)."""
    status = running_task_status(task_statuses)
    return status['container_status']['network_infos'][0]
def running_task_status(task_statuses):
    """Return the first status with state TASK_RUNNING from the given list
    of statuses retrieved from the Mesos API; fail if none is present."""
    running = next(
        (status for status in task_statuses if status['state'] == "TASK_RUNNING"),
        None,
    )
    assert running is not None, "Did not find a TASK_RUNNING status in task statuses: %s" % (task_statuses,)
    return running
def task_by_name(tasks, name):
    """Return the Mesos task with the given name; fail if it is absent."""
    match = next((task for task in tasks if task['name'] == name), None)
    assert match is not None, "Did not find task with name %s in this list of tasks: %s" % (name, tasks,)
    return match
async def find_event(event_type, event_stream):
    """Consume ``event_stream`` until an event of ``event_type`` appears.

    Returns the matching event, or None if the stream is exhausted first.
    """
    async for candidate in event_stream:
        print('Check event: {}'.format(candidate))
        if candidate['eventType'] == event_type:
            return candidate
    return None
async def assert_event(event_type, event_stream, within=10):
    """Fail (via TimeoutError) unless an event of ``event_type`` arrives on
    ``event_stream`` within ``within`` seconds."""
    search = find_event(event_type, event_stream)
    await asyncio.wait_for(search, within)
def kill_process_on_host(hostname, pattern):
    """ Kill the process matching pattern at ip

        :param hostname: the hostname or ip address of the host on which the process will be killed
        :param pattern: a regular expression matching the name of the process to kill
        :return: IDs of processes that got either killed or terminated on their own
    """
    cmd = "ps aux | grep -v grep | grep '{}' | awk '{{ print $2 }}' | tee >(xargs sudo kill -9)".format(pattern)
    _, output = shakedown.run_command_on_agent(hostname, cmd)
    pids = [line.strip() for line in output.splitlines()]

    # Report what (if anything) was killed; pids were captured by `tee`
    # before the kill, so they may include processes that exited on their own.
    message = "Killed pids: {}".format(", ".join(pids)) if pids else "Killed no pids"
    print(message)
    return pids
| guenter/marathon | tests/system/common.py | Python | apache-2.0 | 29,268 |
# -*- coding: utf-8 -*-
# yellowbrick.features.pca
# Decomposition based feature visualization with PCA.
#
# Author: Carlo Morales
# Author: Raúl Peralta Lozada
# Author: Benjamin Bengfort
# Created: Tue May 23 18:34:27 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: pca.py [] cmorales@pacificmetrics.com $
"""
Decomposition based feature visualization with PCA.
"""
##########################################################################
## Imports
##########################################################################
# NOTE: must import mplot3d to load the 3D projection
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from yellowbrick.style import palettes
from yellowbrick.features.projection import ProjectionVisualizer
from yellowbrick.exceptions import YellowbrickValueError, NotFitted
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA as PCATransformer
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import NotFittedError
##########################################################################
# 2D and 3D PCA Visualizer
##########################################################################
class PCA(ProjectionVisualizer):
    """
    Produce a two or three dimensional principal component plot of a data array
    projected onto its largest sequential principal components. It is common
    practice to scale the data array ``X`` before applying a PC decomposition.
    Variable scaling can be controlled using the ``scale`` argument.

    Parameters
    ----------
    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If None is passed in, the current axes
        will be used (or generated if required).

    features : list, default: None
        The names of the features specified by the columns of the input dataset.
        The length of this list must match the number of columns in X, otherwise
        an exception will be raised on ``fit()``.

    classes : list, default: None
        The class labels for each class in y, ordered by sorted class index. These
        names act as a label encoder for the legend, identifying integer classes
        or renaming string labels. If omitted, the class labels will be taken from
        the unique values in y.

        Note that the length of this list must match the number of unique values in
        y, otherwise an exception is raised. This parameter is only used in the
        discrete target type case and is ignored otherwise.

    scale : bool, default: True
        Boolean that indicates if user wants to scale data.

    projection : int or string, default: 2
        The number of axes to project into, either 2d or 3d. To plot 3d plots
        with matplotlib, please ensure a 3d axes is passed to the visualizer,
        otherwise one will be created using the current figure.

    proj_features : bool, default: False
        Boolean that indicates if the user wants to project the features
        in the projected space. If True the plot will be similar to a biplot.

    colors : list or tuple, default: None
        A single color to plot all instances as or a list of colors to color each
        instance according to its class in the discrete case or as an ordered
        colormap in the sequential case. If not enough colors per class are
        specified then the colors are treated as a cycle.

    colormap : string or cmap, default: None
        The colormap used to create the individual colors. In the discrete case
        it is used to compute the number of colors needed for each class and
        in the continuous case it is used to create a sequential color map based
        on the range of the target.

    alpha : float, default: 0.75
        Specify a transparency where 1 is completely opaque and 0 is completely
        transparent. This property makes densely clustered points more visible.

    random_state : int, RandomState instance or None, optional (default None)
        This parameter sets the random state on this solver. If the input X is
        larger than 500x500 and the number of components to extract is lower
        than 80% of the smallest dimension of the data, then the more efficient
        `randomized` solver is enabled.

    colorbar : bool, default: True
        If the target_type is "continuous" draw a colorbar to the right of the
        scatter plot. The colorbar axes is accessible using the cax property.

    heatmap : bool, default: False
        Add a heatmap showing contribution of each feature in the principal components.
        Also draws a colorbar for readability purpose. The heatmap is accessible
        using lax property and colorbar using uax property.

    kwargs : dict
        Keyword arguments that are passed to the base class and may influence
        the visualization as defined in other Visualizers.

    Attributes
    ----------
    pca_components_ : ndarray, shape (n_features, n_components)
        This tells about the magnitude of each feature in the principal components.
        This is primarily used to draw the biplots.

    classes_ : ndarray, shape (n_classes,)
        The class labels that define the discrete values in the target. Only
        available if the target type is discrete. This is guaranteed to be
        strings even if the classes are a different type.

    features_ : ndarray, shape (n_features,)
        The names of the features discovered or used in the visualizer that
        can be used as an index to access or modify data in X. If a user passes
        feature names in, those features are used. Otherwise the columns of a
        DataFrame are used or just simply the indices of the data array.

    range_ : (min y, max y)
        A tuple that describes the minimum and maximum values in the target.
        Only available if the target type is continuous.

    Examples
    --------
    >>> from sklearn import datasets
    >>> iris = datasets.load_iris()
    >>> X = iris.data
    >>> y = iris.target
    >>> visualizer = PCA()
    >>> visualizer.fit_transform(X, y)
    >>> visualizer.show()
    """

    def __init__(
        self,
        ax=None,
        features=None,
        classes=None,
        scale=True,
        projection=2,
        proj_features=False,
        colors=None,
        colormap=None,
        alpha=0.75,
        random_state=None,
        colorbar=True,
        heatmap=False,
        **kwargs
    ):
        super(PCA, self).__init__(
            ax=ax,
            features=features,
            classes=classes,
            colors=colors,
            colormap=colormap,
            projection=projection,
            alpha=alpha,
            colorbar=colorbar,
            **kwargs
        )

        # Data Parameters
        self.scale = scale
        self.proj_features = proj_features

        # Create the PCA transformer pipeline. The scaler step is always
        # present so the pipeline structure is stable; ``with_std`` toggles
        # whether scaling actually happens.
        self.pca_transformer = Pipeline(
            [
                ("scale", StandardScaler(with_std=self.scale)),
                (
                    "pca",
                    PCATransformer(
                        n_components=self.projection, random_state=random_state
                    ),
                ),
            ]
        )
        self.alpha = alpha

        # Visual Parameters
        self.heatmap = heatmap
        self._uax, self._lax = None, None

        # No heatmap can be drawn with 3d plots as they do not permit axes
        # division.
        if self.projection == 3 and self.heatmap:
            raise YellowbrickValueError(
                "heatmap and colorbar are not compatible with 3d projections"
            )

        self._random_state = random_state

    @property
    def random_state(self):
        """The random state forwarded to the underlying PCA solver."""
        return self._random_state

    @random_state.setter
    def random_state(self, val):
        # Keep the pipeline's PCA step in sync with the stored value.
        self._random_state = val
        self.pca_transformer.set_params(pca__random_state=val)

    @property
    def uax(self):
        """
        The axes of the colorbar, bottom of scatter plot. This is the colorbar
        for heatmap and not for the scatter plot.
        """
        if self._uax is None:
            raise AttributeError("This visualizer does not have an axes for colorbar")
        return self._uax

    @property
    def lax(self):
        """
        The axes of the heatmap below scatter plot.
        """
        if self._lax is None:
            raise AttributeError("This visualizer does not have an axes for heatmap")
        return self._lax

    def layout(self, divider=None):
        """
        Creates the layout for colorbar and heatmap, adding new axes for the heatmap
        if necessary and modifying the aspect ratio. Does not modify the axes or the
        layout if ``self.heatmap`` is ``False`` or ``None``.

        Parameters
        ----------
        divider: AxesDivider
            An AxesDivider to be passed among all layout calls.
        """
        # Ensure matplotlib version compatibility
        if make_axes_locatable is None:
            raise YellowbrickValueError(
                (
                    "heatmap requires matplotlib 2.0.2 or greater "
                    "please upgrade matplotlib or set heatmap=False on the visualizer"
                )
            )

        # Create the new axes for the colorbar and heatmap
        if divider is None:
            divider = make_axes_locatable(self.ax)

        # Call to super class ensures that a colorbar is drawn when target is
        # continuous.
        super(PCA, self).layout(divider)

        if self.heatmap:
            # Axes for colorbar (for heatmap).
            if self._uax is None:
                self._uax = divider.append_axes("bottom", size="10%", pad=0.7)

            # Axes for heatmap
            if self._lax is None:
                self._lax = divider.append_axes("bottom", size="15%", pad=0.5)

    def fit(self, X, y=None, **kwargs):
        """
        Fits the PCA transformer, transforms the data in X, then draws the
        decomposition in either 2D or 3D space as a scatter plot.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features.

        y : ndarray or Series of length n
            An array or series of target or class values.

        Returns
        -------
        self : visualizer
            Returns self for use in Pipelines.
        """
        # Call super fit to compute features, classes, colors, etc.
        super(PCA, self).fit(X=X, y=y, **kwargs)
        self.pca_transformer.fit(X)
        # Expose the fitted components for biplot and heatmap drawing.
        self.pca_components_ = self.pca_transformer.named_steps["pca"].components_
        return self

    def transform(self, X, y=None, **kwargs):
        """
        Calls the internal `transform` method of the scikit-learn PCA transformer, which
        performs a dimensionality reduction on the input features ``X``. Next calls the
        ``draw`` method of the Yellowbrick visualizer, finally returning a new array of
        transformed features of shape ``(len(X), projection)``.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features.

        y : ndarray or Series of length n
            An array or series of target or class values.

        Returns
        -------
        Xp : ndarray or DataFrame of shape n x m
            Returns a new array-like object of transformed features of shape
            ``(len(X), projection)``.
        """
        try:
            Xp = self.pca_transformer.transform(X)
            self.draw(Xp, y)
            return Xp
        except NotFittedError:
            # Re-raise with a Yellowbrick error that tells the user to fit first.
            raise NotFitted.from_estimator(self, "transform")

    def draw(self, Xp, y):
        """
        Plots a scatterplot of points that represented the decomposition,
        `pca_features_`, of the original features, `X`, projected into either 2 or
        3 dimensions.

        If 2 dimensions are selected, a colorbar and heatmap can also be optionally
        included to show the magnitude of each feature value to the component.

        Parameters
        ----------
        Xp : array-like of shape (n, 2) or (n, 3)
            The matrix produced by the ``transform()`` method.

        y : array-like of shape (n,), optional
            The target, used to specify the colors of the points.

        Returns
        -------
        self.ax : matplotlib Axes object
            Returns the axes that the scatter plot was drawn on.
        """
        # Call to super draw which draws the scatter plot.
        super(PCA, self).draw(Xp, y)
        if self.proj_features:
            # Draws projection features in transformed space.
            self._draw_projection_features(Xp, y)
        if self.projection == 2:
            if self.heatmap:
                if not self.colormap:
                    self.colormap = palettes.DEFAULT_SEQUENCE
                # TODO: change to pcolormesh instead of imshow per #615 spec
                im = self.lax.imshow(
                    self.pca_components_,
                    interpolation="none",
                    cmap=self.colormap,
                    aspect="auto",
                )
                plt.colorbar(
                    im,
                    cax=self.uax,
                    orientation="horizontal",
                    ticks=[self.pca_components_.min(), 0, self.pca_components_.max()],
                )
        return self.ax

    def _draw_projection_features(self, Xp, y):
        """
        Draw the projection of features in the transformed space.

        Parameters
        ----------
        Xp : array-like of shape (n, 2) or (n, 3)
            The matrix produced by the ``transform()`` method.

        y : array-like of shape (n,), optional
            The target, used to specify the colors of the points.

        Returns
        -------
        self.ax : matplotlib Axes object
            Returns the axes that the scatter plot was drawn on.
        """
        x_vector = self.pca_components_[0]
        y_vector = self.pca_components_[1]
        max_x = max(Xp[:, 0])
        max_y = max(Xp[:, 1])
        if self.projection == 2:
            for i in range(self.pca_components_.shape[1]):
                self.ax.arrow(
                    x=0,
                    y=0,
                    dx=x_vector[i] * max_x,
                    dy=y_vector[i] * max_y,
                    color="r",
                    head_width=0.05,
                    width=0.005,
                )
                self.ax.text(
                    x_vector[i] * max_x * 1.05,
                    y_vector[i] * max_y * 1.05,
                    self.features_[i],
                    color="r",
                )
        elif self.projection == 3:
            z_vector = self.pca_components_[2]
            # BUG FIX: scale the z component by the z column of the projected
            # data; this previously used column 1 (the y axis) by mistake,
            # distorting the biplot arrows along the z axis.
            max_z = max(Xp[:, 2])
            for i in range(self.pca_components_.shape[1]):
                self.ax.plot(
                    [0, x_vector[i] * max_x],
                    [0, y_vector[i] * max_y],
                    [0, z_vector[i] * max_z],
                    color="r",
                )
                self.ax.text(
                    x_vector[i] * max_x * 1.05,
                    y_vector[i] * max_y * 1.05,
                    z_vector[i] * max_z * 1.05,
                    self.features_[i],
                    color="r",
                )
        else:
            raise YellowbrickValueError("Projection dimensions must be either 2 or 3")
        return self.ax

    def finalize(self, **kwargs):
        """
        Draws the title, labels, legends, heatmap, and colorbar as specified by the
        keyword arguments.
        """
        super(PCA, self).finalize()

        self.ax.set_title("Principal Component Plot")
        self.ax.set_xlabel("$PC_1$")
        self.ax.set_ylabel("$PC_2$")
        if self.projection == 3:
            self.ax.set_zlabel("$PC_3$")
        if self.heatmap:
            # Major ticks mark cell borders; labels go on centered minor ticks.
            self.lax.set_xticks(np.arange(-0.5, len(self.features_)))
            self.lax.set_xticklabels([])
            self.lax.set_xticks(np.arange(0, len(self.features_)), minor=True)
            self.lax.set_xticklabels(
                self.features_, rotation=90, fontsize=12, minor=True
            )
            self.lax.set_yticks(np.arange(0.5, 2))
            self.lax.set_yticklabels(["$PC_1$", "$PC_2$"], va="bottom", fontsize=10)
        self.fig.tight_layout()
##########################################################################
## Quick Method
##########################################################################
def pca_decomposition(
    X,
    y=None,
    ax=None,
    features=None,
    classes=None,
    scale=True,
    projection=2,
    proj_features=False,
    colors=None,
    colormap=None,
    alpha=0.75,
    random_state=None,
    colorbar=True,
    heatmap=False,
    show=True,
    **kwargs
):
    """
    Quick method that produces a two or three dimensional principal component
    plot of the data array ``X`` projected onto its largest sequential
    principal components, fitting and displaying a ``PCA`` visualizer in a
    single call. It is common practice to scale ``X`` before decomposition,
    controlled here with the ``scale`` argument.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features.

    y : ndarray or Series of length n, default: None
        An array or series of target or class values.

    show : bool, default: True
        If True, calls ``show()`` (which in turn calls ``plt.show()``);
        if False only ``finalize()`` is called, so the caller may still save
        or modify the figure.

    ax, features, classes, scale, projection, proj_features, colors,
    colormap, alpha, random_state, colorbar, heatmap, kwargs :
        Forwarded unchanged to the ``PCA`` visualizer; see its docstring for
        the full description of each parameter.

    Returns
    -------
    visualizer : PCA
        The fitted, finalized visualizer.

    Examples
    --------
    >>> from sklearn import datasets
    >>> iris = datasets.load_iris()
    >>> pca_decomposition(iris.data, iris.target, colors=['r', 'g', 'b'], projection=3)
    """
    # Build the visualizer with the forwarded configuration.
    oz = PCA(
        ax=ax,
        features=features,
        classes=classes,
        scale=scale,
        projection=projection,
        proj_features=proj_features,
        colors=colors,
        colormap=colormap,
        alpha=alpha,
        random_state=random_state,
        colorbar=colorbar,
        heatmap=heatmap,
        **kwargs
    )

    # Fit the decomposition then transform the data (transform calls draw).
    oz.fit(X, y)
    oz.transform(X, y)

    # Display or merely finalize the figure as requested.
    if show:
        oz.show()
    else:
        oz.finalize()

    return oz
# Alias for PCA, retained for backwards compatibility with earlier releases
# that exposed the visualizer under this name.
PCADecomposition = PCA
| DistrictDataLabs/yellowbrick | yellowbrick/features/pca.py | Python | apache-2.0 | 22,996 |
"""
CacheItem interface:
'_id': string,
'url': string,
'response_url': string,
'body': string,
'head': string,
'response_code': int,
'cookies': None,#grab.response.cookies,
TODO: WTF with cookies???
"""
from __future__ import absolute_import
from hashlib import sha1
import zlib
import logging
import MySQLdb
import marshal
import time
from grab.response import Response
from grab.util.py3k_support import *
logger = logging.getLogger('grab.spider.cache_backend.mysql')

# py3 hack: the MySQLdb port for Python 3 returns binary column data as the
# textual repr of bytes, so these helpers round-trip between that repr and
# real bytes objects.
if PY3K:
    import re
    from functools import reduce

    # Matches two-digit hex byte literals such as "0xAF" (after the
    # '\x' -> '0x' rewrite below).
    RE_HEXS = re.compile('0x[a-fA-F0-9]{2}')

    def _str_to_hexbytes(val):
        # Convert the textual repr of binary data produced by the MySQLdb
        # py3k port [https://github.com/davispuh/MySQL-for-Python-3] back
        # into real bytes.
        val = val.replace('\\x', '0x')
        # Finds all hexadecimals
        xs = re.findall(RE_HEXS, val)
        xc = [chr(int(s, 16)) for s in xs]
        # Plus escape sequences; '_\\\\_' acts as a temporary placeholder so
        # that backslash replacement does not clobber earlier substitutions.
        xs += ['\\\\', "\\'", '\\"', '\\a', '\\b', '\\f', '\\n', '\\r', '\\t', '\\v', '_\\\\_']
        xc += ['_\\\\_', "\'", '\"', '\a', '\b', '\f', '\n', '\r', '\t', '\v', '\\']
        # Replaces all pairs in order.
        val = reduce(lambda acc, args: acc.replace(*args), zip(xs, xc), val)
        # Converts to bytes
        return val.encode('raw_unicode_escape')

    def _hexbytes_to_str(val):
        # Strip the leading "b'" and trailing "'" from the bytes repr.
        return str(val)[2:-1]
class CacheBackend(object):
    """MySQL-backed cache of downloaded documents for the Grab spider.

    Documents live in a single ``cache`` table keyed by the SHA1 digest of
    the URL; values are CacheItem dicts (see module docstring) serialized
    with ``marshal`` and compressed with ``zlib``.
    """

    def __init__(self, database, use_compression=True,
                 mysql_engine='innodb', spider=None, **kwargs):
        # NOTE(review): `use_compression` is accepted but never stored or
        # consulted; pack/unpack always compress. Confirm intended behavior.
        self.spider = spider
        # Remaining kwargs go straight to MySQLdb.connect (host, user, ...).
        self.conn = MySQLdb.connect(**kwargs)
        self.mysql_engine = mysql_engine
        self.conn.select_db(database)
        self.cursor = self.conn.cursor()
        # READ COMMITTED keeps concurrent spider processes from blocking on
        # each other's uncommitted writes.
        self.cursor.execute('SET TRANSACTION ISOLATION LEVEL READ COMMITTED')
        # Create the cache table on first use.
        res = self.cursor.execute('show tables')
        found = False
        for row in self.cursor:
            if row[0] == 'cache':
                found = True
                break
        if not found:
            self.create_cache_table(self.mysql_engine)

    def create_cache_table(self, engine):
        # `id` is the raw 20-byte SHA1 digest of the URL (written via MySQL
        # x'..' hex literals elsewhere in this class).
        self.cursor.execute('begin')
        self.cursor.execute('''
            create table cache (
                id binary(20) not null,
                timestamp int not null,
                data mediumblob not null,
                primary key (id),
                index timestamp_idx(timestamp)
            ) engine = %s
        ''' % engine)
        self.cursor.execute('commit')

    def get_item(self, url, timeout=None):
        """
        Returned item should have specific interface. See module docstring.

        `timeout` (seconds) restricts the lookup to sufficiently fresh rows.
        """
        _hash = self.build_hash(url)
        with self.spider.save_timer('cache.read.mysql_query'):
            self.cursor.execute('BEGIN')
            if timeout is None:
                query = ""
            else:
                # Only accept rows newer than `timeout` seconds.
                ts = int(time.time()) - timeout
                query = " AND timestamp > %d" % ts
            # py3 hack
            if PY3K:
                # NOTE(review): the "{0}" placeholder is never substituted
                # via str.format before execute() is called; verify this
                # path against the MySQLdb py3k port actually in use.
                sql = '''
                      SELECT data
                      FROM cache
                      WHERE id = x{0} %(query)s
                      ''' % {'query': query}
            else:
                # %%s survives the %-formatting as the %s parameter
                # placeholder; x%s forms a MySQL hex literal for the digest.
                sql = '''
                      SELECT data
                      FROM cache
                      WHERE id = x%%s %(query)s
                      ''' % {'query': query}
            res = self.cursor.execute(sql, (_hash,))
            row = self.cursor.fetchone()
            self.cursor.execute('COMMIT')
        if row:
            data = row[0]
            # py3 hack
            if PY3K:
                # A temporary solution for MySQLdb (Py3k port)
                # [https://github.com/davispuh/MySQL-for-Python-3]
                data = _str_to_hexbytes(data)
            return self.unpack_database_value(data)
        else:
            return None

    def unpack_database_value(self, val):
        # Reverse of pack_database_value: decompress then unmarshal.
        with self.spider.save_timer('cache.read.unpack_data'):
            dump = zlib.decompress(val)
            return marshal.loads(dump)

    def build_hash(self, url):
        # Returns the 40-char hex SHA1 of the UTF-8 encoded URL; combined
        # with the SQL x'..' literal syntax it matches the binary(20) id.
        with self.spider.save_timer('cache.read.build_hash'):
            if isinstance(url, unicode):
                utf_url = url.encode('utf-8')
            else:
                utf_url = url
            return sha1(utf_url).hexdigest()

    def remove_cache_item(self, url):
        # Delete the cached row for `url`, if any.
        _hash = self.build_hash(url)
        self.cursor.execute('begin')
        self.cursor.execute('''
            delete from cache where id = x%s
        ''', (_hash,))
        self.cursor.execute('commit')

    def load_response(self, grab, cache_item):
        # Rebuild a Response object from a cached item and feed it to the
        # grab instance as if it had just been downloaded.
        grab.fake_response(cache_item['body'])

        body = cache_item['body']

        def custom_prepare_response_func(transport, g):
            response = Response()
            response.head = cache_item['head']
            response.body = body
            response.code = cache_item['response_code']
            response.download_size = len(body)
            response.upload_size = 0
            response.download_speed = 0

            # Hack for deprecated behaviour: old caches lack `response_url`.
            if 'response_url' in cache_item:
                response.url = cache_item['response_url']
            else:
                logger.debug('You cache contains items without `response_url` key. It is depricated data format. Please re-download you cache or build manually `response_url` keys.')
                response.url = cache_item['url']

            response.parse()
            response.cookies = transport.extract_cookies()
            return response

        grab.process_request_result(custom_prepare_response_func)

    def save_response(self, url, grab):
        # Snapshot the current grab response into a CacheItem dict.
        body = grab.response.body
        item = {
            'url': url,
            'response_url': grab.response.url,
            'body': body,
            'head': grab.response.head,
            'response_code': grab.response.code,
            'cookies': None,
        }
        self.set_item(url, item)

    def set_item(self, url, item):
        # Insert or refresh the cached row for `url`.
        _hash = self.build_hash(url)
        data = self.pack_database_value(item)
        # py3 hack
        if PY3K:
            # A temporary solution for MySQLdb (Py3k port)
            # [https://github.com/davispuh/MySQL-for-Python-3]
            data = _hexbytes_to_str(data)
        self.cursor.execute('BEGIN')
        ts = int(time.time())
        # py3 hack
        if PY3K:
            # NOTE(review): as in get_item, the {0}..{4} placeholders are
            # never substituted via str.format; verify against the MySQLdb
            # py3k port actually in use.
            sql = '''
                  INSERT INTO cache (id, timestamp, data)
                  VALUES(x{0}, {1}, {2})
                  ON DUPLICATE KEY UPDATE timestamp = {3}, data = {4}
                  '''
        else:
            sql = '''
                  INSERT INTO cache (id, timestamp, data)
                  VALUES(x%s, %s, %s)
                  ON DUPLICATE KEY UPDATE timestamp = %s, data = %s
                  '''
        res = self.cursor.execute(sql, (_hash, ts, data, ts, data))
        self.cursor.execute('COMMIT')

    def pack_database_value(self, val):
        # Marshal (fast, stdlib-only) then zlib-compress for storage.
        dump = marshal.dumps(val)
        return zlib.compress(dump)

    def clear(self):
        # Drop every cached row.
        self.cursor.execute('BEGIN')
        self.cursor.execute('TRUNCATE cache')
        self.cursor.execute('COMMIT')

    def has_item(self, url, timeout=None):
        """
        Test if required item exists in the cache.

        `timeout` (seconds) restricts the check to sufficiently fresh rows.
        """
        _hash = self.build_hash(url)
        with self.spider.save_timer('cache.read.mysql_query'):
            if timeout is None:
                query = ""
            else:
                ts = int(time.time()) - timeout
                query = " AND timestamp > %d" % ts
            res = self.cursor.execute('''
                SELECT id
                FROM cache
                WHERE id = x%%s %(query)s
                LIMIT 1
                ''' % {'query': query},
                (_hash,))
            row = self.cursor.fetchone()
        return True if row else False
| boooka/GeoPowerOff | venv/lib/python2.7/site-packages/grab/spider/cache_backend/mysql.py | Python | apache-2.0 | 7,771 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from socorro.lib import MissingArgumentError, external_common
from socorro.external.postgresql.base import PostgreSQLBase
class SignatureFirstDate(PostgreSQLBase):
    """Service returning, for each requested crash signature, the date and
    build number on which it was first reported."""

    filters = [
        ('signatures', None, ['list', 'str']),
    ]

    def get(self, **kwargs):
        params = external_common.parse_arguments(self.filters, kwargs)
        if not params['signatures']:
            raise MissingArgumentError('signatures')

        sql = """
            SELECT
                signature,
                first_report AS first_date,
                first_build::VARCHAR
            FROM signatures
            WHERE signature IN %s
        """
        # A single tuple parameter expands into the IN (...) clause.
        results = self.query(
            sql,
            [tuple(params['signatures'])],
            error_message='Failed to retrieve signatures from PostgreSQL',
        )

        hits = results.zipped()
        return {
            'hits': hits,
            'total': len(hits),
        }
| Tayamarn/socorro | socorro/external/postgresql/signature_first_date.py | Python | mpl-2.0 | 1,181 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def _safe_shape_div(x, y):
  """Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`."""
  # Clamp the divisor to at least 1 so empty dimensions never divide by zero.
  safe_divisor = math_ops.maximum(y, 1)
  return x // safe_divisor
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
  """Gradient for Sum.

  The gradient of a sum broadcasts the incoming gradient back over every
  element that was summed; inputs[1] (the reduction indices) gets None.
  """
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  if (op.inputs[0].get_shape().ndims is not None and op.inputs[1].op.type ==
      "Const"):
    rank = op.inputs[0].get_shape().ndims
    axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
    if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
      grad = array_ops.reshape(grad, [1] * rank)
      # If shape is not fully defined (but rank is), we use Shape.
      if op.inputs[0].get_shape().is_fully_defined():
        input_shape = op.inputs[0].get_shape().as_list()
      else:
        input_shape = array_ops.shape(op.inputs[0])
      # Tile the scalar gradient over the full input shape.
      return [array_ops.tile(grad, input_shape), None]

  # General case: reshape grad to the output shape with reduced dims kept as
  # size 1, then tile it back out across the reduced dimensions.
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]
def _MinOrMaxGrad(op, grad):
  """Gradient for Min or Max. Amazingly it's precisely the same code.

  Args:
    op: the Min/Max op; inputs[0] is the tensor, inputs[1] the reduction axes.
    grad: gradient with respect to the op's output.

  Returns:
    A two-element list: the gradient for the input tensor and None for the
    reduction indices.
  """
  input_shape = array_ops.shape(op.inputs[0])
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  # Broadcast the reduced output and gradient back to the input's rank.
  y = op.outputs[0]
  y = array_ops.reshape(y, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)

  # Compute the number of selected (maximum or minimum) elements in each
  # reduction dimension. If there are multiple minimum or maximum elements
  # then the gradient will be divided between them.
  indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
  num_selected = array_ops.reshape(
      math_ops.reduce_sum(indicators, op.inputs[1]),
      output_shape_kept_dims)

  return [math_ops.div(indicators, num_selected) * grad, None]
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
  """Gradient for Max, delegating to the shared min/max implementation."""
  return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
  """Gradient for Min; identical in form to the Max gradient."""
  return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
  """Gradient for Mean."""
  # Mean = Sum / count, so reuse the Sum gradient and divide by the number
  # of input elements that were averaged into each output element.
  sum_grad = _SumGrad(op, grad)[0]
  input_shape = array_ops.shape(op.inputs[0])
  output_shape = array_ops.shape(op.outputs[0])
  # factor = total input elements / total output elements (safe for 0/0).
  factor = _safe_shape_div(math_ops.reduce_prod(input_shape),
                           math_ops.reduce_prod(output_shape))
  # None: no gradient flows to the reduction indices input.
  return sum_grad / math_ops.cast(factor, sum_grad.dtype), None
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.
  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])
  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)
  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here. We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
    # `other` holds the axes that are NOT being reduced.
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat_v2([reduced, other], 0)
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  # Transpose reduced axes to the front, then flatten to a 2-D matrix of
  # shape (reduced_num, other_num) so the cumprods run along axis 0.
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))
  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)
  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
  """Gradient for SegmentSum."""
  # Each input row receives the gradient of the segment it contributed to.
  return array_ops.gather(grad, op.inputs[1]), None
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
  """Gradient for SegmentMean."""
  input_rank = array_ops.rank(op.inputs[0])
  # Shape [num_ids, 1, 1, ...]: ones that segment_sum can count per segment.
  ones_shape = array_ops.concat_v2([
      array_ops.shape(op.inputs[1]),
      array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
  ], 0)
  ones = array_ops.fill(ones_shape,
                        constant_op.constant(1, dtype=grad.dtype))
  # Divide each segment's gradient by the number of elements in that segment.
  scaled_grad = math_ops.div(grad, math_ops.segment_sum(ones, op.inputs[1]))
  return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
  """Gradient for SparseSegmentSum."""
  input_rows = array_ops.shape(op.inputs[0])[0]
  # Scatter the gathered gradients back onto the rows selected by op.inputs[1].
  return (math_ops.unsorted_segment_sum(
      array_ops.gather(grad, op.inputs[2]),
      op.inputs[1], input_rows), None, None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
  """Gradient for SparseSegmentMean."""
  dim0 = array_ops.shape(op.inputs[0])[0]
  return (math_ops.sparse_segment_mean_grad(grad,
                                            op.inputs[1],
                                            op.inputs[2],
                                            dim0),
          None, None)
@ops.RegisterGradient("SparseSegmentSqrtN")
def _SparseSegmentSqrtNGrad(op, grad):
  """Gradient for SparseSegmentSqrtN."""
  dim0 = array_ops.shape(op.inputs[0])[0]
  return (math_ops.sparse_segment_sqrt_n_grad(grad,
                                              op.inputs[1],
                                              op.inputs[2],
                                              dim0),
          None, None)
def _SegmentMinOrMaxGrad(op, grad):
  """Gradient for SegmentMin and SegmentMax. Both share the same code."""
  zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),
                          dtype=op.inputs[0].dtype)
  # Get the number of selected (minimum or maximum) elements in each segment.
  gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
  is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
  num_selected = math_ops.segment_sum(math_ops.cast(is_selected, grad.dtype),
                                      op.inputs[1])
  # Compute the gradient for each segment. The gradient for the ith segment is
  # divided evenly among the selected elements in that segment.
  weighted_grads = math_ops.div(grad, num_selected)
  gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
  # Non-selected elements get zero gradient; segment ids get None.
  return array_ops.where(is_selected, gathered_grads, zeros), None
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
  """Gradient for SegmentMin."""
  return _SegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
  """Gradient for SegmentMax."""
  return _SegmentMinOrMaxGrad(op, grad)
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
  """Gradient for UnsortedSegmentSum: gather the segment's gradient."""
  # Three inputs (data, segment_ids, num_segments); only data gets a gradient.
  return array_ops.gather(grad, op.inputs[1]), None, None
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
  """Returns grad * sign(x)."""
  x = op.inputs[0]
  return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
  """Returns -grad."""
  return -grad
# Inv is the deprecated alias of Reciprocal; both share the fused grad kernel.
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
  """Returns -grad * (1 / x^2)."""
  y = op.outputs[0]  # y = 1 / x
  # pylint: disable=protected-access
  return gen_math_ops._reciprocal_grad(y, grad)
@ops.RegisterGradient("Reciprocal")
def _ReciprocalGrad(op, grad):
  """Returns -grad * (1 / x^2)."""
  y = op.outputs[0]  # y = 1 / x
  # pylint: disable=protected-access
  return gen_math_ops._reciprocal_grad(y, grad)
@ops.RegisterGradient("InvGrad")
def _InvGradGrad(op, grad):
  """Second-order gradient for Inv."""
  b = op.inputs[1]
  # op.output[0]: y = -b * conj(a)^2
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(op.inputs[0])
    cg = math_ops.conj(grad)
    # pylint: disable=protected-access
    return cg * -2.0 * b * ca, gen_math_ops._reciprocal_grad(ca, grad)
@ops.RegisterGradient("ReciprocalGrad")
def _ReciprocalGradGrad(op, grad):
  """Second-order gradient for Reciprocal."""
  b = op.inputs[1]
  # op.output[0]: y = -b * conj(a)^2
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(op.inputs[0])
    cg = math_ops.conj(grad)
    # pylint: disable=protected-access
    return cg * -2.0 * b * ca, gen_math_ops._reciprocal_grad(ca, grad)
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
  """Returns grad * 2*x."""
  x = op.inputs[0]
  # Added control dependencies to prevent 2*x from being computed too early.
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * (2.0 * x)
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
  """Returns grad * 0.5 / sqrt(x), via the fused kernel."""
  y = op.outputs[0]  # y = x^(1/2)
  return gen_math_ops._sqrt_grad(y, grad)
@ops.RegisterGradient("SqrtGrad")
def _SqrtGradGrad(op, grad):
  """Second-order gradient for Sqrt."""
  a = op.inputs[0]
  y = op.outputs[0]  # y = 0.5 * b / conj(a)
  with ops.control_dependencies([grad.op]):
    ga = grad / a
    return -math_ops.conj(ga) * y, 0.5 * ga
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
  """Returns -0.5 * grad * conj(y)^3."""
  y = op.outputs[0]  # y = x^(-1/2)
  return gen_math_ops._rsqrt_grad(y, grad)
@ops.RegisterGradient("RsqrtGrad")
def _RsqrtGradGrad(op, grad):
  """Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
  a = op.inputs[0]  # a = x^{-1/2}
  b = op.inputs[1]  # backprop gradient for a
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(a)
    cg = math_ops.conj(grad)
    grad_a = -1.5 * cg * b * math_ops.square(ca)
    # pylint: disable=protected-access
    grad_b = gen_math_ops._rsqrt_grad(ca, grad)
    return grad_a, grad_b
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
  """Returns grad * exp(x)."""
  y = op.outputs[0]  # y = e^x, reused instead of recomputing exp(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
  """Returns grad * (1/x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(x)
@ops.RegisterGradient("Log1p")
def _Log1pGrad(op, grad):
  """Returns grad * (1/(1 + x))."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.reciprocal(1 + x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
  """Returns grad * (1 - tanh(x) * tanh(x))."""
  y = op.outputs[0]  # y = tanh(x), reused by the fused kernel
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._tanh_grad(y, grad)
@ops.RegisterGradient("TanhGrad")
def _TanhGradGrad(op, grad):
  """Second-order gradient for Tanh: inputs are (y, dy)."""
  with ops.control_dependencies([grad.op]):
    a = math_ops.conj(op.inputs[0])
    b = math_ops.conj(op.inputs[1])
    # pylint: disable=protected-access
    return grad * -2.0 * b * a, gen_math_ops._tanh_grad(a, grad)
@ops.RegisterGradient("Erf")
def _ErfGrad(op, grad):
  """Returns grad * 2/sqrt(pi) * exp(-x**2)."""
  x = op.inputs[0]
  two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfc")
def _ErfcGrad(op, grad):
  """Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
  # erfc(x) = 1 - erf(x), hence the negated Erf gradient.
  x = op.inputs[0]
  minus_two_over_root_pi = constant_op.constant(-2 / np.sqrt(np.pi),
                                                dtype=grad.dtype)
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Lgamma")
def _LgammaGrad(op, grad):
  """Returns grad * digamma(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.digamma(x)
@ops.RegisterGradient("Digamma")
def _DigammaGrad(op, grad):
  """Compute gradient of the digamma function with respect to its argument.

  d/dx digamma(x) = polygamma(1, x), i.e. the trigamma function.
  """
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    # Build the scalar order via constant_op.constant for consistency with
    # the rest of this file (`constant` is declared in constant_op, which
    # every other gradient here uses, e.g. _ErfGrad and _AsinGrad).
    return grad * math_ops.polygamma(constant_op.constant(1, dtype=x.dtype), x)
@ops.RegisterGradient("Igamma")
def _IgammaGrad(op, grad):
  """Returns gradient of igamma(a, x) with respect to a and x."""
  # TODO(ebrevdo): Perhaps add the derivative w.r.t. a
  a = op.inputs[0]
  x = op.inputs[1]
  sa = array_ops.shape(a)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
  # Perform operations in log space before summing, because Gamma(a)
  # and Gamma'(a) can grow large.
  partial_x = math_ops.exp(-x + (a-1) * math_ops.log(x) - math_ops.lgamma(a))
  # None for a: its derivative is not implemented (see TODO above).
  return (None,
          array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Igammac")
def _IgammacGrad(op, grad):
  """Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x."""
  return [-1 * g if g is not None else None for g in _IgammaGrad(op, grad)]
@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  # pylint: disable=protected-access
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    # d/dq zeta(x, q) = -x * zeta(x + 1, q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
@ops.RegisterGradient("Polygamma")
def _PolygammaGrad(op, grad):
  """Returns gradient of psi(n, x) with respect to n and x."""
  # TODO(tillahoffmann): Add derivative with respect to n
  n = op.inputs[0]
  x = op.inputs[1]
  # Broadcast gradients
  sn = array_ops.shape(n)
  sx = array_ops.shape(x)
  # pylint: disable=protected-access
  unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    n = math_ops.conj(n)
    x = math_ops.conj(x)
    # d/dx psi(n, x) = psi(n + 1, x)
    partial_x = math_ops.polygamma(n + 1, x)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
  """Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
  y = op.outputs[0]  # y = sigmoid(x), reused by the fused kernel
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._sigmoid_grad(y, grad)
@ops.RegisterGradient("SigmoidGrad")
def _SigmoidGradGrad(op, grad):
  """Second-order gradient for Sigmoid: inputs are (y, dy)."""
  with ops.control_dependencies([grad.op]):
    a = math_ops.conj(op.inputs[0])
    b = math_ops.conj(op.inputs[1])
    gb = grad * b
    # pylint: disable=protected-access
    return gb - 2.0 * gb * a, gen_math_ops._sigmoid_grad(a, grad)
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
  """Returns 0."""
  # Sign is piecewise constant, so its gradient is zero everywhere.
  x = op.inputs[0]
  return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
  """Returns grad * cos(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
@ops.RegisterGradient("Tan")
def _TanGrad(op, grad):
  """Returns grad * 1/sec^2(x)."""
  # d/dx tan(x) = sec^2(x) = 1 / cos^2(x)
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    secx = math_ops.reciprocal(math_ops.cos(x))
    secx2 = math_ops.square(secx)
    return grad * secx2
@ops.RegisterGradient("Asin")
def _AsinGrad(op, grad):
  """Returns grad * 1/sqrt(1-x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.sub(one, x2))
    inv = math_ops.reciprocal(den)
    return grad * inv
@ops.RegisterGradient("Acos")
def _AcosGrad(op, grad):
  """Returns grad * -1/sqrt(1-x^2)."""
  # Same as Asin's gradient with the sign flipped.
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    den = math_ops.sqrt(math_ops.sub(one, x2))
    inv = math_ops.reciprocal(den)
    return -grad * inv
@ops.RegisterGradient("Atan")
def _AtanGrad(op, grad):
  """Returns grad * 1/ (1 + x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    inv = math_ops.reciprocal(math_ops.add(one, x2))
    return grad * inv
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
  """Copies the gradient to all inputs."""
  # Not broadcasting: AddN requires all inputs to share one shape.
  return [grad] * len(op.inputs)
@ops.RegisterGradient("Add")
def _AddGrad(op, grad):
  """Gradient for Add: reduce grad over the broadcast axes of each input."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
          array_ops.reshape(math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
  """Gradient for Sub: like Add, but the y gradient is negated."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
          array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
  """The gradient of scalar multiplication."""
  x = op.inputs[0]
  y = op.inputs[1]
  assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # Conjugate so the formula holds for complex dtypes as well.
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  # d/dx (x*y) = y, d/dy (x*y) = x; sum over broadcast dims and reshape back.
  return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
          array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
  """The gradient for the Div operator."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  # d/dx (x/y) = 1/y, d/dy (x/y) = -x/y^2; reduce over broadcast dims.
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.div(grad, y), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(
              grad * math_ops.div(-x, math_ops.square(y)), ry), sy))
@ops.RegisterGradient("FloorDiv")
def _FloorDivGrad(_, unused_grad):
  """The gradient for the FloorDiv operator."""
  # Floor division is piecewise constant: zero gradient for both inputs.
  return None, None
@ops.RegisterGradient("TruncateDiv")
def _TruncateDivGrad(_, unused_grad):
  """The gradient for the TruncateDiv operator (piecewise constant)."""
  return None, None
@ops.RegisterGradient("RealDiv")
def _RealDivGrad(op, grad):
  """RealDiv op gradient."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  # Same math as _DivGrad, but keeps real-division semantics via realdiv.
  return (array_ops.reshape(math_ops.reduce_sum(
      math_ops.realdiv(grad, y), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(
              grad * math_ops.realdiv(-x, math_ops.square(y)), ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
  """Returns grad * (y*x^(y-1), z*log(x))."""
  x = op.inputs[0]
  y = op.inputs[1]
  z = op.outputs[0]  # z = x^y, reused in the y gradient
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  z = math_ops.conj(z)
  gx = array_ops.reshape(
      math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
  # Avoid false singularity at x = 0
  if x.dtype.is_complex:
    # real(x) < 0 is fine for the complex case
    log_x = array_ops.where(
        math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
  else:
    # There's no sensible real value to return if x < 0, so return 0
    log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * z * log_x, ry), sy)
  return gx, gy
def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum.

  `selector_op` (e.g. greater_equal) marks which input won the element-wise
  comparison; the gradient flows only to the winning input.
  """
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  xgrad = array_ops.where(xmask, grad, zeros)
  ygrad = array_ops.where(math_ops.logical_not(xmask), grad, zeros)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
  """Returns grad*(x > y, x <= y) with type of grad."""
  return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
  """Returns grad*(x < y, x >= y) with type of grad."""
  return _MaximumMinimumGrad(op, grad, math_ops.less_equal)
@ops.RegisterGradient("SquaredDifference")
def _SquaredDifferenceGrad(op, grad):
  """Returns the gradient for (x-y)^2."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  # .op works with Tensors or IndexedSlices
  with ops.control_dependencies([grad.op]):
    # The parens ensure that if grad is IndexedSlices, it'll get multiplied by
    # Tensor (not a number like 2.0) which causes it to convert to Tensor.
    x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
  # The y gradient is the negation of the x gradient, reduced over y's
  # broadcast axes.
  return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
          -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))
# Comparison and logical operations are piecewise constant (boolean-valued),
# so they have no meaningful gradients and are registered as such.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
  """Gradient for Select: routes grad to x or y based on the condition."""
  c = op.inputs[0]
  x = op.inputs[1]
  zeros = array_ops.zeros_like(x)
  # None: the boolean condition has no gradient.
  return (None, array_ops.where(c, grad, zeros),
          array_ops.where(c, zeros, grad))
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
  """Gradient for MatMul, one case per (transpose_a, transpose_b) setting."""
  t_a = op.get_attr("transpose_a")
  t_b = op.get_attr("transpose_b")
  if not t_a and not t_b:
    # z = a*b  =>  da = g*b^T, db = a^T*g
    return (math_ops.matmul(grad, op.inputs[1], transpose_b=True),
            math_ops.matmul(op.inputs[0], grad, transpose_a=True))
  elif not t_a and t_b:
    # z = a*b^T  =>  da = g*b, db = g^T*a
    return (math_ops.matmul(grad, op.inputs[1]),
            math_ops.matmul(grad, op.inputs[0], transpose_a=True))
  elif t_a and not t_b:
    # z = a^T*b  =>  da = b*g^T, db = a*g
    return (math_ops.matmul(op.inputs[1], grad, transpose_b=True),
            math_ops.matmul(op.inputs[0], grad))
  elif t_a and t_b:
    # z = a^T*b^T  =>  da = (g*b)^T... expressed directly with transposes.
    return (math_ops.matmul(op.inputs[1], grad, transpose_a=True,
                            transpose_b=True),
            math_ops.matmul(grad, op.inputs[0], transpose_a=True,
                            transpose_b=True))
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
  """Gradient for SparseMatMul."""
  t_a = op.get_attr("transpose_a")
  t_b = op.get_attr("transpose_b")
  # Track which operands are sparse so the backward matmuls can preserve
  # the sparsity hints. Keyed by tensor identity.
  is_sparse = {
      op.inputs[0]: op.get_attr("a_is_sparse"),
      op.inputs[1]: op.get_attr("b_is_sparse"),
      # Use heuristic to figure out if grad might be sparse
      grad: (grad.op.type == "ReluGrad")
  }
  def _SparseMatMul(t1, t2, out_dtype,
                    transpose_a=False, transpose_b=False):
    """Helper function to create SparseMatMul op."""
    assert t1 in is_sparse and t2 in is_sparse
    t1_sparse = is_sparse[t1]
    t2_sparse = is_sparse[t2]
    # SparseMatMul does not support transposed b; transpose explicitly.
    if transpose_b:
      t2 = array_ops.transpose(t2)
      transpose_b = False
    prod = math_ops.matmul(t1, t2,
                           transpose_a=transpose_a,
                           transpose_b=transpose_b,
                           a_is_sparse=t1_sparse,
                           b_is_sparse=t2_sparse)
    if prod.dtype != out_dtype:
      prod = math_ops.cast(prod, out_dtype)
    return prod
  dtype_a = op.inputs[0].dtype
  dtype_b = op.inputs[1].dtype
  # Same four-case dispatch as _MatMulGrad, but via the sparse helper.
  if not t_a and not t_b:
    return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),
            _SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))
  elif not t_a and t_b:
    return (_SparseMatMul(grad, op.inputs[1], dtype_a),
            _SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))
  elif t_a and not t_b:
    return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),
            _SparseMatMul(op.inputs[0], grad, dtype_b))
  elif t_a and t_b:
    return (_SparseMatMul(op.inputs[1], grad, dtype_a,
                          transpose_a=True, transpose_b=True),
            _SparseMatMul(grad, op.inputs[0], dtype_b,
                          transpose_a=True, transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, unused_grad):
  """Floor is piecewise constant: zero gradient."""
  return [None]
@ops.RegisterGradient("Rint")
def _RintGrad(_, unused_grad):
  # the gradient of Rint is zero
  return [None]
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
  """Returns the gradient of x and y given the gradient of x * y."""
  x = op.inputs[0]
  y = op.inputs[1]
  adj_x = op.get_attr("adj_x")
  adj_y = op.get_attr("adj_y")
  # Four cases, mirroring _MatMulGrad but with adjoints (conjugate
  # transposes) since BatchMatMul supports complex inputs.
  if not adj_x:
    if not adj_y:
      grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
      grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
    else:
      grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
      grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
  else:
    if not adj_y:
      grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
      grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
    else:
      grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
      grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
  return grad_x, grad_y
# Sequence-generating ops produce values from integer parameters only.
ops.NotDifferentiable("Range")
ops.NotDifferentiable("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(op, grad):
  """Returns the real and imaginary components of 'grad', respectively."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
  """Returns 'grad' as the real part and set the imaginary part 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
  """Returns 'grad' as the imaginary part and set the real part 0."""
  zero = constant_op.constant(0, dtype=grad.dtype)
  return math_ops.complex(zero, grad)
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
  """Returns the complex conjugate of grad."""
  return math_ops.conj(grad)
@ops.RegisterGradient("ComplexAbs")
def _ComplexAbsGrad(op, grad):
  """Returns the gradient of ComplexAbs."""
  # TODO(b/27786104): The cast to complex could be removed once arithmetic
  # supports mixtures of complex64 and real values.
  return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
          math_ops.sign(op.inputs[0]))
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
  """Gradient for Cast: cast grad back, but only between float/complex types."""
  t = [dtypes.float16, dtypes.float32, dtypes.float64,
       dtypes.bfloat16, dtypes.complex64, dtypes.complex128]
  src_type = op.inputs[0].dtype.base_dtype
  dst_type = grad.dtype.base_dtype
  if src_type in t and dst_type in t:
    return math_ops.cast(grad, src_type)
  else:
    # Casts involving integer/bool types are not differentiable.
    return None
def _FFTSizeForGrad(grad, rank):
  """Returns the product of the innermost `rank` dimensions of `grad`.

  This is the FFT normalization factor: forward and inverse transforms
  differ by a factor of the transform size.
  """
  return math_ops.reduce_prod(
      array_ops.slice(
          array_ops.reverse_v2(array_ops.shape(grad), [0]), (0,), (rank,)))
@ops.RegisterGradient("FFT")
def _FFTGrad(_, grad):
  """Gradient of FFT is size * IFFT(grad)."""
  size = math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
  return math_ops.ifft(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT")
def _IFFTGrad(_, grad):
  """Gradient of IFFT is FFT(grad) / size."""
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 1), dtypes.float32)
  return math_ops.fft(grad) * math_ops.complex(rsize, 0.)
@ops.RegisterGradient("FFT2D")
def _FFT2DGrad(_, grad):
  """Gradient of FFT2D is size * IFFT2D(grad)."""
  size = math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
  return math_ops.ifft2d(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT2D")
def _IFFT2DGrad(_, grad):
  """Gradient of IFFT2D is FFT2D(grad) / size."""
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 2), dtypes.float32)
  return math_ops.fft2d(grad) * math_ops.complex(rsize, 0.)
@ops.RegisterGradient("FFT3D")
def _FFT3DGrad(_, grad):
  """Gradient of FFT3D is size * IFFT3D(grad)."""
  size = math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return math_ops.ifft3d(grad) * math_ops.complex(size, 0.)
@ops.RegisterGradient("IFFT3D")
def _IFFT3DGrad(_, grad):
  """Gradient of IFFT3D is FFT3D(grad) / size."""
  rsize = 1. / math_ops.cast(_FFTSizeForGrad(grad, 3), dtypes.float32)
  return math_ops.fft3d(grad) * math_ops.complex(rsize, 0.)
@ops.RegisterGradient("Cross")
def _CrossGrad(op, grad):
  """Gradient for Cross: d(u x v)/du -> v x grad, d(u x v)/dv -> grad x u."""
  u = op.inputs[0]
  v = op.inputs[1]
  return (math_ops.cross(v, grad), math_ops.cross(grad, u))
@ops.RegisterGradient("Cumsum")
def _CumsumGrad(op, grad):
  """Gradient for Cumsum: a cumsum of grad in the reverse direction."""
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  return [math_ops.cumsum(grad, axis, exclusive=exclusive,
                          reverse=not reverse), None]
@ops.RegisterGradient("Cumprod")
def _CumprodGrad(op, grad):
  """Gradient for Cumprod."""
  x = op.inputs[0]
  axis = op.inputs[1]
  exclusive = op.get_attr("exclusive")
  reverse = op.get_attr("reverse")
  # TODO: This fails when x contains 0 and should be fixed — the division
  # by x below produces inf/nan for zero entries.
  prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
  out = math_ops.cumsum(prod * grad, axis, exclusive=exclusive,
                        reverse=not reverse)
  return [out / x, None]
| AndreasMadsen/tensorflow | tensorflow/python/ops/math_grad.py | Python | apache-2.0 | 32,638 |
"""Test the Dexcom config flow."""
from pydexcom import AccountError, SessionError
from homeassistant.components.dexcom.const import DOMAIN
from homeassistant.config_entries import ENTRY_STATE_LOADED, ENTRY_STATE_NOT_LOADED
from tests.async_mock import patch
from tests.common import MockConfigEntry
from tests.components.dexcom import CONFIG, init_integration
async def test_setup_entry_account_error(hass):
    """Test entry setup failed due to account error."""
    # Register an entry whose setup will hit an AccountError as soon as the
    # integration tries to build its Dexcom API client.
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        title="test_username",
        unique_id="test_username",
        data=CONFIG,
        options=None,
    )
    config_entry.add_to_hass(hass)
    dexcom_patch = patch(
        "homeassistant.components.dexcom.Dexcom", side_effect=AccountError
    )
    with dexcom_patch:
        setup_ok = await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    assert setup_ok is False
async def test_setup_entry_session_error(hass):
    """Test entry setup failed due to session error."""
    # Register an entry whose setup will hit a SessionError as soon as the
    # integration tries to build its Dexcom API client.
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        title="test_username",
        unique_id="test_username",
        data=CONFIG,
        options=None,
    )
    config_entry.add_to_hass(hass)
    dexcom_patch = patch(
        "homeassistant.components.dexcom.Dexcom", side_effect=SessionError
    )
    with dexcom_patch:
        setup_ok = await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    assert setup_ok is False
async def test_unload_entry(hass):
    """Test successful unload of entry."""
    entry = await init_integration(hass)
    # Exactly one Dexcom entry should be registered and fully loaded.
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state == ENTRY_STATE_LOADED
    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()
    # After unloading, the entry is marked unloaded and the integration's
    # runtime data has been cleared from hass.data.
    assert entry.state == ENTRY_STATE_NOT_LOADED
    assert not hass.data.get(DOMAIN)
| tchellomello/home-assistant | tests/components/dexcom/test_init.py | Python | apache-2.0 | 1,894 |
import copy
import numbers
from collections import Hashable
from functools import partial
from bson import ObjectId, json_util
from bson.dbref import DBRef
from bson.son import SON
import pymongo
import six
from mongoengine import signals
from mongoengine.base.common import get_document
from mongoengine.base.datastructures import (BaseDict, BaseList,
EmbeddedDocumentList,
SemiStrictDict, StrictDict)
from mongoengine.base.fields import ComplexBaseField
from mongoengine.common import _import_class
from mongoengine.errors import (FieldDoesNotExist, InvalidDocumentError,
LookUpError, OperationError, ValidationError)
import collections
__all__ = ('BaseDocument',)
NON_FIELD_ERRORS = '__all__'
class BaseDocument(object):
    # Restrict instances to these attributes (no per-instance __dict__);
    # '__weakref__' is kept so documents can still be weakly referenced.
    __slots__ = ('_changed_fields', '_initialised', '_created', '_data',
                 '_dynamic_fields', '_auto_id_field', '_db_field_map',
                 '__weakref__')
    # _dynamic: subclasses set True to accept undeclared (dynamic) fields.
    _dynamic = False
    # _dynamic_lock: while True, __setattr__ refuses to create dynamic fields
    # (released at the end of __init__ for dynamic documents).
    _dynamic_lock = True
    # STRICT: when True, _data only accepts declared field keys.
    STRICT = False
    def __init__(self, *args, **values):
        """
        Initialise a document or embedded document

        :param __auto_convert: Try and will cast python objects to Object types
        :param __only_fields: db-field names that were loaded from the DB;
            those fields skip default-value assignment below
        :param values: A dictionary of values for the document
        """
        self._initialised = False
        self._created = True
        if args:
            # Combine positional arguments with named arguments.
            # We only want named arguments.
            field = iter(self._fields_ordered)
            # If its an automatic id field then skip to the first defined field
            if getattr(self, '_auto_id_field', False):
                next(field)
            for value in args:
                name = next(field)
                if name in values:
                    raise TypeError(
                        'Multiple values for keyword argument "%s"' % name)
                values[name] = value
        __auto_convert = values.pop('__auto_convert', True)
        # 399: set default values only to fields loaded from DB
        __only_fields = set(values.pop('__only_fields', values))
        _created = values.pop('_created', True)
        signals.pre_init.send(self.__class__, document=self, values=values)
        # Check if there are undefined fields supplied to the constructor,
        # if so raise an Exception.
        if not self._dynamic and (self._meta.get('strict', True) or _created):
            _undefined_fields = set(values.keys()) - set(
                list(self._fields.keys()) + ['id', 'pk', '_cls', '_text_score'])
            if _undefined_fields:
                msg = (
                    'The fields "{0}" do not exist on the document "{1}"'
                ).format(_undefined_fields, self._class_name)
                raise FieldDoesNotExist(msg)
        # Choose the backing store: StrictDict rejects unknown keys outright,
        # SemiStrictDict tolerates extras.
        if self.STRICT and not self._dynamic:
            self._data = StrictDict.create(allowed_keys=self._fields_ordered)()
        else:
            self._data = SemiStrictDict.create(
                allowed_keys=self._fields_ordered)()
        self._dynamic_fields = SON()
        # Assign default values to instance
        for key, field in self._fields.items():
            if self._db_field_map.get(key, key) in __only_fields:
                continue
            value = getattr(self, key, None)
            setattr(self, key, value)
        if '_cls' not in values:
            self._cls = self._class_name
        # Set passed values after initialisation
        if self._dynamic:
            # Declared fields are set now; dynamic values are deferred until
            # the dynamic lock is released below.
            dynamic_data = {}
            for key, value in values.items():
                if key in self._fields or key == '_id':
                    setattr(self, key, value)
                elif self._dynamic:
                    dynamic_data[key] = value
        else:
            FileField = _import_class('FileField')
            for key, value in values.items():
                if key == '__auto_convert':
                    continue
                key = self._reverse_db_field_map.get(key, key)
                if key in self._fields or key in ('id', 'pk', '_cls'):
                    if __auto_convert and value is not None:
                        field = self._fields.get(key)
                        # FileFields are not converted: their values are
                        # proxies, not raw python objects.
                        if field and not isinstance(field, FileField):
                            value = field.to_python(value)
                    setattr(self, key, value)
                else:
                    self._data[key] = value
        # Set any get_<field>_display methods
        self.__set_field_display()
        if self._dynamic:
            self._dynamic_lock = False
            for key, value in dynamic_data.items():
                setattr(self, key, value)
        # Flag initialised
        self._initialised = True
        self._created = _created
        signals.post_init.send(self.__class__, document=self)
def __delattr__(self, *args, **kwargs):
"""Handle deletions of fields"""
field_name = args[0]
if field_name in self._fields:
default = self._fields[field_name].default
if isinstance(default, collections.Callable):
default = default()
setattr(self, field_name, default)
else:
super(BaseDocument, self).__delattr__(*args, **kwargs)
    def __setattr__(self, name, value):
        """Assign an attribute, with extra bookkeeping for documents.

        For an initialised dynamic document, unknown non-underscore
        names grow a DynamicField on the fly and the value is wrapped so
        changes can be tracked. Writes to shard-key fields on an already
        persisted document are rejected, and setting the id field on an
        initialised, not-yet-persisted document flips ``_created`` to
        False.
        """
        # Handle dynamic data only if an initialised dynamic document
        if self._dynamic and not self._dynamic_lock:
            if not hasattr(self, name) and not name.startswith('_'):
                DynamicField = _import_class('DynamicField')
                field = DynamicField(db_field=name)
                field.name = name
                self._dynamic_fields[name] = field
                self._fields_ordered += (name,)
            if not name.startswith('_'):
                value = self.__expand_dynamic_values(name, value)
            # Handle marking data as changed
            if name in self._dynamic_fields:
                self._data[name] = value
                if hasattr(self, '_changed_fields'):
                    self._mark_as_changed(name)
        try:
            # NOTE: `self__created` is a plain local variable (despite the
            # name), mirroring the `_created` attribute when present.
            self__created = self._created
        except AttributeError:
            self__created = True
        if (
            self._is_document and
            not self__created and
            name in self._meta.get('shard_key', tuple()) and
            self._data.get(name) != value
        ):
            msg = 'Shard Keys are immutable. Tried to update %s' % name
            raise OperationError(msg)
        try:
            # Likewise a local mirror of `_initialised`, defaulting to False.
            self__initialised = self._initialised
        except AttributeError:
            self__initialised = False
        # Check if the user has created a new instance of a class
        if (self._is_document and self__initialised and
                self__created and name == self._meta.get('id_field')):
            super(BaseDocument, self).__setattr__('_created', False)
        super(BaseDocument, self).__setattr__(name, value)
def __getstate__(self):
data = {}
for k in ('_changed_fields', '_initialised', '_created',
'_dynamic_fields', '_fields_ordered'):
if hasattr(self, k):
data[k] = getattr(self, k)
data['_data'] = self.to_mongo()
return data
    def __setstate__(self, data):
        """Restore pickled state produced by :meth:`__getstate__`.

        '_data' was serialised as SON by ``to_mongo``, so it is first
        converted back through ``_from_son``. Dynamic documents restore
        their per-instance field ordering and re-assign dynamic values
        so tracking wrappers are rebuilt.
        """
        if isinstance(data['_data'], SON):
            data['_data'] = self.__class__._from_son(data['_data'])._data
        for k in ('_changed_fields', '_initialised', '_created', '_data',
                  '_dynamic_fields'):
            if k in data:
                setattr(self, k, data[k])
        if '_fields_ordered' in data:
            if self._dynamic:
                setattr(self, '_fields_ordered', data['_fields_ordered'])
            else:
                # For non-dynamic documents field order is a class-level
                # invariant, so take it from the class, not the pickle.
                _super_fields_ordered = type(self)._fields_ordered
                setattr(self, '_fields_ordered', _super_fields_ordered)
        dynamic_fields = data.get('_dynamic_fields') or SON()
        for k in list(dynamic_fields.keys()):
            # Assign through __setattr__ so dynamic wrappers are recreated.
            setattr(self, k, data['_data'].get(k))
def __iter__(self):
return iter(self._fields_ordered)
def __getitem__(self, name):
"""Dictionary-style field access, return a field's value if present.
"""
try:
if name in self._fields_ordered:
return getattr(self, name)
except AttributeError:
pass
raise KeyError(name)
def __setitem__(self, name, value):
"""Dictionary-style field access, set a field's value.
"""
# Ensure that the field exists before settings its value
if not self._dynamic and name not in self._fields:
raise KeyError(name)
return setattr(self, name, value)
def __contains__(self, name):
try:
val = getattr(self, name)
return val is not None
except AttributeError:
return False
def __len__(self):
return len(self._data)
def __repr__(self):
try:
u = self.__str__()
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
repr_type = str if u is None else type(u)
return repr_type('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
# TODO this could be simpler?
if hasattr(self, '__unicode__'):
if six.PY3:
return self.__unicode__()
else:
return six.text_type(self).encode('utf-8')
return six.text_type('%s object' % self.__class__.__name__)
def __eq__(self, other):
if isinstance(other, self.__class__) and hasattr(other, 'id') and other.id is not None:
return self.id == other.id
if isinstance(other, DBRef):
return self._get_collection_name() == other.collection and self.id == other.id
if self.id is None:
return self is other
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if getattr(self, 'pk', None) is None:
# For new object
return super(BaseDocument, self).__hash__()
else:
return hash(self.pk)
def clean(self):
"""
Hook for doing document level data cleaning before validation is run.
Any ValidationError raised by this method will not be associated with
a particular field; it will have a special-case association with the
field defined by NON_FIELD_ERRORS.
"""
pass
def get_text_score(self):
"""
Get text score from text query
"""
if '_text_score' not in self._data:
raise InvalidDocumentError('This document is not originally built from a text query')
return self._data['_text_score']
    def to_mongo(self, use_db_field=True, fields=None):
        """
        Return as SON data ready for use with MongoDB.

        :param use_db_field: key the output by each field's ``db_field``
            name rather than its Python attribute name.
        :param fields: optional list of (possibly dotted) field paths to
            restrict the output to; only root names are matched here,
            sub-paths are forwarded to embedded ``to_mongo`` calls.
        """
        if not fields:
            fields = []
        data = SON()
        data['_id'] = None
        data['_cls'] = self._class_name
        # only root fields ['test1.a', 'test2'] => ['test1', 'test2']
        root_fields = set([f.split('.')[0] for f in fields])
        for field_name in self:
            if root_fields and field_name not in root_fields:
                continue
            value = self._data.get(field_name, None)
            field = self._fields.get(field_name)
            if field is None and self._dynamic:
                field = self._dynamic_fields.get(field_name)
            if value is not None:
                # Inspect the field's to_mongo signature to decide which
                # optional keyword arguments it accepts.
                f_inputs = field.to_mongo.__code__.co_varnames
                ex_vars = {}
                if fields and 'fields' in f_inputs:
                    key = '%s.' % field_name
                    embedded_fields = [
                        i.replace(key, '') for i in fields
                        if i.startswith(key)]
                    ex_vars['fields'] = embedded_fields
                if 'use_db_field' in f_inputs:
                    ex_vars['use_db_field'] = use_db_field
                value = field.to_mongo(value, **ex_vars)
            # Handle self generating fields
            if value is None and field._auto_gen:
                value = field.generate()
                self._data[field_name] = value
            if value is not None:
                if use_db_field:
                    data[field.db_field] = value
                else:
                    data[field.name] = value
        # Only add _cls if allow_inheritance is True
        if not self._meta.get('allow_inheritance'):
            data.pop('_cls')
        return data
    def validate(self, clean=True):
        """Ensure that all fields' values are valid and that required fields
        are present.

        :param clean: also run the document-level :meth:`clean` hook and
            record any error it raises under ``NON_FIELD_ERRORS``.
        :raises ValidationError: a single error aggregating every
            per-field failure, keyed by field name.
        """
        # Ensure that each field is matched to a valid value
        errors = {}
        if clean:
            try:
                self.clean()
            except ValidationError as error:
                errors[NON_FIELD_ERRORS] = error
        # Get a list of tuples of field names and their current values
        fields = [(self._fields.get(name, self._dynamic_fields.get(name)),
                   self._data.get(name)) for name in self._fields_ordered]
        EmbeddedDocumentField = _import_class('EmbeddedDocumentField')
        GenericEmbeddedDocumentField = _import_class(
            'GenericEmbeddedDocumentField')
        for field, value in fields:
            if value is not None:
                try:
                    # Embedded documents get the clean flag forwarded so
                    # their own clean() hooks run as well.
                    if isinstance(field, (EmbeddedDocumentField,
                                          GenericEmbeddedDocumentField)):
                        field._validate(value, clean=clean)
                    else:
                        field._validate(value)
                except ValidationError as error:
                    errors[field.name] = error.errors or error
                except (ValueError, AttributeError, AssertionError) as error:
                    errors[field.name] = error
            elif field.required and not getattr(field, '_auto_gen', False):
                errors[field.name] = ValidationError('Field is required',
                                                     field_name=field.name)
        if errors:
            pk = 'None'
            if hasattr(self, 'pk'):
                pk = self.pk
            elif self._instance and hasattr(self._instance, 'pk'):
                # Embedded documents report their parent document's pk.
                pk = self._instance.pk
            message = 'ValidationError (%s:%s) ' % (self._class_name, pk)
            raise ValidationError(message, errors=errors)
def to_json(self, *args, **kwargs):
"""Convert this document to JSON.
:param use_db_field: Serialize field names as they appear in
MongoDB (as opposed to attribute names on this document).
Defaults to True.
"""
use_db_field = kwargs.pop('use_db_field', True)
return json_util.dumps(self.to_mongo(use_db_field), *args, **kwargs)
@classmethod
def from_json(cls, json_data, created=False):
"""Converts json data to an unsaved document instance"""
return cls._from_son(json_util.loads(json_data), created=created)
    def __expand_dynamic_values(self, name, value):
        """Expand any dynamic values to their correct types / values.

        Dicts carrying a '_cls' marker are rebuilt as documents; other
        containers are converted recursively and then wrapped in
        BaseList/BaseDict (or EmbeddedDocumentList) so that mutations
        can be tracked against this document under *name*.
        """
        if not isinstance(value, (dict, list, tuple)):
            return value
        # If the value is a dict with '_cls' in it, turn it into a document
        is_dict = isinstance(value, dict)
        if is_dict and '_cls' in value:
            cls = get_document(value['_cls'])
            return cls(**value)
        if is_dict:
            value = {
                k: self.__expand_dynamic_values(k, v)
                for k, v in list(value.items())
            }
        else:
            value = [self.__expand_dynamic_values(name, v) for v in value]
        # Convert lists / values so we can watch for any changes on them
        EmbeddedDocumentListField = _import_class('EmbeddedDocumentListField')
        if (isinstance(value, (list, tuple)) and
                not isinstance(value, BaseList)):
            if issubclass(type(self), EmbeddedDocumentListField):
                value = EmbeddedDocumentList(value, self, name)
            else:
                value = BaseList(value, self, name)
        elif isinstance(value, dict) and not isinstance(value, BaseDict):
            value = BaseDict(value, self, name)
        return value
    def _mark_as_changed(self, key):
        """Mark a key as explicitly changed by the user.

        The key is translated to its db_field name. It is only recorded
        when none of its ancestor paths is already marked; recording a
        path also removes any more specific (descendant) entries so the
        change list stays minimal.
        """
        if not key:
            return
        if not hasattr(self, '_changed_fields'):
            return
        if '.' in key:
            key, rest = key.split('.', 1)
            key = self._db_field_map.get(key, key)
            key = '%s.%s' % (key, rest)
        else:
            key = self._db_field_map.get(key, key)
        if key not in self._changed_fields:
            levels, idx = key.split('.'), 1
            while idx <= len(levels):
                if '.'.join(levels[:idx]) in self._changed_fields:
                    # An ancestor path is already marked; nothing to do.
                    break
                idx += 1
            else:
                # while/else: no ancestor was marked, so record this key.
                self._changed_fields.append(key)
                # remove lower level changed fields
                level = '.'.join(levels[:idx]) + '.'
                remove = self._changed_fields.remove
                for field in self._changed_fields[:]:
                    if field.startswith(level):
                        remove(field)
    def _clear_changed_fields(self):
        """Using _get_changed_fields iterate and remove any fields that
        are marked as changed.

        Walks each dotted change path down through lists, dicts and
        attributes and resets the ``_changed_fields`` list of any
        tracked container it lands on, then clears this document's own
        list.
        """
        for changed in self._get_changed_fields():
            parts = changed.split('.')
            data = self
            for part in parts:
                if isinstance(data, list):
                    try:
                        data = data[int(part)]
                    except IndexError:
                        data = None
                elif isinstance(data, dict):
                    data = data.get(part, None)
                else:
                    data = getattr(data, part, None)
            if hasattr(data, '_changed_fields'):
                if getattr(data, '_is_document', False):
                    # Referenced documents manage their own change state.
                    continue
                data._changed_fields = []
        self._changed_fields = []
    def _nestable_types_changed_fields(self, changed_fields, key, data, inspected):
        """Append change paths found inside the list/dict *data* to
        *changed_fields* (mutated in place), prefixing each with *key*.

        *inspected* carries ids of already-visited documents to guard
        against reference cycles.
        """
        # Loop list / dict fields as they contain documents
        # Determine the iterator to use
        if not hasattr(data, 'items'):
            iterator = enumerate(data)
        else:
            iterator = iter(data.items())
        for index, value in iterator:
            list_key = '%s%s.' % (key, index)
            # don't check anything lower if this key is already marked
            # as changed.
            if list_key[:-1] in changed_fields:
                continue
            if hasattr(value, '_get_changed_fields'):
                changed = value._get_changed_fields(inspected)
                changed_fields += ['%s%s' % (list_key, k)
                                   for k in changed if k]
            elif isinstance(value, (list, tuple, dict)):
                self._nestable_types_changed_fields(
                    changed_fields, list_key, value, inspected)
    def _get_changed_fields(self, inspected=None):
        """Return a list of all fields that have explicitly been changed.

        Recurses into embedded documents and nested containers, keying
        results by db_field names. *inspected* accumulates visited
        document ids so reference cycles terminate.
        """
        EmbeddedDocument = _import_class('EmbeddedDocument')
        DynamicEmbeddedDocument = _import_class('DynamicEmbeddedDocument')
        ReferenceField = _import_class('ReferenceField')
        SortedListField = _import_class('SortedListField')
        changed_fields = []
        changed_fields += getattr(self, '_changed_fields', [])
        inspected = inspected or set()
        if hasattr(self, 'id') and isinstance(self.id, Hashable):
            if self.id in inspected:
                return changed_fields
            inspected.add(self.id)
        for field_name in self._fields_ordered:
            db_field_name = self._db_field_map.get(field_name, field_name)
            key = '%s.' % db_field_name
            data = self._data.get(field_name, None)
            field = self._fields.get(field_name)
            if hasattr(data, 'id'):
                if data.id in inspected:
                    continue
            if isinstance(field, ReferenceField):
                # References are persisted separately; don't descend.
                continue
            elif (
                isinstance(data, (EmbeddedDocument, DynamicEmbeddedDocument)) and
                db_field_name not in changed_fields
            ):
                # Find all embedded fields that have been changed
                changed = data._get_changed_fields(inspected)
                changed_fields += ['%s%s' % (key, k) for k in changed if k]
            elif (isinstance(data, (list, tuple, dict)) and
                    db_field_name not in changed_fields):
                if (hasattr(field, 'field') and
                        isinstance(field.field, ReferenceField)):
                    continue
                elif isinstance(field, SortedListField) and field._ordering:
                    # if ordering is affected whole list is changed
                    if any([field._ordering in d._changed_fields for d in data]):
                        changed_fields.append(db_field_name)
                        continue
                self._nestable_types_changed_fields(
                    changed_fields, key, data, inspected)
        return changed_fields
    def _delta(self):
        """Returns the delta (set, unset) of the changes for a document.
        Gets any values that have been explicitly changed.

        Returns a 2-tuple ``(set_data, unset_data)`` of {path: value}
        dicts suitable for building a MongoDB $set/$unset update. A
        changed path whose value is falsy (and does not match a field
        default) is moved from the set side to the unset side.
        """
        # Handles cases where not loaded from_son but has _id
        doc = self.to_mongo()
        set_fields = self._get_changed_fields()
        unset_data = {}
        parts = []
        if hasattr(self, '_changed_fields'):
            set_data = {}
            # Fetch each set item from its path
            for path in set_fields:
                parts = path.split('.')
                d = doc
                new_path = []
                for p in parts:
                    if isinstance(d, (ObjectId, DBRef)):
                        # Stop descending once we hit an id/reference.
                        break
                    elif isinstance(d, list) and p.lstrip('-').isdigit():
                        if p[0] == '-':
                            # Negative index: translate to its positive form.
                            p = str(len(d) + int(p))
                        try:
                            d = d[int(p)]
                        except IndexError:
                            d = None
                    elif hasattr(d, 'get'):
                        d = d.get(p)
                    new_path.append(p)
                path = '.'.join(new_path)
                set_data[path] = d
        else:
            # No change tracking available: send the whole document.
            set_data = doc
            if '_id' in set_data:
                del set_data['_id']
        # Determine if any changed items were actually unset.
        for path, value in list(set_data.items()):
            if value or isinstance(value, (numbers.Number, bool)):
                continue
            # If we've set a value that ain't the default value don't unset it.
            default = None
            if (self._dynamic and len(parts) and parts[0] in
                    self._dynamic_fields):
                del set_data[path]
                unset_data[path] = 1
                continue
            elif path in self._fields:
                default = self._fields[path].default
            else:  # Perform a full lookup for lists / embedded lookups
                d = self
                parts = path.split('.')
                db_field_name = parts.pop()
                for p in parts:
                    if isinstance(d, list) and p.lstrip('-').isdigit():
                        if p[0] == '-':
                            p = str(len(d) + int(p))
                        d = d[int(p)]
                    elif (hasattr(d, '__getattribute__') and
                          not isinstance(d, dict)):
                        real_path = d._reverse_db_field_map.get(p, p)
                        d = getattr(d, real_path)
                    else:
                        d = d.get(p)
                if hasattr(d, '_fields'):
                    field_name = d._reverse_db_field_map.get(db_field_name,
                                                             db_field_name)
                    if field_name in d._fields:
                        default = d._fields.get(field_name).default
                    else:
                        default = None
            if default is not None:
                if isinstance(default, collections.Callable):
                    default = default()
                if default != value:
                    # Value deliberately differs from default: keep the $set.
                    continue
            del set_data[path]
            unset_data[path] = 1
        return set_data, unset_data
@classmethod
def _get_collection_name(cls):
"""Return the collection name for this class. None for abstract
class.
"""
return cls._meta.get('collection', None)
    @classmethod
    def _from_son(cls, son, _auto_dereference=True, only_fields=None, created=False):
        """Create an instance of a Document (subclass) from a PyMongo
        SON.

        :param _auto_dereference: propagated onto the (possibly copied)
            field objects to control reference dereferencing.
        :param only_fields: db fields loaded by a projection; skipped
            when defaults are applied in ``__init__``.
        :param created: whether the resulting document is treated as
            not yet persisted.
        """
        if not only_fields:
            only_fields = []
        if son and not isinstance(son, dict):
            raise ValueError("The source SON object needs to be of type 'dict'")
        # Get the class name from the document, falling back to the given
        # class if unavailable
        class_name = son.get('_cls', cls._class_name)
        # Convert SON to a data dict, making sure each key is a string and
        # corresponds to the right db field.
        data = {}
        for key, value in son.items():
            key = str(key)
            key = cls._db_field_map.get(key, key)
            data[key] = value
        # Return correct subclass for document type
        if class_name != cls._class_name:
            cls = get_document(class_name)
        changed_fields = []
        errors_dict = {}
        fields = cls._fields
        if not _auto_dereference:
            # Work on a copy so the class-level fields keep their
            # original dereference setting.
            fields = copy.copy(fields)
        for field_name, field in fields.items():
            field._auto_dereference = _auto_dereference
            if field.db_field in data:
                value = data[field.db_field]
                try:
                    data[field_name] = (value if value is None
                                        else field.to_python(value))
                    if field_name != field.db_field:
                        del data[field.db_field]
                except (AttributeError, ValueError) as e:
                    errors_dict[field_name] = e
        if errors_dict:
            errors = '\n'.join(['%s - %s' % (k, v)
                                for k, v in list(errors_dict.items())])
            msg = ('Invalid data to create a `%s` instance.\n%s'
                   % (cls._class_name, errors))
            raise InvalidDocumentError(msg)
        # In STRICT documents, remove any keys that aren't in cls._fields
        if cls.STRICT:
            data = {k: v for k, v in data.items() if k in cls._fields}
        obj = cls(__auto_convert=False, _created=created, __only_fields=only_fields, **data)
        obj._changed_fields = changed_fields
        if not _auto_dereference:
            obj._fields = fields
        return obj
@classmethod
def _build_index_specs(cls, meta_indexes):
"""Generate and merge the full index specs."""
geo_indices = cls._geo_indices()
unique_indices = cls._unique_with_indexes()
index_specs = [cls._build_index_spec(spec) for spec in meta_indexes]
def merge_index_specs(index_specs, indices):
"""Helper method for merging index specs."""
if not indices:
return index_specs
# Create a map of index fields to index spec. We're converting
# the fields from a list to a tuple so that it's hashable.
spec_fields = {
tuple(index['fields']): index for index in index_specs
}
# For each new index, if there's an existing index with the same
# fields list, update the existing spec with all data from the
# new spec.
for new_index in indices:
candidate = spec_fields.get(tuple(new_index['fields']))
if candidate is None:
index_specs.append(new_index)
else:
candidate.update(new_index)
return index_specs
# Merge geo indexes and unique_with indexes into the meta index specs.
index_specs = merge_index_specs(index_specs, geo_indices)
index_specs = merge_index_specs(index_specs, unique_indices)
return index_specs
    @classmethod
    def _build_index_spec(cls, spec):
        """Build a PyMongo index spec from a MongoEngine index spec.

        Accepts a bare field name, a list/tuple of names, or a dict
        spec. Field names may carry a single-character direction prefix
        (see table below) and are translated to their db_field paths.
        '_cls' is prepended when class inheritance requires it.
        """
        if isinstance(spec, six.string_types):
            spec = {'fields': [spec]}
        elif isinstance(spec, (list, tuple)):
            spec = {'fields': list(spec)}
        elif isinstance(spec, dict):
            # Copy so the caller's spec dict is not mutated below.
            spec = dict(spec)
        index_list = []
        direction = None
        # Check to see if we need to include _cls
        allow_inheritance = cls._meta.get('allow_inheritance')
        include_cls = (
            allow_inheritance and
            not spec.get('sparse', False) and
            spec.get('cls', True) and
            '_cls' not in spec['fields']
        )
        # 733: don't include cls if index_cls is False unless there is an explicit cls with the index
        include_cls = include_cls and (spec.get('cls', False) or cls._meta.get('index_cls', True))
        if 'cls' in spec:
            spec.pop('cls')
        for key in spec['fields']:
            # If inherited spec continue
            if isinstance(key, (list, tuple)):
                continue
            # ASCENDING from +
            # DESCENDING from -
            # TEXT from $
            # HASHED from #
            # GEOSPHERE from (
            # GEOHAYSTACK from )
            # GEO2D from *
            direction = pymongo.ASCENDING
            if key.startswith('-'):
                direction = pymongo.DESCENDING
            elif key.startswith('$'):
                direction = pymongo.TEXT
            elif key.startswith('#'):
                direction = pymongo.HASHED
            elif key.startswith('('):
                direction = pymongo.GEOSPHERE
            elif key.startswith(')'):
                direction = pymongo.GEOHAYSTACK
            elif key.startswith('*'):
                direction = pymongo.GEO2D
            if key.startswith(('+', '-', '*', '$', '#', '(', ')')):
                # Strip the direction prefix to leave the field path.
                key = key[1:]
            # Use real field name, do it manually because we need field
            # objects for the next part (list field checking)
            parts = key.split('.')
            if parts in (['pk'], ['id'], ['_id']):
                key = '_id'
            else:
                fields = cls._lookup_field(parts)
                parts = []
                for field in fields:
                    try:
                        if field != '_id':
                            field = field.db_field
                    except AttributeError:
                        pass
                    parts.append(field)
                key = '.'.join(parts)
            index_list.append((key, direction))
        # Don't add cls to a geo index
        if include_cls and direction not in (
                pymongo.GEO2D, pymongo.GEOHAYSTACK, pymongo.GEOSPHERE):
            index_list.insert(0, ('_cls', 1))
        if index_list:
            spec['fields'] = index_list
        return spec
    @classmethod
    def _unique_with_indexes(cls, namespace=''):
        """Find unique indexes in the document schema and return them.

        Recurses into embedded document fields (prefixing field paths
        with *namespace*) and honours ``unique_with`` combinations.
        NOTE(review): this also mutates field objects as a side effect
        (marks unique_with targets as required, normalises unique_with
        to a list).
        """
        unique_indexes = []
        for field_name, field in list(cls._fields.items()):
            sparse = field.sparse
            # Generate a list of indexes needed by uniqueness constraints
            if field.unique:
                unique_fields = [field.db_field]
                # Add any unique_with fields to the back of the index spec
                if field.unique_with:
                    if isinstance(field.unique_with, six.string_types):
                        field.unique_with = [field.unique_with]
                    # Convert unique_with field names to real field names
                    unique_with = []
                    for other_name in field.unique_with:
                        parts = other_name.split('.')
                        # Lookup real name
                        parts = cls._lookup_field(parts)
                        name_parts = [part.db_field for part in parts]
                        unique_with.append('.'.join(name_parts))
                        # Unique field should be required
                        parts[-1].required = True
                        sparse = (not sparse and
                                  parts[-1].name not in cls.__dict__)
                    unique_fields += unique_with
                # Add the new index to the list
                fields = [
                    ('%s%s' % (namespace, f), pymongo.ASCENDING)
                    for f in unique_fields
                ]
                index = {'fields': fields, 'unique': True, 'sparse': sparse}
                unique_indexes.append(index)
            if field.__class__.__name__ == 'ListField':
                # Uniqueness on a ListField applies to its item field.
                field = field.field
            # Grab any embedded document field unique indexes
            if (field.__class__.__name__ == 'EmbeddedDocumentField' and
                    field.document_type != cls):
                field_namespace = '%s.' % field_name
                doc_cls = field.document_type
                unique_indexes += doc_cls._unique_with_indexes(field_namespace)
        return unique_indexes
    @classmethod
    def _geo_indices(cls, inspected=None, parent_field=None):
        """Collect geo index specs declared on this document's fields.

        Recurses into embedded documents. *inspected* tracks visited
        classes to avoid infinite recursion and *parent_field* carries
        the db_field prefix for nested fields.
        """
        inspected = inspected or []
        geo_indices = []
        inspected.append(cls)
        geo_field_type_names = ('EmbeddedDocumentField', 'GeoPointField',
                                'PointField', 'LineStringField',
                                'PolygonField')
        geo_field_types = tuple([_import_class(field)
                                 for field in geo_field_type_names])
        for field in list(cls._fields.values()):
            if not isinstance(field, geo_field_types):
                continue
            if hasattr(field, 'document_type'):
                # EmbeddedDocumentField: descend into the embedded class.
                field_cls = field.document_type
                if field_cls in inspected:
                    continue
                if hasattr(field_cls, '_geo_indices'):
                    geo_indices += field_cls._geo_indices(
                        inspected, parent_field=field.db_field)
            elif field._geo_index:
                field_name = field.db_field
                if parent_field:
                    field_name = '%s.%s' % (parent_field, field_name)
                geo_indices.append({
                    'fields': [(field_name, field._geo_index)]
                })
        return geo_indices
    @classmethod
    def _lookup_field(cls, parts):
        """Given the path to a given field, return a list containing
        the Field object associated with that field and all of its parent
        Field objects.
        Args:
            parts (str, list, or tuple) - path to the field. Should be a
            string for simple fields existing on this document or a list
            of strings for a field that exists deeper in embedded documents.
        Returns:
            A list of Field instances for fields that were found or
            strings for sub-fields that weren't.
        Example:
            >>> user._lookup_field('name')
            [<mongoengine.fields.StringField at 0x1119bff50>]
            >>> user._lookup_field('roles')
            [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>]
            >>> user._lookup_field(['roles', 'role'])
            [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>,
            <mongoengine.fields.StringField at 0x1119ec050>]
            >>> user._lookup_field('doesnt_exist')
            raises LookUpError
            >>> user._lookup_field(['roles', 'doesnt_exist'])
            [<mongoengine.fields.EmbeddedDocumentListField at 0x1119ec250>,
            'doesnt_exist']
        """
        # TODO this method is WAY too complicated. Simplify it.
        # TODO don't think returning a string for embedded non-existent fields is desired
        ListField = _import_class('ListField')
        DynamicField = _import_class('DynamicField')
        if not isinstance(parts, (list, tuple)):
            parts = [parts]
        fields = []
        field = None
        for field_name in parts:
            # Handle ListField indexing:
            if field_name.isdigit() and isinstance(field, ListField):
                fields.append(field_name)
                continue
            # Look up first field from the document
            if field is None:
                if field_name == 'pk':
                    # Deal with "primary key" alias
                    field_name = cls._meta['id_field']
                if field_name in cls._fields:
                    field = cls._fields[field_name]
                elif cls._dynamic:
                    field = DynamicField(db_field=field_name)
                elif cls._meta.get('allow_inheritance') or cls._meta.get('abstract', False):
                    # 744: in case the field is defined in a subclass
                    for subcls in cls.__subclasses__():
                        try:
                            field = subcls._lookup_field([field_name])[0]
                        except LookUpError:
                            continue
                        if field is not None:
                            break
                    else:
                        # for/else: no subclass resolved the name either.
                        raise LookUpError('Cannot resolve field "%s"' % field_name)
                else:
                    raise LookUpError('Cannot resolve field "%s"' % field_name)
            else:
                ReferenceField = _import_class('ReferenceField')
                GenericReferenceField = _import_class('GenericReferenceField')
                # If previous field was a reference, throw an error (we
                # cannot look up fields that are on references).
                if isinstance(field, (ReferenceField, GenericReferenceField)):
                    raise LookUpError('Cannot perform join in mongoDB: %s' %
                                      '__'.join(parts))
                # If the parent field has a "field" attribute which has a
                # lookup_member method, call it to find the field
                # corresponding to this iteration.
                if hasattr(getattr(field, 'field', None), 'lookup_member'):
                    new_field = field.field.lookup_member(field_name)
                # If the parent field is a DynamicField or if it's part of
                # a DynamicDocument, mark current field as a DynamicField
                # with db_name equal to the field name.
                elif cls._dynamic and (isinstance(field, DynamicField) or
                        getattr(getattr(field, 'document_type', None), '_dynamic', None)):
                    new_field = DynamicField(db_field=field_name)
                # Else, try to use the parent field's lookup_member method
                # to find the subfield.
                elif hasattr(field, 'lookup_member'):
                    new_field = field.lookup_member(field_name)
                # Raise a LookUpError if all the other conditions failed.
                else:
                    raise LookUpError(
                        'Cannot resolve subfield or operator {} '
                        'on the field {}'.format(field_name, field.name)
                    )
                # If current field still wasn't found and the parent field
                # is a ComplexBaseField, add the name current field name and
                # move on.
                if not new_field and isinstance(field, ComplexBaseField):
                    fields.append(field_name)
                    continue
                elif not new_field:
                    raise LookUpError('Cannot resolve field "%s"' % field_name)
                field = new_field  # update field to the new field type
            fields.append(field)
        return fields
@classmethod
def _translate_field_name(cls, field, sep='.'):
"""Translate a field attribute name to a database field name.
"""
parts = field.split(sep)
parts = [f.db_field for f in cls._lookup_field(parts)]
return '.'.join(parts)
def __set_field_display(self):
"""For each field that specifies choices, create a
get_<field>_display method.
"""
fields_with_choices = [(n, f) for n, f in list(self._fields.items())
if f.choices]
for attr_name, field in fields_with_choices:
setattr(self,
'get_%s_display' % attr_name,
partial(self.__get_field_display, field=field))
def __get_field_display(self, field):
"""Return the display value for a choice field"""
value = getattr(self, field.name)
if field.choices and isinstance(field.choices[0], (list, tuple)):
return dict(field.choices).get(value, value)
return value
| Pablo126/SSBW | Entrega1/lib/python3.5/site-packages/mongoengine/base/document.py | Python | gpl-3.0 | 42,028 |
#!/usr/bin/env python
import io
import netCDF4
import numpy
import m6plot
import m6toolbox
import os
import sys
def run():
  """Parse command-line arguments for the SST-bias plotter and call main().

  Kept as a separate entry point so the module can be imported without
  triggering argument parsing.
  """
  # Catch only ImportError: a bare `except:` would also swallow
  # KeyboardInterrupt/SystemExit and misreport unrelated failures.
  try: import argparse
  except ImportError: raise Exception('This version of python is not new enough. python 2.7 or newer is required.')
  parser = argparse.ArgumentParser(description='''Script for plotting annual-average SST bias.''')
  parser.add_argument('infile', type=str, nargs='+', help='''Annually-averaged file or csv list of files containing 2D 'sst'.''')
  parser.add_argument('-l','--label', type=str, default='', help='''Label to add to the plot.''')
  parser.add_argument('-s','--suptitle', type=str, default='', help='''Super-title for experiment. Default is to read from netCDF file.''')
  parser.add_argument('-o','--outdir', type=str, default='.', help='''Directory in which to place plots.''')
  parser.add_argument('-g','--gridspec', type=str, required=True,
    help='''Directory containing mosaic/grid-spec files (ocean_hgrid.nc and ocean_mask.nc).''')
  parser.add_argument('-w','--woa_monthly', type=str, required=True,
    help='''File containing WOA (or obs) data to compare against.''')
  cmdLineArgs = parser.parse_args()
  main(cmdLineArgs)
def main(cmdLineArgs,stream=False):
  """Compute and plot the annual/monthly-mean SST bias against WOA'05.

  Reads grid geometry from cmdLineArgs.gridspec (a directory or a tar
  file), model SST from cmdLineArgs.infile, and observed temperature
  from cmdLineArgs.woa_monthly, then renders a bias map and a 3-panel
  comparison via m6plot. When stream=True the single-panel image is
  appended to an in-memory buffer list that is returned instead of
  being written to disk (the 3-panel figure is still written to
  cmdLineArgs.outdir — presumably intentional; verify against callers).
  """
  numpy.seterr(divide='ignore', invalid='ignore', over='ignore') # To avoid warnings
  if not os.path.exists(cmdLineArgs.gridspec): raise ValueError('Specified gridspec directory/tar file does not exist.')
  if os.path.isdir(cmdLineArgs.gridspec):
    # Grid-spec given as a directory: read supergrid (every other point
    # is a cell corner, offset points are cell centers), mask and topog.
    x = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['x'][::2,::2]
    xcenter = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['x'][1::2,1::2]
    y = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['y'][::2,::2]
    ycenter = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['y'][1::2,1::2]
    msk = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_mask.nc').variables['mask'][:]
    area = msk*netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['area'][:,:].reshape([msk.shape[0], 2, msk.shape[1], 2]).sum(axis=-3).sum(axis=-1)
    depth = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_topog.nc').variables['depth'][:]
  elif os.path.isfile(cmdLineArgs.gridspec):
    # Grid-spec given as a tar file: same fields via m6toolbox helper.
    x = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','x')[::2,::2]
    xcenter = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','x')[1::2,1::2]
    y = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','y')[::2,::2]
    ycenter = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','y')[1::2,1::2]
    msk = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_mask.nc','mask')[:]
    area = msk*m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','area')[:,:].reshape([msk.shape[0], 2, msk.shape[1], 2]).sum(axis=-3).sum(axis=-1)
    depth = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_topog.nc','depth')[:]
  else:
    raise ValueError('Unable to extract grid information from gridspec directory/tar file.')
  # open dataset
  rootGroup = netCDF4.MFDataset( cmdLineArgs.infile )
  # gather months from input dataset
  tvar = rootGroup.variables['time']
  times = [netCDF4.num2date(i,tvar.units,calendar=tvar.calendar.lower()) for i in tvar[:]]
  idx = list(set([i.month-1 for i in times]))
  month_label = [i.strftime('%b') for i in times]
  month_label = month_label[:len(idx)]
  month_label = str.join(', ',month_label)
  # read sst from model
  if 'sst' in rootGroup.variables: varName = 'sst'
  elif 'tos' in rootGroup.variables: varName = 'tos'
  else: raise Exception('Could not find "sst", "ptemp" or "tos" in file "%s"'%(cmdLineArgs.infile))
  # Time-average when multiple records are present.
  if rootGroup.variables[varName].shape[0]>1: Tmod = rootGroup.variables[varName][:].mean(axis=0)
  else: Tmod = rootGroup.variables[varName][0]
  # read sst from obs
  Tobs = netCDF4.Dataset( cmdLineArgs.woa_monthly )
  if 'temp' in Tobs.variables: Tobs = Tobs.variables['temp']
  elif 'ptemp' in Tobs.variables: Tobs = Tobs.variables['ptemp']
  else: raise Exception('Could not find "temp" or "ptemp" in file "%s"'%(cmdLineArgs.woa_monthly))
  # 3D obs: take surface level; 4D obs: average surface over matching months.
  if len(Tobs.shape)==3: Tobs = Tobs[0]
  else: Tobs = Tobs[idx,0].mean(axis=0)
  # create title for plot
  if cmdLineArgs.suptitle != '': suptitle = cmdLineArgs.suptitle + ' ' + cmdLineArgs.label
  else: suptitle = rootGroup.title + ' ' + cmdLineArgs.label
  # invoke m6plot
  imgbufs = []
  ci=m6plot.pmCI(0.25,4.5,.5)
  if stream is True: objOut = io.BytesIO()
  else: objOut = cmdLineArgs.outdir+'/SST_bias_WOA05.png'
  m6plot.xyplot( Tmod - Tobs , x, y, area=area,
      suptitle=suptitle, title=month_label+' SST bias (w.r.t. WOA\'05) [$\degree$C]',
      clim=ci, colormap='dunnePM', centerlabels=True, extend='both',
      save=objOut)
  if stream is True: imgbufs.append(objOut)
  m6plot.xycompare( Tmod, Tobs , x, y, area=area,
      suptitle=suptitle,
      title1=month_label+' SST [$\degree$C]',
      title2='WOA\'05 '+month_label+' SST [$\degree$C]',
      clim=m6plot.linCI(-2,29,.5), colormap='dunneRainbow', extend='max',
      dlim=ci, dcolormap='dunnePM', dextend='both', centerdlabels=True,
      save=cmdLineArgs.outdir+'/SST_bias_WOA05.3_panel.png')
  if stream is True:
    return imgbufs
# Standard entry-point guard: only parse arguments and plot when the
# script is executed directly, not when it is imported as a module.
if __name__ == '__main__':
  run()
| nicjhan/MOM6-examples | tools/analysis/SST_monthly_bias_WOA05.py | Python | gpl-3.0 | 5,253 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SoaRecord(Model):
    """An SOA (Start of Authority) DNS record.

    :param host: The domain name of the authoritative name server for this
     SOA record.
    :type host: str
    :param email: The email contact for this SOA record.
    :type email: str
    :param serial_number: The serial number for this SOA record.
    :type serial_number: long
    :param refresh_time: The refresh value for this SOA record.
    :type refresh_time: long
    :param retry_time: The retry time for this SOA record.
    :type retry_time: long
    :param expire_time: The expire time for this SOA record.
    :type expire_time: long
    :param minimum_ttl: The minimum value for this SOA record. By convention
     this is used to determine the negative caching duration.
    :type minimum_ttl: long
    """

    # Wire-format metadata consumed by the msrest (de)serializer:
    # python attribute name -> JSON key and serialized type.
    _attribute_map = {
        attr: {'key': key, 'type': kind}
        for attr, key, kind in (
            ('host', 'host', 'str'),
            ('email', 'email', 'str'),
            ('serial_number', 'serialNumber', 'long'),
            ('refresh_time', 'refreshTime', 'long'),
            ('retry_time', 'retryTime', 'long'),
            ('expire_time', 'expireTime', 'long'),
            ('minimum_ttl', 'minimumTTL', 'long'),
        )
    }

    def __init__(self, host=None, email=None, serial_number=None, refresh_time=None, retry_time=None, expire_time=None, minimum_ttl=None):
        # Store every field verbatim; validation/serialization is handled
        # by the msrest Model machinery via _attribute_map.
        for name, value in (
                ('host', host),
                ('email', email),
                ('serial_number', serial_number),
                ('refresh_time', refresh_time),
                ('retry_time', retry_time),
                ('expire_time', expire_time),
                ('minimum_ttl', minimum_ttl)):
            setattr(self, name, value)
| rjschwei/azure-sdk-for-python | azure-mgmt-dns/azure/mgmt/dns/models/soa_record.py | Python | mit | 2,124 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import getpass
import os
import signal
import socket
import struct
import subprocess
import sys
import threading
import time
from mesos.interface import mesos_pb2
from twitter.common import log
from twitter.common.dirutil import safe_mkdtemp
from twitter.common.log.options import LogOptions
from twitter.common.quantity import Amount, Time
from apache.thermos.common.statuses import (
INTERNAL_ERROR,
INVALID_TASK,
TERMINAL_TASK,
UNKNOWN_ERROR,
UNKNOWN_USER
)
from apache.thermos.config.loader import ThermosTaskWrapper
from apache.thermos.core import runner as core
from apache.thermos.monitoring.monitor import TaskMonitor
from .common.status_checker import StatusResult
from .common.task_info import mesos_task_instance_from_assigned_task, resolve_ports
from .common.task_runner import TaskError, TaskRunner, TaskRunnerProvider
from .http_lifecycle import HttpLifecycleManager
from gen.apache.thermos.ttypes import TaskState
class ThermosTaskRunner(TaskRunner):
  """TaskRunner that forks a thermos_runner.pex subprocess to execute a task
  and tracks its lifecycle through the thermos checkpoint stream (via
  TaskMonitor) and os.waitpid on the forked child.
  """

  ESCALATION_WAIT = Amount(5, Time.SECONDS)
  # Maps a terminal thermos TaskState to the StatusResult reported to mesos
  # when the runner process has died.
  EXIT_STATE_MAP = {
      TaskState.ACTIVE: StatusResult('Runner died while task was active.', mesos_pb2.TASK_LOST),
      TaskState.FAILED: StatusResult('Task failed.', mesos_pb2.TASK_FAILED),
      TaskState.KILLED: StatusResult('Task killed.', mesos_pb2.TASK_KILLED),
      TaskState.LOST: StatusResult('Task lost.', mesos_pb2.TASK_LOST),
      TaskState.SUCCESS: StatusResult('Task finished.', mesos_pb2.TASK_FINISHED),
  }
  MAX_WAIT = Amount(1, Time.MINUTES)
  PEX_NAME = 'thermos_runner.pex'
  POLL_INTERVAL = Amount(500, Time.MILLISECONDS)
  THERMOS_PREEMPTION_WAIT = Amount(1, Time.MINUTES)

  def __init__(self,
               runner_pex,
               task_id,
               task,
               role,
               portmap,
               sandbox,
               checkpoint_root,
               artifact_dir=None,
               clock=time,
               hostname=None,
               process_logger_destination=None,
               process_logger_mode=None,
               rotate_log_size_mb=None,
               rotate_log_backups=None,
               preserve_env=False):
    """
    runner_pex       location of the thermos_runner pex that this task runner should use
    task_id          task_id assigned by scheduler
    task             thermos pystachio Task object
    role             role to run the task under
    portmap          { name => port } dictionary
    sandbox          the sandbox object
    checkpoint_root  the checkpoint root for the thermos runner
    artifact_dir     scratch space for the thermos runner (basically cwd of thermos.pex);
                     a temp dir is created when not given
    clock            clock to use for sleeping/timing (time-module compatible)
    hostname         hostname to pass to the runner; defaults to socket.gethostname()
    process_logger_destination / process_logger_mode / rotate_log_size_mb /
    rotate_log_backups
                     process log configuration forwarded to the runner cmdline
    preserve_env     whether to pass --preserve_env to the runner

    Raises TaskError if the task cannot be serialized to disk.
    """
    self._runner_pex = runner_pex
    self._task_id = task_id
    self._task = task
    # Subprocess handle plus the decoded (signal, exit code) of its death.
    self._popen, self._popen_signal, self._popen_rc = None, None, None
    self._monitor = None
    self._status = None
    self._ports = portmap
    self._root = sandbox.root
    self._checkpoint_root = checkpoint_root
    self._enable_chroot = sandbox.chrooted
    self._preserve_env = preserve_env
    self._role = role
    self._clock = clock
    self._artifact_dir = artifact_dir or safe_mkdtemp()
    self._hostname = hostname or socket.gethostname()
    self._process_logger_destination = process_logger_destination
    self._process_logger_mode = process_logger_mode
    self._rotate_log_size_mb = rotate_log_size_mb
    self._rotate_log_backups = rotate_log_backups

    # wait events
    self._dead = threading.Event()          # set once waitpid observes the runner's death
    self._kill_signal = threading.Event()   # set once a kill/lose has been requested
    self.forking = threading.Event()        # set when start() begins
    self.forked = threading.Event()         # set once Popen has succeeded

    # Serialize the task to task.json in the artifact dir; the runner
    # subprocess reads it back via --thermos_json.
    try:
      with open(os.path.join(self._artifact_dir, 'task.json'), 'w') as fp:
        self._task_filename = fp.name
        ThermosTaskWrapper(self._task).to_file(self._task_filename)
    except ThermosTaskWrapper.InvalidTask as e:
      raise TaskError('Failed to load task: %s' % e)

  @property
  def artifact_dir(self):
    # Scratch directory used as the runner's cwd and task.json location.
    return self._artifact_dir

  def task_state(self):
    # Latest TaskState from the checkpoint stream, or None before start().
    return self._monitor.task_state() if self._monitor else None

  @classmethod
  def _decode_status(cls, status):
    """Split an os.waitpid status word into (signal, exit_status)."""
    # Per os.waitpid documentation, status is:
    # a 16-bit number, whose low byte is the signal number that killed the
    # process, and whose high byte is the exit status (if the signal
    # number is zero); the high bit of the low byte is set if a core file
    # was produced.
    exit_signal, exit_status = struct.unpack('bb', struct.pack('H', status))
    return exit_signal & 0x7F, exit_status  # strip the signal high bit

  @property
  def is_alive(self):
    """
      Is the process underlying the Thermos task runner alive?

      Side effect: on first detection of death, records the decoded
      signal/exit code and sets the _dead event.
    """
    if not self._popen:
      return False
    if self._dead.is_set():
      return False

    # N.B. You cannot mix this code and any code that relies upon os.wait
    # mechanisms with blanket child process collection. One example is the
    # Thermos task runner which calls os.wait4 -- without refactoring, you
    # should not mix a Thermos task runner in the same process as this
    # thread.
    try:
      pid, status = os.waitpid(self._popen.pid, os.WNOHANG)
      if pid == 0:
        # Child still running (WNOHANG returned without reaping).
        return True
      else:
        self._popen_signal, self._popen_rc = self._decode_status(status)
        log.info('Detected runner termination: pid=%s, signal=%s, rc=%s' % (
            pid, self._popen_signal, self._popen_rc))
    except OSError as e:
      log.error('is_alive got OSError: %s' % e)
      # ECHILD means the child was already reaped elsewhere; treat as dead.
      if e.errno != errno.ECHILD:
        raise

    self._dead.set()
    return False

  def compute_status(self):
    """Map the runner's death (signal/exit code) to a StatusResult, or None
    while the runner is still alive."""
    if self.is_alive:
      return None
    if self._popen_signal != 0:
      return StatusResult('Task killed by signal %s.' % self._popen_signal, mesos_pb2.TASK_KILLED)
    if self._popen_rc == 0 or self._popen_rc == TERMINAL_TASK:
      # Clean runner exit: consult the checkpointed task state.
      exit_state = self.EXIT_STATE_MAP.get(self.task_state())
      if exit_state is None:
        log.error('Received unexpected exit state from TaskMonitor.')
        return StatusResult('Task checkpoint could not be read.', mesos_pb2.TASK_LOST)
      else:
        return exit_state
    elif self._popen_rc == UNKNOWN_USER:
      return StatusResult('Task started with unknown user.', mesos_pb2.TASK_FAILED)
    elif self._popen_rc == INTERNAL_ERROR:
      return StatusResult('Thermos failed with internal error.', mesos_pb2.TASK_LOST)
    elif self._popen_rc == INVALID_TASK:
      return StatusResult('Thermos received an invalid task.', mesos_pb2.TASK_FAILED)
    elif self._popen_rc == UNKNOWN_ERROR:
      return StatusResult('Thermos failed with an unknown error.', mesos_pb2.TASK_LOST)
    else:
      return StatusResult('Thermos exited for unknown reason (exit status: %s)' % self._popen_rc,
          mesos_pb2.TASK_LOST)

  def terminate_runner(self, as_loss=False):
    """
      Terminate the underlying runner process, if it exists.

      as_loss: send SIGUSR2 (treat as lost) instead of SIGUSR1 (kill).
      Idempotent: duplicate invocations are ignored.
    """
    if self._kill_signal.is_set():
      log.warning('Duplicate kill/lose signal received, ignoring.')
      return
    self._kill_signal.set()
    if self.is_alive:
      sig = 'SIGUSR2' if as_loss else 'SIGUSR1'
      log.info('Runner is alive, sending %s' % sig)
      try:
        self._popen.send_signal(getattr(signal, sig))
      except OSError as e:
        log.error('Got OSError sending %s: %s' % (sig, e))
    else:
      log.info('Runner is dead, skipping kill.')

  def kill(self):
    # Graceful kill (SIGUSR1).
    self.terminate_runner()

  def lose(self):
    # Mark as lost (SIGUSR2).
    self.terminate_runner(as_loss=True)

  def quitquitquit(self):
    """Bind to the process tree of a Thermos task and kill it with impunity."""
    try:
      runner = core.TaskRunner.get(self._task_id, self._checkpoint_root)
      if runner:
        log.info('quitquitquit calling runner.kill')
        # Right now preemption wait is hardcoded, though it may become configurable in the future.
        runner.kill(force=True, preemption_wait=self.THERMOS_PREEMPTION_WAIT)
      else:
        log.error('Could not instantiate runner!')
    except core.TaskRunner.Error as e:
      log.error('Could not quitquitquit runner: %s' % e)

  def _cmdline(self):
    """Build the command line used to fork the thermos runner pex."""
    host_sandbox = None
    # When run under mesos, point the runner at the host-visible sandbox path.
    if os.environ.get('MESOS_DIRECTORY'):
      host_sandbox = os.path.join(os.environ.get('MESOS_DIRECTORY'), 'sandbox')
    params = dict(log_dir=LogOptions.log_dir(),
                  log_to_disk='DEBUG',
                  checkpoint_root=self._checkpoint_root,
                  sandbox=host_sandbox or self._root,
                  task_id=self._task_id,
                  thermos_json=self._task_filename,
                  hostname=self._hostname,
                  process_logger_destination=self._process_logger_destination,
                  process_logger_mode=self._process_logger_mode,
                  rotate_log_size_mb=self._rotate_log_size_mb,
                  rotate_log_backups=self._rotate_log_backups)
    # Only root may setuid down to the task's role.
    if getpass.getuser() == 'root' and self._role:
      params.update(setuid=self._role)
    cmdline_args = [sys.executable, self._runner_pex]
    # None-valued flags are omitted from the command line entirely.
    cmdline_args.extend(
        '--%s=%s' % (flag, value) for flag, value in params.items() if value is not None)
    if self._enable_chroot:
      cmdline_args.extend(['--enable_chroot'])
    if self._preserve_env:
      cmdline_args.extend(['--preserve_env'])
    for name, port in self._ports.items():
      cmdline_args.extend(['--port=%s:%s' % (name, port)])
    return cmdline_args

  # --- public interface

  def start(self, timeout=MAX_WAIT):
    """Fork the task runner and return once the underlying task is running, up to timeout."""
    self.forking.set()
    self._monitor = TaskMonitor(self._checkpoint_root, self._task_id)
    cmdline_args = self._cmdline()
    log.info('Forking off runner with cmdline: %s' % ' '.join(cmdline_args))
    cwd = os.environ.get('MESOS_DIRECTORY')
    try:
      self._popen = subprocess.Popen(cmdline_args, cwd=cwd)
    except OSError as e:
      raise TaskError(e)
    self.forked.set()
    self.wait_start(timeout=timeout)

  def wait_start(self, timeout=MAX_WAIT):
    """Poll until the monitor reports the task active/finished or timeout
    elapses; raises TaskError on runner failure or deadline expiry."""
    log.debug('Waiting for task to start.')
    def is_started():
      return self._monitor and (self._monitor.active or self._monitor.finished)
    waited = Amount(0, Time.SECONDS)
    while waited < timeout:
      if not is_started():
        log.debug('  - sleeping...')
        self._clock.sleep(self.POLL_INTERVAL.as_(Time.SECONDS))
        waited += self.POLL_INTERVAL
      else:
        break
      if not self.is_alive:
        if self._popen_rc != 0:
          raise TaskError('Task failed: %s' % self.compute_status().reason)
        else:
          # We can end up here if the process exited between the call to Popen and
          # waitpid (in is_alive), which is fine.
          log.info('Task runner exited: %s' % self.compute_status().reason)
          break
    if not is_started():
      log.error('Task did not start with in deadline, forcing loss.')
      self.lose()
      raise TaskError('Task did not start within deadline.')

  def stop(self, timeout=MAX_WAIT):
    """Stop the runner. If it's already completed, no-op. If it's still running, issue a kill."""
    log.info('ThermosTaskRunner is shutting down.')
    if not self.forking.is_set():
      raise TaskError('Failed to call TaskRunner.start.')
    log.info('Invoking runner.kill')
    self.kill()
    # First wait for the graceful kill to take effect.
    waited = Amount(0, Time.SECONDS)
    while self.is_alive and waited < timeout:
      self._clock.sleep(self.POLL_INTERVAL.as_(Time.SECONDS))
      waited += self.POLL_INTERVAL
    if not self.is_alive and self.task_state() != TaskState.ACTIVE:
      return
    # Escalate: rebind to the task's process tree and force-kill it.
    log.info('Thermos task did not shut down cleanly, rebinding to kill.')
    self.quitquitquit()
    # Note: `waited` carries over from the first loop, so both phases share
    # the same overall deadline.
    while not self._monitor.finished and waited < timeout:
      self._clock.sleep(self.POLL_INTERVAL.as_(Time.SECONDS))
      waited += self.POLL_INTERVAL
    if not self._monitor.finished:
      raise TaskError('Task did not stop within deadline.')

  @property
  def status(self):
    """Return the StatusResult of this task runner. This returns None as
    long as no terminal state is reached."""
    # Cached: once a terminal status is computed it never changes.
    if self._status is None:
      self._status = self.compute_status()
    return self._status
class DefaultThermosTaskRunnerProvider(TaskRunnerProvider):
  """Factory that builds a (HttpLifecycleManager-wrapped) ThermosTaskRunner
  from an AssignedTask, threading through the provider's configuration."""

  def __init__(self,
               pex_location,
               checkpoint_root,
               artifact_dir=None,
               preserve_env=False,
               task_runner_class=ThermosTaskRunner,
               max_wait=Amount(1, Time.MINUTES),
               preemption_wait=Amount(1, Time.MINUTES),
               poll_interval=Amount(500, Time.MILLISECONDS),
               clock=time,
               process_logger_destination=None,
               process_logger_mode=None,
               rotate_log_size_mb=None,
               rotate_log_backups=None):
    """
    pex_location       path to the thermos_runner pex handed to each runner
    checkpoint_root    checkpoint root passed to each runner
    artifact_dir       scratch dir shared by produced runners (temp dir if None)
    preserve_env       forwarded to the runner as --preserve_env
    task_runner_class  runner class to instantiate (subclassed per-call to
                       override wait/poll constants)
    max_wait / preemption_wait / poll_interval
                       timing overrides applied to the produced runner class
    clock              clock passed to each runner
    process_logger_* / rotate_log_*
                       process log configuration forwarded to each runner
    """
    self._artifact_dir = artifact_dir or safe_mkdtemp()
    self._checkpoint_root = checkpoint_root
    self._preserve_env = preserve_env
    self._clock = clock
    self._max_wait = max_wait
    self._pex_location = pex_location
    self._poll_interval = poll_interval
    self._preemption_wait = preemption_wait
    self._task_runner_class = task_runner_class
    self._process_logger_destination = process_logger_destination
    self._process_logger_mode = process_logger_mode
    self._rotate_log_size_mb = rotate_log_size_mb
    self._rotate_log_backups = rotate_log_backups

  def _get_role(self, assigned_task):
    # Docker containers run as the container's user, so no setuid role there;
    # otherwise run under the job's role.
    return None if assigned_task.task.container.docker else assigned_task.task.job.role

  def from_assigned_task(self, assigned_task, sandbox):
    """Create a runner for assigned_task, raising TaskError if the embedded
    thermos task cannot be deserialized."""
    task_id = assigned_task.taskId
    role = self._get_role(assigned_task)
    try:
      mesos_task = mesos_task_instance_from_assigned_task(assigned_task)
    except ValueError as e:
      raise TaskError('Could not deserialize Thermos task from AssignedTask: %s' % e)
    mesos_ports = resolve_ports(mesos_task, assigned_task.assignedPorts)
    # Subclass the configured runner class so the provider's timing knobs
    # override the class-level defaults without mutating the shared class.
    class ProvidedThermosTaskRunner(self._task_runner_class):
      MAX_WAIT = self._max_wait
      POLL_INTERVAL = self._poll_interval
      THERMOS_PREEMPTION_WAIT = self._preemption_wait
    runner = ProvidedThermosTaskRunner(
        self._pex_location,
        task_id,
        mesos_task.task(),
        role,
        mesos_ports,
        sandbox,
        self._checkpoint_root,
        artifact_dir=self._artifact_dir,
        clock=self._clock,
        hostname=assigned_task.slaveHost,
        process_logger_destination=self._process_logger_destination,
        process_logger_mode=self._process_logger_mode,
        rotate_log_size_mb=self._rotate_log_size_mb,
        rotate_log_backups=self._rotate_log_backups,
        preserve_env=self._preserve_env)
    # Wrap with HTTP lifecycle handling (e.g. graceful shutdown endpoints).
    return HttpLifecycleManager.wrap(runner, mesos_task, mesos_ports)
class UserOverrideThermosTaskRunnerProvider(DefaultThermosTaskRunnerProvider):
  """Provider variant whose role is injected explicitly via set_role rather
  than derived from the assigned task."""

  def set_role(self, role):
    # Must be called before from_assigned_task: _get_role reads self._role,
    # which is never initialized anywhere else, so skipping this call would
    # raise AttributeError.
    self._role = role

  def _get_role(self, assigned_task):
    # Ignores the assigned task entirely and returns the overridden role.
    return self._role
| protochron/aurora | src/main/python/apache/aurora/executor/thermos_task_runner.py | Python | apache-2.0 | 15,490 |
# PopGen 1.1 is A Synthetic Population Generator for Advanced
# Microsimulation Models of Travel Demand
# Copyright (C) 2009, Arizona State University
# See PopGen/License
from qgis.core import *
from qgis.gui import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class ClickTool(QgsMapTool):
    """Map tool that, on a canvas click, stores a tiny bounding box around
    the clicked map coordinate in self.bb and emits a "finished()" signal."""

    def __init__(self, canvas):
        QgsMapTool.__init__(self, canvas)
        self.canvas = canvas
        # Placeholders for lower-left / upper-right corners (unused so far).
        self.ll = None
        self.ur = None
        # QObject used purely as a signal emitter for "finished()".
        self.o = QObject()
        # Relative half-width of the selection box around the clicked point.
        self.precision = 0.000001
        self.cursor = QCursor(Qt.PointingHandCursor)

    def canvasPressEvent(self, event):
        """Convert the clicked pixel to map coordinates, record a minuscule
        rectangle around it, and notify listeners."""
        xform = self.canvas.getCoordinateTransform()
        point = xform.toMapCoordinates(event.pos().x(), event.pos().y())
        # NOTE(review): the tolerance is multiplicative, so the box collapses
        # at coordinate 0 and its corners swap for negative coordinates --
        # presumably acceptable for the layers this was written for; verify.
        lo = 1 - self.precision
        hi = 1 + self.precision
        self.bb = QgsRect(
            QgsPoint(point.x() * lo, point.y() * lo),
            QgsPoint(point.x() * hi, point.y() * hi)
        )
        self.o.emit(SIGNAL("finished()"))

    def isZoomTool(self):
        """This tool selects features; it never zooms the canvas."""
        return False
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/synthesizer/gui/misc/pointSelectTool.py | Python | gpl-2.0 | 1,035 |
# Sample client program to communicate with gpsim
# (Python 2 script: uses print statements and str-based sockets.)
import socket
# Connect to the gpsim remote-control socket on localhost port 0x1234.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("", 0x1234))
# Ask for the command list to verify the link is up.
s.send("help\n")
print s.recv(20)
# helper functions
def ParseInt(buf):
    """Decode an integer response from the gpsim socket protocol.

    A valid response starts with the "$03" type tag followed by eight hex
    digits; anything else decodes to 0.
    """
    if buf[0:3] == "$03" :
        # Parse the 8 hex digits directly; int(..., 16) avoids eval() on
        # data received from the socket.
        return int(buf[3:11], 16)
    return 0
# Symbol Command: "$" + opcode 97 + length-prefixed symbol name; the reply
# carries the symbol's address.
s.send("$"+"97"+"0205start")
r = s.recv(20)
start_adr = ParseInt(r)
print "Start address:" + str(start_adr)
# Reset the simulated processor and single-step it twice.
s.send("reset\n")
print s.recv(20)
s.send("step\n")
print s.recv(20)
s.send("step\n")
print s.recv(20)
# define an example input and send it to the simulator:
# (opcode 99 = assign RAM; `count` is the target address record)
x=4
count="0300000020"
s.send("$"+"99"+ count + "03%08x"%x)
# another example of assigning a memory location
r = s.recv(20)
s.send("$"+"99"+ "0300000040" + "03000000ff")
r = s.recv(20)
# Move the program counter (doesn't work right now)
PCL="0300000002"
# Assign RAM command
#s.send("$"+"99"+ PCL +"03" + "%08x"%start_adr)
#st = "%08x"%start_adr
#s.send("$"+"99"+ PCL + "030000002")
#r = s.recv(20)
#print r
# start the simulation:
s.send("run\n")
print s.recv(20)
# Examine the results: read back the low and high bytes of x squared
# (opcode 91 = examine RAM at the given address record).
xsq_lo_adr = "0300000022"
s.send("$"+"91"+xsq_lo_adr)
r = s.recv(20)
xsq_lo = ParseInt(r)
print "received:" + r + " decoded as " + str(xsq_lo)
xsq_hi_adr = "0300000023"
s.send("$"+"91"+xsq_hi_adr)
r = s.recv(20)
xsq_hi = ParseInt(r)
print "received:" + r + " decoded as " + str(xsq_hi)
# Recombine the 16-bit result and compare against x*x computed locally.
xsq = xsq_hi*256 + xsq_lo
print " Sent " + str(x)
print " Received " + str(xsq)
if x*x == xsq :
    print "PASSED"
s.close()
| WellDone/gpsim | src/examples/scripts/testgensquares.py | Python | gpl-2.0 | 1,495 |
# -*- coding: utf-8 -*-
class DataType(object):
    """Couples a target Python type with an optional preprocessing callable.

    `unserialize` coerces incoming values into `type`, running the
    preprocessor (when supplied) first; `serialize` is the identity.
    """

    def __init__(self, type, preprocessor=None):
        self.type = type
        self.preprocessor = preprocessor

    def preprocess(self, value):
        # No preprocessor configured: hand the value back untouched.
        if not self.preprocessor:
            return value
        return self.preprocessor(value)

    def serialize(self, value):
        # Values are already in wire form; nothing to do.
        return value

    def unserialize(self, value):
        processed = self.preprocess(value)
        # Avoid re-wrapping values that are already of the target type.
        return processed if isinstance(processed, self.type) else self.type(processed)
| aa403/betfair.py | betfair/meta/datatype.py | Python | mit | 514 |
# pyv8_print_fn is actually in pyv8run.py and is added to the Globals
# Pyjamas builtin: joins the given objects with spaces and forwards the
# result to the host-provided pyv8_print_fn. (The JS("""...""") body is
# emitted verbatim by the pyjamas compiler -- it is not executed as Python.)
def printFunc(objs, newline):
    JS("""
    var s = "";
    for(var i=0; i < @{{objs}}.length; i++) {
        if(s != "") s += " ";
        s += @{{objs}}[i];
    }
    @{{pyv8_print_fn}}(s);
    """)
# pyv8_import_module is actually in pyv8run.py and has been added to Globals.
# Pyjamas import machinery: resolves dotted module names, initializing each
# link in the chain either as a compiled pyjs module (callable entries in
# $pyjs.modules_hash) or by delegating to the host's pyv8_import_module.
# NOTE: this is transpiler pseudo-Python ("async" parameter, JS() escapes);
# it is compiled to JavaScript, never run by a Python interpreter.
def import_module(syspath, parent_name, module_name, dynamic_load, async, init):
    JS("""
    @{{module}} = $pyjs.modules_hash[@{{module_name}}];
    if (typeof @{{module}} == 'function' && @{{module}}.__was_initialized__ == true) {
        return null;
    }
    if (@{{module_name}} == 'sys' || @{{module_name}} == 'pyjslib') {
        @{{module}}();
        return null;
    }
    """)
    names = module_name.split(".")
    importName = ''
    # Import all modules in the chain (import a.b.c)
    for name in names:
        importName += name
        JS("""@{{module}} = $pyjs.modules_hash[@{{importName}}];""")
        if not isUndefined(module):
            # Not initialized, but present. Must be pyjs module.
            if JS("@{{module}}.__was_initialized__ != true"):
                # Module wasn't initialized
                module()
        else:
            # Get a python module from PyV8
            initialized = False
            try:
                JS("@{{initialized}} = (@{{module}}.__was_initialized__ != true)")
            except:
                pass
            if not initialized:
                # Module wasn't initialized
                module = pyv8_import_module(parent_name, module_name)
                module.__was_initialized__ = True
                JS("""$pyjs.modules_hash[@{{importName}}] = @{{module}}""")
        importName += '.'
    # Publish the top-level package under $pyjs.modules.
    name = names[0]
    JS("""$pyjs.modules[@{{name}}] = $pyjs.modules_hash[@{{name}}];""")
    return None
# FIXME: dynamic=1, async=False are useless here (?). Only dynamic modules
# are loaded with load_module and it's always "async"
@noSourceTracking
def load_module(path, parent_module, module_name, dynamic=1, async=False):
    """Fetch and attach a compiled module's .cache.js script to the DOM,
    honoring per-platform overrides from sys.overrides; returns true once
    the module function appears in $pyjs.modules_hash. (Transpiled to JS.)
    """
    JS("""
    var cache_file;
    var module = $pyjs.modules_hash[@{{module_name}}];
    if (typeof module == 'function') {
        return true;
    }
    if (!@{{dynamic}}) {
        // There's no way we can load a none dynamic module
        return false;
    }
    if (@{{path}} == null)
    {
        @{{path}} = './';
    }
    var override_name = @{{sys}}.platform + "." + @{{module_name}};
    if (((@{{sys}}.overrides != null) &&
         (@{{sys}}.overrides.has_key(override_name))))
    {
        cache_file = @{{sys}}.overrides.__getitem__(override_name) ;
    }
    else
    {
        cache_file = @{{module_name}} ;
    }
    cache_file = (@{{path}} + cache_file + '.cache.js' ) ;
    //alert("cache " + cache_file + " " + module_nameXXX + " " + parent_moduleXXX);
    var onload_fn = '';
    // this one tacks the script onto the end of the DOM
    @{{pyjs_load_script}}(cache_file, onload_fn, @{{async}});
    try {
        var loaded = (typeof $pyjs.modules_hash[@{{module_name}}] == 'function')
    } catch ( e ) {
    }
    if (loaded) {
        return true;
    }
    return false;
    """)
# Polls (via setTimeout) until every module in module_list has been attached
# to $pyjs.modules_hash, then invokes proceed_fn (or its importDone hook).
# Transpiled to JavaScript by the pyjamas compiler -- not run as Python.
@noSourceTracking
def load_module_wait(proceed_fn, parent_mod, module_list, dynamic):
    module_list = module_list.getArray()
    JS("""
    var wait_count = 0;
    //var data = '';
    //var element = $doc.createElement("div");
    //element.innerHTML = '';
    //$doc.body.appendChild(element);
    //function write_dom(txt) {
    //    element.innerHTML += txt;
    //}
    var timeoutperiod = 1;
    if (@{{dynamic}})
        var timeoutperiod = 1;
    var wait = function() {
        wait_count++;
        //write_dom(".");
        var loaded = true;
        for (var i in @{{module_list}}) {
            if (typeof $pyjs.modules_hash[@{{module_list}}[i]] != 'function') {
                loaded = false;
                break;
            }
        }
        if (!loaded) {
            setTimeout(wait, timeoutperiod);
        } else {
            if (@{{proceed_fn}}.importDone)
                @{{proceed_fn}}.importDone(@{{proceed_fn}});
            else
                @{{proceed_fn}}();
            //$doc.body.removeChild(element);
        }
    }
    //write_dom("Loading modules ");
    wait();
    """)
# There seems to be an bug in Chrome with accessing the message
# property, on which an error is thrown
# Hence the declaration of 'var message' and the wrapping in try..catch
# Runtime bootstrap: installs the pyjslib error types (TryElse,
# StopIteration), the JS->Python exception mapper, and the JavaScript
# implementations backing Python str methods (find/join/split/strip/...).
# The JS("""...""") body below is emitted verbatim by the pyjamas compiler.
def init():
    JS("""
    @{{_errorMapping}} = function(err) {
        if (err instanceof(ReferenceError) || err instanceof(TypeError)) {
            var message = ''
            try {
                message = err.message;
            } catch ( e) {
            }
            return @{{AttributeError}}(message);
        }
        return err
    }
    @{{TryElse}} = function () { };
    @{{TryElse}}.prototype = new Error();
    @{{TryElse}}.__name__ = 'TryElse';
    @{{TryElse}}.message = 'TryElse';
    @{{StopIteration}} = function () { };
    @{{StopIteration}}.prototype = new Error();
    @{{StopIteration}}.__name__ = 'StopIteration';
    @{{StopIteration}}.message = 'StopIteration';
    @{{String_find}} = function(sub, start, end) {
        var pos=this.indexOf(sub, start);
        if (@{{isUndefined}}(end)) return pos;
        if (pos + sub.length>end) return -1;
        return pos;
    }
    @{{String_join}} = function(data) {
        var text="";
        if (@{{isArray}}(data)) {
            return data.join(this);
        }
        else if (@{{isIteratable}}(data)) {
            var iter=data.__iter__();
            try {
                text+=iter.next();
                while (true) {
                    var item=iter.next();
                    text+=this + item;
                }
            }
            catch (e) {
                if (e.__name__ != 'StopIteration') throw e;
            }
        }
        return text;
    }
    @{{String_isdigit}} = function() {
        return (this.match(/^\d+$/g) != null);
    }
    @{{String_replace}} = function(old, replace, count) {
        var do_max=false;
        var start=0;
        var new_str="";
        var pos=0;
        if (!@{{isString}}(old)) return this.__replace(old, replace);
        if (!@{{isUndefined}}(count)) do_max=true;
        while (start<this.length) {
            if (do_max && !count--) break;
            pos=this.indexOf(old, start);
            if (pos<0) break;
            new_str+=this.substring(start, pos) + replace;
            start=pos+old.length;
        }
        if (start<this.length) new_str+=this.substring(start);
        return new_str;
    }
    @{{String_split}} = function(sep, maxsplit) {
        var items=new @{{List}}();
        var do_max=false;
        var subject=this;
        var start=0;
        var pos=0;
        if (@{{isUndefined}}(sep) || @{{isNull}}(sep)) {
            sep=" ";
            subject=subject.strip();
            subject=subject.replace(/\s+/g, sep);
        }
        else if (!@{{isUndefined}}(maxsplit)) do_max=true;
        if (subject.length == 0) {
            return items;
        }
        while (start<subject.length) {
            if (do_max && !maxsplit--) break;
            pos=subject.indexOf(sep, start);
            if (pos<0) break;
            items.append(subject.substring(start, pos));
            start=pos+sep.length;
        }
        if (start<=subject.length) items.append(subject.substring(start));
        return items;
    }
    @{{String___iter__}} = function() {
        var i = 0;
        var s = this;
        return {
            'next': function() {
                if (i >= s.length) {
                    throw @{{StopIteration}};
                }
                return s.substring(i++, i, 1);
            },
            '__iter__': function() {
                return this;
            }
        };
    }
    @{{String_strip}} = function(chars) {
        return this.lstrip(chars).rstrip(chars);
    }
    @{{String_lstrip}} = function(chars) {
        if (@{{isUndefined}}(chars)) return this.replace(/^\s+/, "");
        return this.replace(new RegExp("^[" + chars + "]+"), "");
    }
    @{{String_rstrip}} = function(chars) {
        if (@{{isUndefined}}(chars)) return this.replace(/\s+$/, "");
        return this.replace(new RegExp("[" + chars + "]+$"), "");
    }
    @{{String_startswith}} = function(prefix, start, end) {
        // FIXME: accept tuples as suffix (since 2.5)
        if (@{{isUndefined}}(start)) start = 0;
        if (@{{isUndefined}}(end)) end = this.length;
        if ((end - start) < prefix.length) return false
        if (this.substr(start, prefix.length) == prefix) return true;
        return false;
    }
    @{{String_endswith}} = function(suffix, start, end) {
        // FIXME: accept tuples as suffix (since 2.5)
        if (@{{isUndefined}}(start)) start = 0;
        if (@{{isUndefined}}(end)) end = this.length;
        if ((end - start) < suffix.length) return false
        if (this.substr(end - suffix.length, suffix.length) == suffix) return true;
        return false;
    }
    @{{String_ljust}} = function(width, fillchar) {
        if (typeof(width) != 'number' ||
            parseInt(width) != width) {
            throw (@{{TypeError}}("an integer is required"));
        }
        if (@{{isUndefined}}(fillchar)) fillchar = ' ';
        if (typeof(fillchar) != 'string' ||
            fillchar.length != 1) {
            throw (@{{TypeError}}("ljust() argument 2 must be char, not " + typeof(fillchar)));
        }
        if (this.length >= width) return this;
        return this + new Array(width+1 - this.length).join(fillchar);
    }
    @{{String_rjust}} = function(width, fillchar) {
        if (typeof(width) != 'number' ||
            parseInt(width) != width) {
            throw (@{{TypeError}}("an integer is required"));
        }
        if (@{{isUndefined}}(fillchar)) fillchar = ' ';
        if (typeof(fillchar) != 'string' ||
            fillchar.length != 1) {
            throw (@{{TypeError}}("rjust() argument 2 must be char, not " + typeof(fillchar)));
        }
        if (this.length >= width) return this;
        return new Array(width + 1 - this.length).join(fillchar) + this;
    }
    @{{String_center}} = function(width, fillchar) {
        if (typeof(width) != 'number' ||
            parseInt(width) != width) {
            throw (@{{TypeError}}("an integer is required"));
        }
        if (@{{isUndefined}}(fillchar)) fillchar = ' ';
        if (typeof(fillchar) != 'string' ||
            fillchar.length != 1) {
            throw (@{{TypeError}}("center() argument 2 must be char, not " + typeof(fillchar)));
        }
        if (this.length >= width) return this;
        var padlen = width - this.length;
        var right = Math.ceil(padlen / 2);
        var left = padlen - right;
        return new Array(left+1).join(fillchar) + this + new Array(right+1).join(fillchar);
    }
    @{{abs}} = Math.abs;
    """)
| minghuascode/pyj | library/pyjslib.PyV8.py | Python | apache-2.0 | 10,519 |
''' Selection Module '''
from flask import Blueprint, jsonify, abort, request
from karmaserver.constants import MAX_KARMA_LEVEL, MAX_FILTER_LEVEL
from karmaserver.data.content_resolver import content_resolver
from karmaserver.modules.selection.provider import ObservationSelectionProvider
from karmaserver.utils import serialize_response
# Flask blueprint: all routes below are mounted under /selection.
selection = Blueprint('selection', __name__,
                      url_prefix='/selection')
# Module-wide provider shared by every request handler in this blueprint.
SELECTION_PROVIDER = ObservationSelectionProvider(MAX_KARMA_LEVEL, MAX_FILTER_LEVEL)
@selection.route('/discover', methods=['GET'])
def get_observation_for_discovery():
    ''' Selects an observation for the requesting user to discover, based on
        the `user` and `karma` query parameters; responds 204 when no valid
        observation is available. '''
    user_id, karma_level = __get_request_args_or_abort()
    observation = SELECTION_PROVIDER.select_observation_for_discover(user_id, karma_level)
    if observation:
        return serialize_response(200, 'OK', 'OK', observation)
    return serialize_response(204, 'No Content', 'No Valid Observation')
@selection.route('/vote', methods=['GET'])
def get_observation_for_vote():
    ''' Selects an observation for the requesting user to vote on, based on
        the `user` and `karma` query parameters; responds 204 when no valid
        observation is available. '''
    user_id, karma_level = __get_request_args_or_abort()
    observation = SELECTION_PROVIDER.select_observation_for_votation(user_id, karma_level)
    if observation:
        return serialize_response(200, 'OK', 'OK', observation)
    return serialize_response(204, 'No Content', 'No Valid Observation')
def __get_request_args_or_abort():
    ''' Extracts the `user` and `karma` query parameters from the current
        request, returning (user_id, karma_level) with karma as an int;
        aborts with 400 on missing or non-numeric input. '''
    user_id = request.args.get('user')
    karma_level = request.args.get('karma')
    # Guard clause: both parameters are mandatory.
    if not (user_id and karma_level):
        abort(400, 'You have to pass user and karma as url params')
    try:
        return user_id, int(karma_level)
    except ValueError:
        abort(400, 'Karma must be a number')
| mnunezdm/cazasteroides | karmaserver/modules/selection/__init__.py | Python | mit | 1,789 |
#!/usr/bin/env python
#
# This file is part of the Fun SDK (fsdk) project. The complete source code is
# available at https://github.com/luigivieira/fsdk.
#
# Copyright (c) 2016-2017, Luiz Carlos Vieira (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import csv
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
#---------------------------------------------
def _collect_face_failures(annotations_path):
    """
    Collect face-detection failures from the annotation files.

    Walks annotations_path looking for files named "<prefix>_<subject>-face.csv";
    in each one, a frame whose feature columns are all zero counts as a
    detection failure. Frame numbers are converted to minutes assuming 30 fps.

    Parameters
    ------
    annotations_path: str
        Root directory of the per-subject annotation files.

    Returns
    ------
    dict
        Maps subject id (str) to a list of failure times in minutes.
    """
    data = {}
    for dirpath, _, filenames in os.walk(annotations_path):
        for f in filenames:
            name = os.path.splitext(f)[0]
            parts = name.split('-')
            if len(parts) != 2 or parts[1] != 'face':
                continue
            subject = parts[0].split('_')[1]
            fileName = os.path.join(dirpath, f)
            print('\tfile {}...'.format(fileName))

            # A row is a failure when every feature column is zero
            # (i.e. no face landmarks were detected for that frame).
            fails = []
            with open(fileName, 'r', newline='') as file:
                reader = csv.reader(file, delimiter=',', quotechar='"',
                                    quoting=csv.QUOTE_MINIMAL)
                next(reader, None)  # Ignore header
                for row in reader:
                    if not any(float(i) for i in row[1:]):
                        # Frame number -> minutes at 30 frames per second.
                        fails.append(int(row[0]) / 30 / 60)

            data[subject] = fails
    return data

#---------------------------------------------
def main(argv):
    """
    Main entry point of this utility application.

    This is simply a function called by the checking of namespace __main__, at
    the end of this script (in order to execute only when this script is ran
    directly).

    Parameters
    ------
    argv: list of str
        Arguments received from the command line (currently unused; the
        annotations path below is hard-coded).
    """
    annotationsPath = 'C:/Users/luigi/Dropbox/Doutorado/dataset/annotation-all'
    #annotationsPath = 'C:/temp/teste'

    print('Reading data...')
    data = _collect_face_failures(annotationsPath)

    print('Plotting data...')
    # Flatten the per-subject failure lists into parallel x/y vectors for
    # the strip plot.
    subjects = []
    times = []
    for s, v in data.items():
        for t in v:
            subjects.append(int(s))
            times.append(t)

    ax = sns.stripplot(x=subjects, y=times, linewidth=1)
    ax.set_xlabel('Subjects', fontsize=15)
    ax.set_ylabel('Video Progress (in Minutes)', fontsize=15)
    ax.set_ylim([0, 10])

    # Maximize the window; 'zoomed' is Tk-specific -- presumably the TkAgg
    # backend is in use (verify on other backends).
    mng = plt.get_current_fig_manager()
    mng.window.state('zoomed')
    plt.suptitle('Face Detection Failures', fontsize=30)
    plt.show()
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:]) | luigivieira/fsdk | fsdk/reports/immersion.py | Python | mit | 3,652 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import Node, Way, Relation, Member
class OSMFactory(object):
    '''Subclass and implement to create basic OSM types'''
    def createNode(self, id):
        '''Return a new Node with the given OSM id.'''
        return Node(id)
    def createWay(self, id):
        '''Return a new Way with the given OSM id.'''
        return Way(id)
    def createRelation(self, id):
        '''Return a new Relation with the given OSM id.'''
        return Relation(id)
    def createMember(self, typ, id, role):
        '''Return a new relation Member of the given type, id and role.'''
        return Member(typ, id, role)
| larroy/osmcompiler | osm/factory.py | Python | agpl-3.0 | 425 |
from pudzu.charts import *
# Rows of the chart: one group per unique value in the CSV's `group` column,
# each holding the records belonging to that group.
df = pd.read_csv("datasets/flagshearts.csv")
groups = list(remove_duplicates(df.group))
array = [[dict(r) for _,r in df.iterrows() if r.group == g] for g in groups]
data = pd.DataFrame(array, index=list(remove_duplicates(df.group)))
# Chart-wide styling: font (falls back to sans if calibri unavailable),
# foreground/background colours and a placeholder flag image.
FONT = calibri or sans
fg, bg="black", "#EEEEEE"
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
def process(d):
    """Render one grid cell (name, optional description, flag image); None for empty cells."""
    if not d:
        return None
    # caption printed under the name, parenthesised when present
    caption = get_non(d, 'description')
    caption = "({})".format(caption) if caption else " "
    # fetch the flag and normalise it to a 198px-high image
    flag = Image.from_url_with_cache(get_non(d, 'image', default_img)).to_rgba()
    if flag.width / flag.height < 1.3:
        flag = flag.resize_fixed_aspect(height=198)
    else:
        flag = flag.resize((318,198))
    # grey border: none for coats of arms, three-sided for the Finland entries
    if "coat" in d['group']:
        border = 0
    elif "Finland" in str(d['description']):
        border = (1,1,0,1)
    else:
        border = 1
    flag = flag.pad(border, "grey")
    # narrower text box for the extra-wide Switzerland caption
    text_width = 320 if "Switzerland" not in caption else 200
    name_img = Image.from_text_bounded(d['name'].replace(r"\n","\n"), (text_width,200), 32, partial(FONT, bold=True), beard_line=True, align="center", fg=fg)
    caption_img = Image.from_text_bounded(caption, (text_width,200), 24, partial(FONT, italics=True), align="center", fg=fg)
    return Image.from_column([name_img, caption_img, flag], padding=2, bg=bg, equal_widths=True)
# Assemble the final image: title, flag grid (rows labelled unless the group
# name starts with "_"), footnotes and a watermark; then save to disk.
title = Image.from_text(f"flags with hearts".upper(), FONT(80, bold=True), fg=fg, bg=bg).pad(40, bg)
footer = Image.from_text("* actually water lilies ** mà (hand) + cor (heart), geddit?", FONT(24, italics=True), fg=fg, bg=bg).pad(20, bg)
grid = grid_chart(data, process, padding=(10,20), fg=fg, bg=bg, yalign=(0.5,1,0.5), row_label=lambda r: None if data.index[r].startswith("_") else Image.from_text("{}".format(data.index[r].replace(r"\n","\n")).upper(), FONT(32, bold=True), align="center", line_spacing=3))
img = Image.from_column([title, grid, footer], bg=bg)
img.place(Image.from_text("/u/Udzu", FONT(24), fg=fg, bg=bg, padding=5).pad((1,1,0,0), fg), align=1, padding=5, copy=False)
img.save("output/flagshearts.png")
| Udzu/pudzu | dataviz/flagshearts.py | Python | mit | 2,052 |
from __future__ import division
import numbers
import numpy as np
from numpy.lib.stride_tricks import as_strided
from warnings import warn
__all__ = ['view_as_blocks', 'view_as_windows']
def view_as_blocks(arr_in, block_shape):
    """Return a non-overlapping block view of `arr_in` (via re-striding).

    Parameters
    ----------
    arr_in : ndarray
        N-d input array.
    block_shape : tuple
        Shape of a single block; each entry must divide the matching
        dimension of `arr_in` exactly.

    Returns
    -------
    arr_out : ndarray
        View whose first N axes index the blocks and whose last N axes
        index within a block, i.e. of shape
        ``tuple(arr_in.shape // block_shape) + block_shape``.
        A copy is made first when `arr_in` is not contiguous.

    Examples
    --------
    >>> import numpy as np
    >>> A = np.arange(4*4).reshape(4,4)
    >>> B = view_as_blocks(A, block_shape=(2, 2))
    >>> B[0, 1]
    array([[2, 3],
           [6, 7]])
    >>> B[1, 0, 1, 1]
    13
    """
    # -- argument sanity checks (kept in this order so callers see the same
    #    exception on multiply-invalid input)
    if not isinstance(block_shape, tuple):
        raise TypeError('block needs to be a tuple')
    block_shape = np.array(block_shape)
    if (block_shape <= 0).any():
        raise ValueError("'block_shape' elements must be strictly positive")
    if block_shape.size != arr_in.ndim:
        raise ValueError("'block_shape' must have the same length "
                         "as 'arr_in.shape'")
    shape_in = np.array(arr_in.shape)
    if (shape_in % block_shape).sum() != 0:
        raise ValueError("'block_shape' is not compatible with 'arr_in'")
    # -- re-striding assumes contiguous memory; fall back to a copy otherwise
    if not arr_in.flags.contiguous:
        warn(RuntimeWarning("Cannot provide views on a non-contiguous input "
                            "array without copying."))
        arr_in = np.ascontiguousarray(arr_in)
    # -- outer axes step a whole block at a time (stride * block extent),
    #    inner axes reuse the original element strides
    out_shape = tuple(shape_in // block_shape) + tuple(block_shape)
    out_strides = tuple(arr_in.strides * block_shape) + arr_in.strides
    return as_strided(arr_in, shape=out_shape, strides=out_strides)
def view_as_windows(arr_in, window_shape, step=1):
    """Return a rolling-window view of `arr_in` (via re-striding).

    Consecutive windows are shifted by `step` elements along each axis, so
    windows overlap whenever the step is smaller than the window extent.

    Parameters
    ----------
    arr_in : ndarray
        N-d input array.
    window_shape : int or tuple of length arr_in.ndim
        Shape of the rolling window; an int gives a hypercube window.
    step : int or tuple of length arr_in.ndim
        Distance between the origins of consecutive windows; an int is
        applied uniformly along every axis.

    Returns
    -------
    arr_out : ndarray
        View of shape ``(n_windows per axis...) + window_shape``.  A copy
        is made first when `arr_in` is not contiguous.

    Notes
    -----
    The view itself costs no extra memory, but materialising it (e.g. by
    reshaping or copying) expands to roughly ``prod(window_shape)`` times
    the input size — treat the result as read-only.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Hyperrectangle

    Examples
    --------
    >>> import numpy as np
    >>> A = np.arange(4*4).reshape(4,4)
    >>> B = view_as_windows(A, (2, 2))
    >>> B[0, 1]
    array([[1, 2],
           [5, 6]])
    """
    # -- validate arguments (same order and messages as callers rely on)
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("`arr_in` must be a numpy ndarray")
    ndim = arr_in.ndim
    if isinstance(window_shape, numbers.Number):
        window_shape = (window_shape,) * ndim
    if len(window_shape) != ndim:
        raise ValueError("`window_shape` is incompatible with `arr_in.shape`")
    if isinstance(step, numbers.Number):
        if step < 1:
            raise ValueError("`step` must be >= 1")
        step = (step,) * ndim
    if len(step) != ndim:
        raise ValueError("`step` is incompatible with `arr_in.shape`")
    arr_shape = np.array(arr_in.shape)
    window_shape = np.array(window_shape, dtype=arr_shape.dtype)
    if ((arr_shape - window_shape) < 0).any():
        raise ValueError("`window_shape` is too large")
    if ((window_shape - 1) < 0).any():
        raise ValueError("`window_shape` is too small")
    # -- re-striding assumes contiguous memory; fall back to a copy otherwise
    if not arr_in.flags.contiguous:
        warn(RuntimeWarning("Cannot provide views on a non-contiguous input "
                            "array without copying."))
        arr_in = np.ascontiguousarray(arr_in)
    # -- outer (window-indexing) strides: the input strides scaled per-axis
    #    by the step, obtained from a strided slice of the input
    step_slices = tuple(slice(None, None, st) for st in step)
    outer_strides = arr_in[step_slices].strides
    # -- number of window positions along each axis
    n_windows = (arr_shape - window_shape) // np.array(step) + 1
    out_shape = tuple(n_windows) + tuple(window_shape)
    out_strides = tuple(outer_strides) + tuple(np.array(arr_in.strides))
    return as_strided(arr_in, shape=out_shape, strides=out_strides)
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/util/shape.py | Python | gpl-3.0 | 8,436 |
#HOW TO RUN: change variables if wanted - lines 12-13 - and run without arguments
from threading import Thread, Semaphore
from time import sleep
import sys
from collections import deque
# Thread-safe print: a single write() call per message avoids interleaving
printf = lambda x: sys.stdout.write("%s\n" % x)
## Main setup
n_leaders = 2
n_followers = 5
##
## Advanced setup
# number of times each dance is played
n_periods = 1
# time (seconds) each dance is played
dancelength = 5
# time (seconds) each dancer spends on the dancefloor
timeondancefloor = 2
# list of dances
dances = ['waltz', 'tango', 'foxtrot']
# Dancers name prefixes
leader = "Leader "
follower = "Follower"
##
## Global variables
# Global variable that allows a leader to know and print whom he is dancing with
follower_name = "Follower -1"
# rendezvous semaphores pairing one leader with one follower
leaderArrived = Semaphore(0)
followerArrived = Semaphore(0)
# Loop condition for dances; cleared by the main thread to stop all dancers
DANCETIME = True
##
## Dancefloor representation
class Dancefloor:
    """Shared dancefloor state: open/closed gate plus a couple counter.

    All state is class-level; the class is used as a singleton via its
    static methods.
    """
    # Gate semaphore: >0 means the floor is open to couples
    dancefloor_open = Semaphore(0)
    # Mutex protecting the `count` variable
    dancefloor_mtx = Semaphore(1)
    # Signalled to wake the band leader once the floor has emptied
    dancefloor_empty = Semaphore(0)
    # number of dancing couples on the dancefloor
    count = 0
    # NOTE(review): `closed` is never reassigned anywhere in this file, so
    # the `exit()` condition below always sees True — confirm intent.
    closed = True
    @staticmethod
    def open():
        # Open the gate so couples can enter.
        Dancefloor.dancefloor_open.release()
    @staticmethod
    def close():
        # Take the gate token back, then block until the floor is empty.
        Dancefloor.dancefloor_open.acquire()
        Dancefloor.dancefloor_empty.acquire()
    @staticmethod
    def enter():
        # Pass through the gate (re-releasing it for the next couple) and
        # bump the couple count under the mutex.
        Dancefloor.dancefloor_open.acquire()
        Dancefloor.dancefloor_mtx.acquire()
        Dancefloor.count += 1
        Dancefloor.dancefloor_mtx.release()
        Dancefloor.dancefloor_open.release()
    @staticmethod
    def exit():
        # Drop the couple count; the last couple out wakes the band leader.
        Dancefloor.dancefloor_mtx.acquire()
        Dancefloor.count -= 1
        if Dancefloor.count == 0 and Dancefloor.closed:
            Dancefloor.dancefloor_empty.release()
        Dancefloor.dancefloor_mtx.release()
##
## FIFO Queues for both leaders and followers
class Queues:
    """FIFO waiting lines for leaders and followers.

    Each waiting dancer enqueues a personal semaphore ("ticket"); pop()
    releases one ticket from each line, waking exactly one couple.
    """
    leadersQ = deque()
    followersQ = deque()
    # to avoid pulling from empty queues, we protect them with counting semaphores
    nleaders = Semaphore(0)
    nfollowers = Semaphore(0)
    @staticmethod
    def append(role, ticket):
        # Enqueue `ticket` in the line matching `role`, then bump that
        # line's availability counter.
        if role == leader:
            Queues.leadersQ.appendleft(ticket)
            Queues.nleaders.release()
        else:
            Queues.followersQ.appendleft(ticket)
            Queues.nfollowers.release()
    @staticmethod
    def pop():
        # Block until both lines are non-empty, then wake one leader and
        # one follower (their tickets are released).
        Queues.nleaders.acquire()
        Queues.nfollowers.acquire()
        Queues.leadersQ.pop().release()
        Queues.followersQ.pop().release()
##
## Generic dancer class -init makes it a leader or a follower-
class Dancer:
    """A dancing participant; `role` selects leader or follower behaviour."""
    def __init__(self, role, idx):
        # NOTE(review): `dancefloor_open` appears in a `global` statement in
        # run() below but no module-level name of that form exists (the
        # semaphore lives on Dancefloor) — harmless but misleading.
        global leader, leaderArrived, followerArrived
        self.role = role
        self.idx = idx
        self.name = role + " " + str(idx)
        # personal ticket released by Queues.pop() when it is our turn
        self.queue_ticket = Semaphore(0)
        if role == leader:
            self.arrivedSem = leaderArrived
            self.partnerSem = followerArrived
        else:
            self.arrivedSem = followerArrived
            self.partnerSem = leaderArrived
    def run(self):
        """Main dancer loop: queue up, dance with a partner, repeat until
        DANCETIME is cleared by the main thread."""
        global DANCETIME, dancefloor_open, timeondancefloor
        printf(self.name + " gets in line.")
        while(True):
            # registering to Queues
            Queues.append(self.role, self.queue_ticket)
            # waiting for its turn
            self.queue_ticket.acquire()
            # waiting for partner
            Dancefloor.enter()
            # check if dancing is over at wake up
            if not DANCETIME:
                printf(self.name + " PARTY IS OVER!")
                break
            printf(self.name + " entering the floor.")
            if self.role == leader:
                # `global` applies to the whole function scope, so the
                # follower branch's assignment below also targets the
                # module-level follower_name.
                global follower_name
                self.partnerSem.acquire()
                partner_name = follower_name
                self.arrivedSem.release()
                printf(self.name + " and " + partner_name + " are dancing.")
                # call to Queue to unlock next couple
                Queues.pop()
                # This configuration blocks the Leader until the follower actually arrived,
                # hence protection of the follower_name variable
            else:
                follower_name = self.name
                self.arrivedSem.release()
                self.partnerSem.acquire()
            sleep(timeondancefloor)
            Dancefloor.exit()
            printf(self.name + " gets back in line.")
##
## Band leader, switches between songs
class BandLeader:
    """Drives the music: opens the dancefloor while a dance plays and closes
    it (waiting for the floor to empty) between dances."""
    def __init__(self):
        global n_periods, dancelength
        # number of times the whole dance list is played, and the duration
        # (seconds) of each individual dance
        self.periods = n_periods
        self.length = dancelength
    def run(self):
        global dances
        # kick the first couple out of the waiting queue
        Queues.pop()
        for _ in range(0,self.periods):
            for dance in dances:
                printf("\n** Band Leader start playing " + dance + " **")
                Dancefloor.open()
                # BUG FIX: previously slept on the module-level `dancelength`,
                # silently ignoring the value captured in self.length above.
                sleep(self.length)
                Dancefloor.close()
                printf("** Band Leader stop playing " + dance + " **\n")
##
# Entry point: spawn the band leader plus all leader/follower threads, run
# the show, then shut every dancer down cleanly.
if __name__ == '__main__':
    printf("Note : the message printing and the action it describe are not atomic, so it sometimes messes things up")
    # Bandleader
    bl = BandLeader()
    bandleader_th = Thread(target= bl.run)
    dancers = []
    # Leaders
    leaders_th = []
    for i in range(0, n_leaders):
        d = Dancer(leader,i)
        dancers.append(d)
        leaders_th.append(Thread(target = d.run))
        leaders_th[i].start()
    # Followers
    followers_th = []
    for i in range(0, n_followers):
        d = Dancer(follower,i)
        dancers.append(d)
        followers_th.append(Thread(target = d.run))
        followers_th[i].start()
    bandleader_th.start()
    bandleader_th.join()
    # wait for the last couple to finish their dance
    sleep(timeondancefloor)
    DANCETIME = False
    # wake up all dancers
    # when exiting the band leader still holds the dancefloor_open mutex
    Dancefloor.open()
    for d in dancers:
        d.queue_ticket.release()
    # thread join on all dancers
    for i in range(0, n_leaders):
        leaders_th[i].join()
    for i in range(0, n_followers):
        followers_th[i].join()
print("\n///---------\\\\\\\n SUCCESS\n\\\\\\---------///") | ThomasDq/xv6OS | python/src/2_dance_mixer.py | Python | mit | 6,735 |
# -*- coding: utf-8 -*-
from math import *
from hotspin import *
# Simulation grid: 512x512x1 cells covering a 512nm x 512nm x 15nm volume.
Nx = 512
Ny = 512
Nz = 1
sX = 512e-9
sY = 512e-9
sZ = 15e-9
# per-cell size along each axis
csX = sX / Nx
csY = sY / Ny
csZ = sZ / Nz
# set mesh size
setgridsize(Nx, Ny, Nz)
# set cell size
setcellsize(csX, csY, csZ)
# enable PBC in-plane
setperiodic(1, 1, 0)
# load heat equation for electrons
load('temperature/ETM')
# load heat diffusion term for electrons
load('temperature/ETM/diffusion')
# add heat diffusion term to electron' heat equation
add_to('Qe', 'Qe_spat')
# use Adams-Moulton solver as it is way more stable than Runge-Kutta
load('solver/am12')
setv('Te_maxabserror', 1e-5)
setv('Te_maxrelerror', 1e-5)
savegraph("deps.png")
# setup initial profile of the temperature: a 300 K hot square
# (cells 129..383 in-plane) embedded in a 10 K background
T = makearray(1, Nx, Ny, Nz)
for i in range(Nx):
    for j in range(Ny):
        for k in range(Nz):
            val = 10.0
            if (i > 128) and (i < 384):
                if (j > 128) and (j < 384):
                    val = 300.0
            T[0][i][j][k] = val
setarray('Te', T)
# save initial temperature profile
save('Te', "dump", [])
# set volume-specific heat capacity of electrons, [J/(K*m^3)]
setv('Ce', 1.0e6)
# set heat conductivity of electrons, [W/(K*m)]
setv('Ke', 91.0)
# schedule output every picosecond
autosave("Te", "dump", [], 1e-12)
autotabulate(["t", "<Te>"], "Te.dat", 1e-12)
# set initial time step
setv('dt', 1e-18)
# adjust solver's time step bounds
setv('mindt', 1e-22)
run(1.0e-11)
# print runtime statistics
printstats()
sync()
| godsic/hotspin | examples/heat-diffusion.py | Python | gpl-3.0 | 1,484 |
#!/usr/bin/env python
#Copyright (C) 2009-2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Released under the MIT license, see LICENSE.txt
import unittest
import os
import xml.etree.ElementTree as ET
from sonLib.bioio import TestStatus
from sonLib.bioio import getLogLevelString
from cactus.shared.test import getCactusInputs_random
from cactus.shared.test import getCactusInputs_blanchette
from cactus.shared.test import runWorkflow_multipleExamples
from cactus.shared.test import silentOnSuccess
from cactus.shared.common import cactusRootPath
from sonLib.bioio import getTempFile
from cactus.shared.common import cactus_call
class TestCase(unittest.TestCase):
    """Reference-phase tests exercising different matching algorithms.

    NOTE(review): the skipped tests below call `testCactus_Random` /
    `testCactus_Blanchette` as bare names, but those are methods — if the
    skips were removed, the calls would raise NameError (they should be
    `self.testCactus_Random(...)`).  Also, because those two helpers are
    named `test*` yet take an extra argument, unittest discovery collects
    and fails them — consider renaming them with a leading underscore.
    TODO confirm before changing.
    """
    @unittest.skip('too slow')
    @silentOnSuccess
    def testCactus_Random_Greedy(self):
        testCactus_Random(self, "greedy")
    @unittest.skip('too slow')
    @silentOnSuccess
    def testCactus_Random_Blossum(self):
        testCactus_Random(self, "blossom5")
    @unittest.skip('too slow')
    @silentOnSuccess
    def testCactus_Random_MaxCardinality(self):
        testCactus_Random(self, "maxCardinality")
    @unittest.skip('too slow')
    @silentOnSuccess
    def testCactus_Random_MaxWeight(self):
        testCactus_Random(self, "maxWeight")
    @unittest.skip('too slow')
    @silentOnSuccess
    def testCactus_Blanchette_Blossum(self):
        testCactus_Blanchette(self, "blossom5")
    def testCuTest(self):
        # Run the C-level reference unit tests.
        cactus_call(parameters=["referenceTests", getLogLevelString()])
    def testCactus_Blanchette(self, matchingAlgorithm):
        # Helper: run the Blanchette workflow with a temporary config that
        # selects `matchingAlgorithm`.
        configFile = getConfigFile(matchingAlgorithm)
        runWorkflow_multipleExamples(getCactusInputs_blanchette,
                                     testRestrictions=(TestStatus.TEST_SHORT,), inverseTestRestrictions=True,
                                     buildReference=True,
                                     configFile=configFile)
        os.remove(configFile)
    def testCactus_Random(self, matchingAlgorithm):
        # Helper: run the random-input workflow with a temporary config that
        # selects `matchingAlgorithm`.
        configFile = getConfigFile(matchingAlgorithm)
        runWorkflow_multipleExamples(getCactusInputs_random,
                                     testNumber=TestStatus.getTestSetup(),
                                     buildReference=True,
                                     configFile=configFile)
        os.remove(configFile)
def getConfigFile(matchingAlgorithm="greedy"):
    """Write a temporary cactus config with the given reference matching
    algorithm and return its absolute path.

    The caller is responsible for removing the returned file.
    """
    tempConfigFile = getTempFile(rootDir="./", suffix=".xml")
    config = ET.parse(os.path.join(cactusRootPath(), "cactus_progressive_config.xml")).getroot()
    # Set the matching algorithm
    config.find("reference").attrib["matching_algorithm"] = matchingAlgorithm
    # Now print the file.. Use a context manager so the handle is closed
    # even if serialisation raises (the original leaked it on error).
    with open(tempConfigFile, 'w') as fileHandle:
        ET.ElementTree(config).write(fileHandle)
    return os.path.abspath(tempConfigFile)
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| benedictpaten/cactus | src/cactus/reference/cactus_referenceTest.py | Python | mit | 2,803 |
import copy
import inspect
import os
import sys
import unittest
import uuid
rootDirectory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if rootDirectory not in sys.path:
sys.path.insert(0, rootDirectory)
from oxford.emotion import Emotion
class TestEmotion(unittest.TestCase):
    '''Tests the project oxford Emotion API self.client'''
    # NOTE(review): these tests require the OXFORD_EMOTION_API_KEY
    # environment variable and live network access (remote image URL and
    # API calls) — they are integration tests, not unit tests.
    @classmethod
    def setUpClass(cls):
        # set up self.client for tests
        cls.client = Emotion(os.environ['OXFORD_EMOTION_API_KEY'])
        cls.localFilePrefix = os.path.join(rootDirectory, 'tests', 'images')
        # set common recognize options
        cls.recognizeOptions = {
            'faceRectangles': ''
        }
    #
    # test the recognize API
    #
    def _verifyRecognize(self, recognizeResult):
        # Shared assertion helper: every result entry must carry a face
        # rectangle and a scores object.
        for emotionResult in recognizeResult:
            self.assertIsInstance(emotionResult['faceRectangle'], object, 'face rectangle is returned')
            scores = emotionResult['scores']
            self.assertIsInstance(scores, object, 'scores are returned')
    def test_emotion_recognize_url(self):
        # Recognition from a remote image URL.
        options = copy.copy(self.recognizeOptions)
        options['url'] = 'https://upload.wikimedia.org/wikipedia/commons/1/19/Bill_Gates_June_2015.jpg'
        recognizeResult = self.client.recognize(options)
        self._verifyRecognize(recognizeResult)
    def test_emotion_recognize_file(self):
        # Recognition from a local file path.
        options = copy.copy(self.recognizeOptions)
        options['path'] = os.path.join(self.localFilePrefix, 'face1.jpg')
        recognizeResult = self.client.recognize(options)
        self._verifyRecognize(recognizeResult)
    def test_emotion_recognize_stream(self):
        # Recognition from raw image bytes.
        options = copy.copy(self.recognizeOptions)
        with open(os.path.join(self.localFilePrefix, 'face1.jpg'), 'rb') as file:
            options['stream'] = file.read()
        recognizeResult = self.client.recognize(options)
        self._verifyRecognize(recognizeResult)
    def test_emotion_recognize_throws_invalid_options(self):
        # Missing url/path/stream must raise.
        self.assertRaises(Exception, self.client.recognize, {})
| chidochipotle/oxford | tests/test_emotion.py | Python | mit | 2,094 |
""" The bellman ford algorithm for calculating single source shortest
paths - CLRS style """
# Weighted digraph as adjacency dict: graph[u][v] is the weight of edge u->v.
# This is the CLRS Figure 24.4 example (negative edges, no negative cycle).
graph = {
        's' : {'t':6, 'y':7},
        't' : {'x':5, 'z':-4, 'y':8 },
        'y' : {'z':9, 'x':-3},
        'z' : {'x':7, 's': 2},
        'x' : {'t':-2}
        }
INF = float('inf')
# Module-level algorithm state, mutated in place by the functions below:
# dist[v] = current shortest-path estimate, predecessor[v] = parent on path.
dist = {}
predecessor = {}
def initialize_single_source(graph, s):
    # Reset the shared dist/predecessor tables: every vertex starts at
    # infinity with no parent, except the source s at distance 0.
    for v in graph:
        dist[v] = INF
        predecessor[v] = None
    dist[s] = 0
def relax(graph, u, v):
    # Edge relaxation (CLRS RELAX): shorten dist[v] via u when the path
    # through u is cheaper, recording u as v's predecessor.
    if dist[v] > dist[u] + graph[u][v]:
        dist[v] = dist[u] + graph[u][v]
        predecessor[v] = u
def bellman_ford(graph, s):
    # Single-source shortest paths with negative edge weights allowed.
    # Relax every edge |V|-1 times, then do one more pass: any further
    # improvement proves a negative-weight cycle reachable from s.
    # Results land in the module-level dist/predecessor dicts.
    initialize_single_source(graph, s)
    edges = [(u, v) for u in graph for v in graph[u].keys()]
    number_vertices = len(graph)
    for i in range(number_vertices-1):
        for (u, v) in edges:
            relax(graph, u, v)
    for (u, v) in edges:
        if dist[v] > dist[u] + graph[u][v]:
            return False # there exists a negative cycle
    return True
def get_distances(graph, s):
    # Convenience wrapper: return the dist table on success, or an error
    # string when a negative cycle was detected.
    if bellman_ford(graph, s):
        return dist
    return "Graph contains a negative cycle"
# Demo run (Python 2 print statement — this file targets Python 2).
print get_distances(graph, 's')
| Bernardinhouessou/Projets_Autres | Python-Projets/Scripts/Algorithms-master/dp/bellman_ford.py | Python | mit | 1,115 |
__author__ = 'zhyf'
import numpy as np
# import numpy as n
import sys
import getopt as opt
from util import *
from math import sqrt, ceil, floor
import os
from gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from options import *
from PIL import Image
import pickle
# import Image
class ShowNetError(Exception):
    """Raised for errors while inspecting/running a trained net."""
    pass
class ShowPredction(ConvNet):
    """ConvNet wrapper that slides detection windows over door images and
    classifies each crop.  (Class name "Predction" is [sic].)"""
    def __init__(self, op, load_dic):
        ConvNet.__init__(self, op, load_dic)
    def get_gpus(self):
        # A GPU is only required when the net actually runs (showing
        # predictions or dumping features).
        self.need_gpu = self.op.get_value('show_preds') or self.op.get_value('write_features')
        if self.need_gpu:
            ConvNet.get_gpus(self)
        print 'finish_0'
    def init_data_providers(self):
        # No-op stand-in used when the model is not actually run.
        class Dummy:
            def advance_batch(self):
                pass
        if self.need_gpu:
            ConvNet.init_data_providers(self)
        else:
            self.train_data_provider = self.test_data_provider = Dummy()
        # self.train_data_provider = self.test_data_provider = Dummy()
        print 'finish_1'
    def import_model(self):
        # Only load the compiled model library when a GPU run is needed.
        if self.need_gpu:
            ConvNet.import_model(self)
        print 'finish_2'
    def init_model_state(self):
        #ConvNet.init_model_state(self)
        # Resolve the layer indices used later ("sotmax_idx" is [sic]).
        if self.op.get_value('show_preds'):
            self.sotmax_idx = self.get_layer_idx(self.op.get_value('show_preds'), check_type='softmax')
        if self.op.get_value('write_features'):
            self.ftr_layer_idx = self.get_layer_idx(self.op.get_value('write_features'))
        print 'finish_3'
    def init_model_lib(self):
        # Initialise the GPU model library only when needed.
        if self.need_gpu:
            ConvNet.init_model_lib(self)
        print 'finish_4'
    # def file_ratio(self,filename):
    #     if filename.endswith(0.2)
    def data_reading(self,pic_path):
        """Classify pre-cropped window images stored beside `pic_path`.

        Expects a sibling directory named after `pic_path`'s stem, whose
        subdirectories (named by a window-height ratio, parsed via
        float(i)) contain .jpg crops resized here to 60x60.  Detections
        are drawn into per-window images and appended to
        door_posiztion.txt ("posiztion" is [sic]).

        NOTE(review): returns the str '0' when nothing is found but the
        int 1 on detection (flag=1 below) — callers comparing strings will
        never match; confirm intended type.  `path_t` used for the pickle
        dump is not defined anywhere in this file, so that save always
        fails into the bare except.  Python 2 code (print statements,
        file()).
        """
        zoom_ratio = 0.2
        file1 = file('.//storage2//door//test//batches.meta', 'rb')
        meta_cifa = cPickle.load(file1)
        file1.close()
        inputImage = Image.open(pic_path)
        cut_path,cut_dir = os.path.split(pic_path)
        cut_dir_name = cut_dir.split('.')
        W_size,H_size = inputImage.size
        print cut_path
        path = cut_path+'\\'+cut_dir_name[0]+'\\'
        dir_list = os.listdir(path)
        door_posiztion = []
        flag = '0'
        for i in dir_list:
            print i
            data =[]
            data_list=[]
            label_list=[]
            dir_path= path + i + "\\"
            file_list = os.listdir(dir_path)
            file_splite = i.split('.')
            box_size_H = int(float(i)*H_size)
            box_size_W = int(box_size_H/2)
            step_size = int(box_size_W/10)
            # [win_w, win_h, step, windows per row, windows per column]
            num_size_list = [box_size_W,box_size_H,step_size,(W_size-box_size_W)/step_size+1,(H_size-box_size_H)/step_size+1]
            for j in file_list:
                if j.endswith('.jpg'):
                    image_org_data = Image.open(dir_path+j)
                    image_data = image_org_data.resize((60,60),Image.ANTIALIAS)
                    try:
                        r, g, b = image_data.split()
                        reseqImage = list(r.getdata()) + list(g.getdata()) + list(b.getdata())
                        data_list.append(reseqImage)
                        # filepath,filename = os.path.split(pic_path)
                        # print filename,model.ClassNameTest(filename)
                        label_list.extend('0')
                        # print reseqImage
                    except:
                        return '1'
            print len(data_list)
            preds = n.zeros((len(data_list), 6), dtype=n.single)
            data_array = np.array(data_list, dtype = np.float32)
            T_data = data_array.T
            # Mean-centred data, labels and an output buffer the model
            # writes the softmax predictions into.
            data.append(n.require((T_data - meta_cifa['data_mean']), dtype=n.single, requirements='C'))
            # filepath,filename = os.path.split(pic_path)
            # print filename,model.ClassNameTest(filename)
            data.append(n.require((np.array([label_list])),dtype=n.single, requirements='C'))
            data.append(preds)
            print data[0].shape,data[1].shape,data[2].shape
            # print data
            # temp = data[0]
            # print temp.ndim
            # print temp.shape,temp.size
            self.libmodel.startFeatureWriter(data, self.sotmax_idx)
            self.finish_batch()
            try:
                out_file = file(path_t+i, 'wb')
                pickle.dump(data, out_file)
                out_file.close()
            except:
                print 'can not save'
            label_names = meta_cifa['label_names']
            flag= '0'
            temp_image = inputImage.copy()
            for l in range(0,len(data_list)):
                # highest-probability (score, label) pair for window l
                img_labels = sorted(zip(preds[l,:], label_names), key=lambda x: x[0])[-1:]
                # print img_labels
                if img_labels[0][1] == 'Right 14':
                    x = '5'
                elif img_labels[0][1] == 'Right 18.4':
                    x = '4'
                elif img_labels[0][1] == 'Front':
                    x = '3'
                elif img_labels[0][1] == 'Left 18.4':
                    x = '2'
                elif img_labels[0][1] == 'Left 14':
                    x = '1'
                elif img_labels[0][1] == 'No door':
                    x = '0'
                if not x == '0' and img_labels[0][0]>=0.85:
                    print x
                    flag=1
                    print num_size_list[-2],num_size_list[2],num_size_list
                    # top-left / bottom-right corners of window l
                    p_h_t = l/num_size_list[-2]*num_size_list[2]
                    p_w_t = (l-l/num_size_list[-2]*num_size_list[-2])*num_size_list[2]
                    p_h_b = p_h_t+num_size_list[1]
                    p_w_b = p_w_t+num_size_list[0]
                    door_posiztion.append([p_w_t,p_h_t,p_w_b,p_h_b,i,l,x,img_labels[0][1],img_labels[0][0]])
                    # draw a 5px white frame around the detection
                    box_item =[(p_w_t,p_h_t,p_w_b,p_h_t+5),(p_w_t,p_h_t,p_w_t+5,p_h_b),(p_w_t,p_h_b-5,p_w_b,p_h_b),(p_w_b-5,p_h_t,p_w_b,p_h_b)]
                    for j in box_item:
                        temp_image.paste('white',j)
                    temp_image.save('.\\test_result\\'+i+'_'+str(l)+'_'+img_labels[0][1]+'.jpg')
                    try:
                        temp_image = None
                    except:
                        pass
                    else:
                        pass
                    finally:
                        pass
                else:
                    pass
        # for k in range(n+1,len(door_posiztion)):
        #     if door_posiztion[n][0] - door_posiztion[k][0]
        # position = n.zeros(W_size,H_size)
        # for n in range(0,len(door_posiztion)):
        #     position[door_posiztion[n][0],door_posiztion[n][1]] = 1
        my_result = open('door_posiztion.txt', 'w')
        for line in door_posiztion:
            print >>my_result, line
        my_result.close()
        return flag
    # NOTE(review): class-level statement — runs once at class definition.
    print 'test'
    def show_predictions(self, pic_path):
        """Slide multi-scale windows over `pic_path`, classify each crop and
        mark detections on the (zoomed) image.

        NOTE(review): the detection branch below references
        `door_position_H` (undefined anywhere in this file) and does
        `p_h_b += ...` / `p_w_b += ...` before those names are ever bound,
        so reaching it raises NameError/UnboundLocalError — this path looks
        dead or unfinished; confirm before relying on it.
        """
        data = []
        zoom_ratio = 0.2
        file1 = file('.//storage2//door//test//batches.meta', 'rb')
        meta_cifa = cPickle.load(file1)
        file1.close()
        # side length of the square net input, from the stored data mean
        data_size = int(sqrt((meta_cifa['data_mean'].size)/3))
        # print data_size
        inputImage = Image.open(pic_path)
        W_size,H_size = inputImage.size
        inputImage = inputImage.resize((int(W_size*zoom_ratio),int(H_size*zoom_ratio)),Image.ANTIALIAS)
        print W_size,H_size
        # window heights to try, as fractions of the image height
        box_list = [0.9,0.8,0.7,0.6,0.5,0.4]
        data_list=[]
        label_list=[]
        num_size_list=[]
        filepath,filename = os.path.split(pic_path)
        for i in box_list:
            print i
            box_size_H = int(H_size*i)
            box_size_W = box_size_H / 2
            step_size = box_size_W / 10
            num_size_list.append([box_size_W,box_size_H,step_size,int((W_size-box_size_W)/step_size)+1,int((H_size-box_size_H)/step_size)+1])
            for j in range(0,int((H_size-box_size_H)/step_size)+1):
                for k in range(0,int((W_size-box_size_W)/step_size)+1):
                    box = (step_size*j,step_size*k,(j+1)*box_size_W,(k+1)*box_size_H)
                    region = inputImage.crop(box)
                    small_image = region.resize((data_size, data_size),Image.ANTIALIAS)
                    try:
                        r, g, b = small_image.split()
                        reseqImage = list(r.getdata()) + list(g.getdata()) + list(b.getdata())
                        data_list.append(reseqImage)
                        # filepath,filename = os.path.split(pic_path)
                        # print filename,model.ClassNameTest(filename)
                        label_list.extend('0')
                        # print reseqImage
                    except:
                        return '1'
        # pixel_image = []
        # for pixel in reseqImage:
        #     pixel_image.append([pixel])
        print len(data_list)
        preds = n.zeros((len(data_list), 6), dtype=n.single)
        data_array = np.array(data_list, dtype = np.float32)
        T_data = data_array.T
        data.append(n.require((T_data - meta_cifa['data_mean']), dtype=n.single, requirements='C'))
        # filepath,filename = os.path.split(pic_path)
        # print filename,model.ClassNameTest(filename)
        data.append(n.require((np.array(label_list)), dtype=n.single, requirements='C'))
        data.append(preds)
        try:
            filename_list=filename.split('.')
            out_file = file(filename_list[0], 'wb')
            pickle.dump(data, out_file)
            out_file.close()
        except:
            print 'can not save'
        # print data
        # temp = data[0]
        # print temp.ndim
        # print temp.shape,temp.size
        self.libmodel.startFeatureWriter(data, self.sotmax_idx)
        self.finish_batch()
        label_names = meta_cifa['label_names']
        flag= '0'
        for i in range(0,len(data_list)):
            img_labels = sorted(zip(preds[i,:], label_names), key=lambda x: x[0])[-1:]
            # print img_labels
            if img_labels[0][1] == 'Right 14':
                x = '5'
            elif img_labels[0][1] == 'Right 18.4':
                x = '4'
            elif img_labels[0][1] == 'Front':
                x = '3'
            elif img_labels[0][1] == 'Left 18.4':
                x = '2'
            elif img_labels[0][1] == 'Left 14':
                x = '1'
            elif img_labels[0][1] == 'No door':
                x = '0'
            if not x == '0':
                p_h_t = i/num_size_list[i][-2]*num_size_list[i][2]
                p_w_t = (i-door_position_H*num_size_list[i][-2])*num_size_list[i][2]
                p_h_b += num_size_list[i][1]
                p_w_b += num_size_list[i][0]
                box_item =[(p_w_t,p_h_t,p_w_b,p_h_t+2),(p_w_t,p_h_t,p_w_t+2,p_h_b),(p_w_t,p_h_b-2,p_w_b,p_h_b),(p_w_b-2,p_h_t,p_w_b,p_h_b)]
                for j in box_item:
                    inputImage.paste('white',j)
                inputImage.save('G:\\jinshan\\test_result\\002\\11'+img_labels[0][1]+'.jpg')
                flag=x
                print x
        return flag
    # NOTE(review): class-level statement — runs once at class definition.
    print 'finish_5'
    @classmethod
    def get_options_parser(cls):
        """Build the option parser: start from ConvNet's options, keep only
        the generic ones and add the show/feature-dump specific flags."""
        op = ConvNet.get_options_parser()
        for option in list(op.options):
            if option not in ('gpu', 'load_file', 'train_batch_range', 'test_batch_range'):
                op.delete_option(option)
        op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
        op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
        op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
        op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
        op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
        op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
        op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
        op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
        op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
        op.add_option("write-features", "write_features", StringOptionParser, "Write test data features from given layer", default="", requires=['feature-path'])
        op.add_option("feature-path", "feature_path", StringOptionParser, "Write test data features to this path (to be used with --write-features)", default="")
        op.options['load_file'].default = None
        op.options['load_file'].value_given = False
        return op
    # NOTE(review): class-level statement — runs once at class definition.
    print 'finish_6'
    def ClassNameTest(self,item):
        """Map a test filename suffix to its ground-truth class label
        ('0'..'5'); '0' means no door."""
        if item.endswith("testr2p.jpg"):
            return '5'
        elif item.endswith("testr1p.jpg"):
            return '4'
        elif item.endswith("test0p.jpg"):
            return '3'
        elif item.endswith("testl2p.jpg"):
            return '2'
        elif item.endswith("testl1p.jpg"):
            return '1'
        else:
            return '0'
    # NOTE(review): class-level statement — runs once at class definition.
    print 'finish_7'
# def ClassNameTest(item):
# if item.endswith("testp.jpg"):
# return [1]
# else:
# return [0]
def show_predict_dir(load_path):
    """Classify every .JPG in *load_path* and print a running error ratio.

    Relies on module-level globals defined elsewhere in the script:
    `model` (a ShowPredction instance), `os`, `UnpickleError`,
    `ShowNetError` and `opt`.  NOTE(review): indentation reconstructed
    from a whitespace-mangled source -- verify loop/branch nesting
    against the original file.
    """
    try:
        error = 0
        file_list = os.listdir(load_path)
        result = []
        # class_file = file('./baidu_result/train-origin-pics-labels.txt', 'rb').readlines()
        i = 0
        P_num = 0  # count of samples whose ground-truth label is not '0'
        for item in file_list:
            i = i + 1;
            # print item
            # NOTE(review): case-sensitive check -- lowercase '.jpg' files
            # are counted in `i` but never classified.
            if item.endswith('.JPG'):
                # picture_number = item[0:len(item)-4]
                # picture_index = int(picture_number) - 1
                # if picture_index % 1000 == 0:
                #     print picture_index
                n = os.path.join(load_path, item)
                print item
                # door = model.show_predictions(n)
                door = model.data_reading(n)
                # for l_lable in door_label:
                # result.append(item + ';' +model.ClassNameTest(item)+';'+ door + '\n')
                ground_truth = model.ClassNameTest(item)
                print ground_truth, door
                if not model.ClassNameTest(item) == '0':
                    P_num += 1
                if not door == ground_truth:
                    error += 1
                # Running error ratio over ALL directory entries seen so
                # far (i), not only the classified .JPG files.
                erro_ratio = float(error) / i
                print erro_ratio
        print i, P_num, len(result), error
        # result.append('error_ratio:'+str(erro_ratio)+' Positive_num:'+str(P_num)+' total_num:'+str(i))
        # myreslut = sorted(result, key=lambda result:result[0])
        # if P_num<2000:
        #     my_result = file('myresult_p.txt', 'wb')
        # else:
        #     my_result = file('myresult_n.txt', 'wb')
        # my_result.writelines(myreslut)
        # my_result.close()
    except (UnpickleError, ShowNetError, opt.GetoptError), e:
        # Best-effort error report; the script continues to the summary.
        print "----------------"
        print "Error:"
        print e
print 'finish_8'
# Script entry point: build the option parser, restore the trained model,
# then evaluate it over the directory of test images.
op = ShowPredction.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowPredction(op, load_dic)
print os.path.exists("G:\\door_data_sampling\\posture\\data_pos\\test\\test_value_p\\")
show_predict_dir('G:\\door_data_sampling\\posture\\test\\org_data\\')
# Authors: Karl MacMillan <kmacmillan@mentalrootkit.com>
#
# Copyright (C) 2006 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import string
import itertools
# OVERVIEW
#
# This file contains objects and functions used to represent the reference
# policy (including the headers, M4 macros, and policy language statements).
#
# This representation is very different from the semantic representation
# used in libsepol. Instead, it is a more typical abstract representation
# used by the first stage of compilers. It is basically a parse tree.
#
# This choice is intentional as it allows us to handle the unprocessed
# M4 statements - including the $1 style arguments - and to more easily generate
# the data structures that we need for policy generation.
#
# Constants for referring to fields
SRC_TYPE  = 0
TGT_TYPE  = 1
OBJ_CLASS = 2
PERMS     = 3
ROLE      = 4
DEST_TYPE = 5

# String representations of the above constants, and the reverse mapping.
field_to_str = ["source", "target", "object", "permission", "role", "destination" ]
str_to_field = { "source" : SRC_TYPE, "target" : TGT_TYPE, "object" : OBJ_CLASS,
                 "permission" : PERMS, "role" : ROLE, "destination" : DEST_TYPE }
# Base Classes
class PolicyBase:
    """Common base for all parse-tree objects.

    Holds a reference to the parent object (None for top-level objects)
    and an optional Comment attached to the statement.
    """
    def __init__(self, parent=None):
        # Bug fix: the parent argument was accepted but discarded
        # (self.parent was unconditionally set to None).
        self.parent = parent
        self.comment = None
class Node(PolicyBase):
    """Base class objects produced from parsing the reference policy.

    The Node class is used as the base class for any non-leaf
    object produced by parsing the reference policy. This object
    should contain a reference to its parent (or None for a top-level
    object) and 0 or more children.

    The general idea here is to have a very simple tree structure. Children
    are not separated out by type. Instead the tree structure represents
    fairly closely the real structure of the policy statements.

    The object should be iterable - by default over all children but
    subclasses are free to provide additional iterators over a subset
    of their children (see Interface for example).
    """
    def __init__(self, parent=None):
        PolicyBase.__init__(self, parent)
        self.children = []

    def __iter__(self):
        return iter(self.children)

    # Not all of the iterators will return something on all Nodes, but
    # they won't explode either. Putting them here is just easier.
    # NOTE(review): itertools.ifilter is Python-2-only; this module is
    # written for Python 2 throughout.

    # Top level nodes
    def nodes(self):
        return itertools.ifilter(lambda x: isinstance(x, Node), walktree(self))

    def modules(self):
        return itertools.ifilter(lambda x: isinstance(x, Module), walktree(self))

    def interfaces(self):
        return itertools.ifilter(lambda x: isinstance(x, Interface), walktree(self))

    def templates(self):
        return itertools.ifilter(lambda x: isinstance(x, Template), walktree(self))

    def support_macros(self):
        return itertools.ifilter(lambda x: isinstance(x, SupportMacros), walktree(self))

    # Common policy statements
    def module_declarations(self):
        return itertools.ifilter(lambda x: isinstance(x, ModuleDeclaration), walktree(self))

    def interface_calls(self):
        return itertools.ifilter(lambda x: isinstance(x, InterfaceCall), walktree(self))

    def avrules(self):
        return itertools.ifilter(lambda x: isinstance(x, AVRule), walktree(self))

    def typerules(self):
        return itertools.ifilter(lambda x: isinstance(x, TypeRule), walktree(self))

    def typeattributes(self):
        """Iterate over all of the TypeAttribute children of this Interface."""
        return itertools.ifilter(lambda x: isinstance(x, TypeAttribute), walktree(self))

    def requires(self):
        return itertools.ifilter(lambda x: isinstance(x, Require), walktree(self))

    def roles(self):
        return itertools.ifilter(lambda x: isinstance(x, Role), walktree(self))

    def __str__(self):
        # Prepend the attached comment block, when one exists.
        if self.comment:
            return str(self.comment) + "\n" + self.to_string()
        else:
            return self.to_string()

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__, self.to_string())

    def to_string(self):
        # Subclasses override this with their policy-language rendering.
        return ""
class Leaf(PolicyBase):
    """Base class for terminal (childless) policy statements."""

    def __init__(self, parent=None):
        PolicyBase.__init__(self, parent)

    def __str__(self):
        # Render the attached comment block (if any) above the statement.
        text = self.to_string()
        if not self.comment:
            return text
        return str(self.comment) + "\n" + text

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__, self.to_string())

    def to_string(self):
        # Subclasses override this with their policy-language rendering.
        return ""
# Utility functions
def walktree(node, depthfirst=True, showdepth=False, type=None):
    """Iterate over a Node and its children.

    Yields every object reachable from *node* (including *node* itself),
    depth first by default or breadth first when depthfirst is false.
    When *showdepth* is true, (object, depth) pairs are yielded instead.
    *type* restricts which children are expanded/returned.

    This only works correctly for trees - arbitrary graphs will likely
    cause infinite looping.
    """
    # Depth first pops from the end of the pending list (stack behaviour);
    # breadth first pops from the front (queue behaviour).
    pop_index = -1 if depthfirst else 0
    pending = [(node, 0)]

    while pending:
        current, depth = pending.pop(pop_index)
        if showdepth:
            yield current, depth
        else:
            yield current

        # Only Node instances have children to expand; anything else is a
        # leaf.  Children are queued in reverse so the depth-first pop
        # visits them in document order.
        if isinstance(current, Node):
            pending.extend(
                (child, depth + 1)
                for child in reversed(current.children)
                if type is None or isinstance(child, type))
def walknode(node, type=None):
    """Iterate over the direct children of a Node.

    Unlike walktree this neither yields the passed-in node itself nor
    recurses into grandchildren - only the immediate children are
    produced, optionally filtered by *type*.
    """
    for child in node:
        if type is None or isinstance(child, type):
            yield child
def list_to_space_str(s, cont=('{', '}')):
    """Render a sequence as an SELinux space-separated list.

    A multi-element sequence is wrapped in the *cont* delimiter pair,
    e.g. ['read', 'write'] -> '{ read write }'; a single element is
    returned bare.  Raises ValueError on an empty sequence.
    """
    if not s:
        raise ValueError("cannot convert 0 len set to string")
    joined = " ".join(s)
    if len(s) == 1:
        return joined
    return "%s %s %s" % (cont[0], joined, cont[1])
def list_to_comma_str(s):
    """Render a sequence as a comma-separated list, e.g. ['a','b'] -> 'a, b'.

    Raises ValueError on an empty sequence.
    """
    if len(s) < 1:
        # Fixed typo in the error message ("conver" -> "convert").
        raise ValueError("cannot convert 0 len set to comma string")
    return ", ".join(s)
# Basic SELinux types
class IdSet(set):
    """A set of identifier strings with SELinux string-rendering helpers.

    NOTE(review): the attribute is spelled `compliment` (sic); renaming
    it would break external users, so the spelling is kept.
    """
    def __init__(self, list=None):
        # `list` shadows the builtin but is kept for interface
        # compatibility with existing callers.
        if list:
            set.__init__(self, list)
        else:
            set.__init__(self)
        self.compliment = False

    def to_space_str(self):
        # '{ a b }' form (bare element when there is only one id).
        return list_to_space_str(self)

    def to_comma_str(self):
        # 'a, b' form.
        return list_to_comma_str(self)
class SecurityContext(Leaf):
    """An SELinux security context with optional MCS / MLS fields."""

    def __init__(self, context=None, parent=None):
        """Create a SecurityContext object, optionally from a string.

        Parameters:
        [context] - string representing a security context. Same format
           as a string passed to the from_string method.
        """
        Leaf.__init__(self, parent)
        self.user = ""
        self.role = ""
        self.type = ""
        self.level = ""
        if context is not None:
            self.from_string(context)

    def from_string(self, context):
        """Parse a string representing a context into a SecurityContext.

        The string should be in the standard format - e.g.,
        'user:role:type:level'.

        Raises ValueError if the string is not parsable as a security
        context.
        """
        fields = context.split(":")
        if len(fields) < 3:
            raise ValueError("context string [%s] not in a valid format" % context)

        self.user = fields[0]
        self.role = fields[1]
        self.type = fields[2]
        if len(fields) > 3:
            # FUTURE - normalize level fields to allow more comparisons to
            # succeed.
            # Fixed: str.join replaces the Python-2-only string.join()
            # function (identical behaviour, also valid on Python 3).
            self.level = ":".join(fields[3:])
        else:
            self.level = ""

    def __eq__(self, other):
        """Compare two SecurityContext objects - all fields must be exactly the
        the same for the comparison to work. It is possible for the level fields
        to be semantically the same yet syntactically different - in this case
        this function will return false.
        """
        return self.user == other.user and \
               self.role == other.role and \
               self.type == other.type and \
               self.level == other.level

    def to_string(self, default_level="s0"):
        """Return a string representing this security context.

        By default, the string will contain a MCS / MLS level
        potentially from the default which is passed in if none was
        set.

        Arguments:
        default_level - the default level to use if self.level is an
          empty string.

        Returns:
        A string representing the security context in the form
          'user:role:type:level'.
        """
        fields = [self.user, self.role, self.type]
        if self.level == "":
            if default_level != "":
                fields.append(default_level)
        else:
            fields.append(self.level)
        return ":".join(fields)
class ObjectClass(Leaf):
    """SELinux object class and permissions.

    This class is a basic representation of an SELinux object
    class - it does not represent separate common permissions -
    just the union of the common and class specific permissions.
    It is meant to be convenient for policy generation.
    """
    def __init__(self, name="", parent=None):
        Leaf.__init__(self, parent)
        self.name = name
        self.perms = IdSet()  # union of common + class-specific perms
# Basic statements
class TypeAttribute(Leaf):
    """SElinux typeattribute statement.

    This class represents a typeattribute statement, associating one
    type with a comma-separated list of attributes.
    """
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.type = ""
        self.attributes = IdSet()

    def to_string(self):
        return "typeattribute %s %s;" % (self.type, self.attributes.to_comma_str())
class Role(Leaf):
    """SELinux role statement associating a role with its types."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.role = ""
        self.types = IdSet()

    def to_string(self):
        return "role %s types %s;" % (self.role, self.types.to_comma_str())
class Type(Leaf):
    """SELinux type declaration with optional aliases and attributes."""

    def __init__(self, name="", parent=None):
        Leaf.__init__(self, parent)
        self.name = name
        self.attributes = IdSet()
        self.aliases = IdSet()

    def to_string(self):
        s = "type %s" % self.name
        if len(self.aliases) > 0:
            # Bug fix: a separating space was missing, producing
            # "type fooalias { ... }" instead of "type foo alias { ... }".
            s = s + " alias %s" % self.aliases.to_space_str()
        if len(self.attributes) > 0:
            s = s + ", %s" % self.attributes.to_comma_str()
        return s + ";"
class TypeAlias(Leaf):
    """SELinux typealias statement ("typealias type alias { ... };")."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.type = ""
        self.aliases = IdSet()

    def to_string(self):
        return "typealias %s alias %s;" % (self.type, self.aliases.to_space_str())
class Attribute(Leaf):
    """SELinux attribute declaration ("attribute name;")."""
    def __init__(self, name="", parent=None):
        Leaf.__init__(self, parent)
        self.name = name

    def to_string(self):
        return "attribute %s;" % self.name
# Classes representing rules
class AVRule(Leaf):
    """SELinux access vector (AV) rule.

    The AVRule class represents all varieties of AV rules including
    allow, dontaudit, auditallow, and neverallow (indicated by the flags
    self.ALLOW, self.DONTAUDIT, self.AUDITALLOW, and self.NEVERALLOW
    respectively).

    The source and target types, object classes, and perms are all represented
    by sets containing strings. Sets are used to make it simple to add
    strings repeatedly while avoiding duplicates.

    No checking is done to make certain that the symbols are valid or
    consistent (e.g., perms that don't match the object classes). It is
    even possible to put invalid types like '$1' into the rules to allow
    storage of the reference policy interfaces.
    """
    ALLOW = 0
    DONTAUDIT = 1
    AUDITALLOW = 2
    NEVERALLOW = 3

    def __init__(self, av=None, parent=None):
        Leaf.__init__(self, parent)
        self.src_types = IdSet()
        self.tgt_types = IdSet()
        self.obj_classes = IdSet()
        self.perms = IdSet()
        self.rule_type = self.ALLOW
        if av:
            self.from_av(av)

    def __rule_type_str(self):
        # Bug fix: NEVERALLOW previously fell through to the final else
        # branch and was rendered as "auditallow".
        if self.rule_type == self.ALLOW:
            return "allow"
        elif self.rule_type == self.DONTAUDIT:
            return "dontaudit"
        elif self.rule_type == self.NEVERALLOW:
            return "neverallow"
        else:
            return "auditallow"

    def from_av(self, av):
        """Add the access from an access vector to this allow
        rule.
        """
        self.src_types.add(av.src_type)
        # A rule whose source and target coincide uses the 'self' keyword.
        if av.src_type == av.tgt_type:
            self.tgt_types.add("self")
        else:
            self.tgt_types.add(av.tgt_type)
        self.obj_classes.add(av.obj_class)
        self.perms.update(av.perms)

    def to_string(self):
        """Return a string representation of the rule
        that is a valid policy language representation (assuming
        that the types, object class, etc. are valid).
        """
        return "%s %s %s:%s %s;" % (self.__rule_type_str(),
                                    self.src_types.to_space_str(),
                                    self.tgt_types.to_space_str(),
                                    self.obj_classes.to_space_str(),
                                    self.perms.to_space_str())
class TypeRule(Leaf):
    """SELinux type rules.

    This class is very similar to the AVRule class, but is for representing
    the type rules (type_trans, type_change, and type_member). The major
    difference is the lack of perms and only a single destination type.
    """
    TYPE_TRANSITION = 0
    TYPE_CHANGE = 1
    TYPE_MEMBER = 2

    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.src_types = IdSet()
        self.tgt_types = IdSet()
        self.obj_classes = IdSet()
        self.dest_type = ""
        self.rule_type = self.TYPE_TRANSITION

    def __rule_type_str(self):
        if self.rule_type == self.TYPE_TRANSITION:
            return "type_transition"
        elif self.rule_type == self.TYPE_CHANGE:
            return "type_change"
        else:
            return "type_member"

    def to_string(self):
        return "%s %s %s:%s %s;" % (self.__rule_type_str(),
                                    self.src_types.to_space_str(),
                                    self.tgt_types.to_space_str(),
                                    self.obj_classes.to_space_str(),
                                    self.dest_type)
class RoleAllow(Leaf):
    """SELinux role allow rule ("allow role_a role_b;")."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.src_roles = IdSet()
        self.tgt_roles = IdSet()

    def to_string(self):
        return "allow %s %s;" % (self.src_roles.to_comma_str(),
                                 self.tgt_roles.to_comma_str())
class ModuleDeclaration(Leaf):
    """Module declaration, in raw ("module") or refpolicy macro form."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.name = ""
        self.version = ""
        # When True, render as the refpolicy policy_module() macro.
        self.refpolicy = False

    def to_string(self):
        if self.refpolicy:
            return "policy_module(%s, %s)" % (self.name, self.version)
        else:
            return "module %s %s;" % (self.name, self.version)
class Conditional(Node):
    """A conditional (if) block; children are the enclosed statements."""
    def __init__(self, parent=None):
        Node.__init__(self, parent)
        self.cond_expr = []  # conditional expression tokens

    def to_string(self):
        return "[If %s]" % list_to_space_str(self.cond_expr, cont=("", ""))
class Bool(Leaf):
    """SELinux boolean declaration ("bool name true|false;")."""

    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.name = ""
        self.state = False

    def to_string(self):
        s = "bool %s " % self.name
        # Bug fix: this tested s.state - an attribute that does not exist
        # on the local string `s` - raising AttributeError; the boolean
        # state lives on self.
        if self.state:
            return s + "true"
        else:
            return s + "false"
class InitialSid(Leaf):
    """SELinux initial SID declaration ("sid name context")."""

    def __init__(self, parent=None):
        # Bug fix: the constructor was misspelled "__init" so it was never
        # invoked, leaving instances without name/context attributes.
        Leaf.__init__(self, parent)
        self.name = ""
        self.context = None

    def to_string(self):
        return "sid %s %s" % (self.name, str(self.context))
class GenfsCon(Leaf):
    """SELinux genfscon statement labeling a path on a filesystem."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.filesystem = ""
        self.path = ""
        self.context = None  # a SecurityContext

    def to_string(self):
        return "genfscon %s %s %s" % (self.filesystem, self.path, str(self.context))
class FilesystemUse(Leaf):
    """SELinux fs_use_* statement (xattr / trans / task labeling)."""

    XATTR = 1
    TRANS = 2
    TASK = 3

    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.type = self.XATTR
        self.filesystem = ""
        self.context = None

    def to_string(self):
        s = ""
        # Bug fix: the class constants were referenced as bare names
        # (XATTR, TRANS, TASK), raising NameError; they must be qualified
        # with self (or the class name).
        if self.type == self.XATTR:
            s = "fs_use_xattr "
        elif self.type == self.TRANS:
            s = "fs_use_trans "
        elif self.type == self.TASK:
            s = "fs_use_task "
        return "%s %s %s;" % (s, self.filesystem, str(self.context))
class PortCon(Leaf):
    """SELinux portcon statement labeling a network port."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.port_type = ""    # protocol, e.g. "tcp"
        self.port_number = ""  # port or port range
        self.context = None

    def to_string(self):
        return "portcon %s %s %s" % (self.port_type, self.port_number, str(self.context))
class NodeCon(Leaf):
    """SELinux nodecon statement labeling a network address range."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.start = ""
        self.end = ""
        self.context = None

    def to_string(self):
        return "nodecon %s %s %s" % (self.start, self.end, str(self.context))
class NetifCon(Leaf):
    """SELinux netifcon statement labeling a network interface."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.interface = ""
        self.interface_context = None  # context of the interface itself
        self.packet_context = None     # context applied to its packets

    def to_string(self):
        return "netifcon %s %s %s" % (self.interface, str(self.interface_context),
                                      str(self.packet_context))
# Reference policy specific types
def print_tree(head):
    """Debug helper: print the parse tree, one node per line,
    indented by depth."""
    for node, depth in walktree(head, showdepth=True):
        s = ""
        for i in range(depth):
            s = s + "\t"
        print s + str(node)
class Headers(Node):
    """Container node for the parsed reference policy headers."""
    def __init__(self, parent=None):
        Node.__init__(self, parent)

    def to_string(self):
        return "[Headers]"
class Module(Node):
    """Container node for a parsed policy module."""
    def __init__(self, parent=None):
        Node.__init__(self, parent)

    def to_string(self):
        return ""
class Interface(Node):
    """A reference policy interface definition.

    This class represents a reference policy interface definition.
    """
    def __init__(self, name="", parent=None):
        Node.__init__(self, parent)
        self.name = name

    def to_string(self):
        return "[Interface name: %s]" % self.name
class TunablePolicy(Node):
    """A tunable_policy() block guarded by a conditional expression."""
    def __init__(self, parent=None):
        Node.__init__(self, parent)
        self.cond_expr = []

    def to_string(self):
        return "[Tunable Policy %s]" % list_to_space_str(self.cond_expr, cont=("", ""))
class Template(Node):
    """A reference policy template definition."""
    def __init__(self, name="", parent=None):
        Node.__init__(self, parent)
        self.name = name

    def to_string(self):
        return "[Template name: %s]" % self.name
class IfDef(Node):
    """An M4 ifdef() block; children are the guarded statements."""
    def __init__(self, name="", parent=None):
        Node.__init__(self, parent)
        self.name = name

    def to_string(self):
        return "[Ifdef name: %s]" % self.name
class InterfaceCall(Leaf):
    """A call to a reference policy interface, e.g. foo_read_bar(t)."""
    def __init__(self, ifname="", parent=None):
        Leaf.__init__(self, parent)
        self.ifname = ifname
        self.args = []      # each arg is a string or a list of strings
        self.comments = []

    def matches(self, other):
        """Return True when other calls the same interface with the
        same arguments (in order)."""
        if self.ifname != other.ifname:
            return False
        if len(self.args) != len(other.args):
            return False
        for a, b in zip(self.args, other.args):
            if a != b:
                return False
        return True

    def to_string(self):
        s = "%s(" % self.ifname
        i = 0
        for a in self.args:
            # List-valued arguments render in '{ a b }' form.
            if isinstance(a, list):
                str = list_to_space_str(a)
            else:
                str = a

            if i != 0:
                s = s + ", %s" % str
            else:
                s = s + str
            i += 1
        return s + ")"
class OptionalPolicy(Node):
    """An optional_policy() block; children are the enclosed statements."""
    def __init__(self, parent=None):
        Node.__init__(self, parent)

    def to_string(self):
        return "[Optional Policy]"
class SupportMacros(Node):
    """Holds the refpolicy support macro definitions (obj perm sets etc.).

    The name -> permissions map is built lazily on first lookup by
    recursively expanding each macro into its constituent raw
    permissions.
    """

    def __init__(self, parent=None):
        Node.__init__(self, parent)
        self.map = None  # lazily generated by __gen_map()

    def to_string(self):
        return "[Support Macros]"

    def __expand_perm(self, perm):
        # Recursive expansion - the assumption is that these
        # are ordered correctly so that no macro is used before
        # it is defined
        s = set()
        # Fixed: membership via the `in` operator instead of the
        # Python-2-only dict.has_key() method (same behaviour).
        if perm in self.map:
            for p in self.by_name(perm):
                s.update(self.__expand_perm(p))
        else:
            s.add(perm)
        return s

    def __gen_map(self):
        self.map = {}
        for x in self:
            exp_perms = set()
            for perm in x.perms:
                exp_perms.update(self.__expand_perm(perm))
            self.map[x.name] = exp_perms

    def by_name(self, name):
        if not self.map:
            self.__gen_map()
        return self.map[name]

    def has_key(self, name):
        # Kept (despite the dated name) for interface compatibility with
        # existing callers.
        if not self.map:
            self.__gen_map()
        return name in self.map
class Require(Leaf):
    """A require { ... } block listing the symbols a module depends on."""
    def __init__(self, parent=None):
        Leaf.__init__(self, parent)
        self.types = IdSet()
        self.obj_classes = { }  # class name -> IdSet of required perms
        self.roles = IdSet()
        self.bools = IdSet()
        self.users = IdSet()

    def add_obj_class(self, obj_class, perms):
        # Merge perms into any existing requirement for the class.
        p = self.obj_classes.setdefault(obj_class, IdSet())
        p.update(perms)

    def to_string(self):
        s = []
        s.append("require {")
        for type in self.types:
            s.append("\ttype %s;" % type)
        for obj_class, perms in self.obj_classes.items():
            s.append("\tclass %s %s;" % (obj_class, perms.to_space_str()))
        for role in self.roles:
            s.append("\trole %s;" % role)
        for bool in self.bools:
            s.append("\tbool %s;" % bool)
        for user in self.users:
            s.append("\tuser %s;" % user)
        s.append("}")

        # Handle empty requires
        if len(s) == 2:
            return ""

        return "\n".join(s)
class ObjPermSet:
    """A named permission set (a refpolicy define(`name', `perms'))."""

    def __init__(self, name):
        self.name = name
        self.perms = set()

    def to_string(self):
        # Bug fix: self.perms is a plain set, which has no to_space_str
        # method; render it through the module-level helper instead
        # (identical output when perms is an IdSet).
        return "define(`%s', `%s')" % (self.name, list_to_space_str(self.perms))
class ClassMap:
    """Pairs an object class name with a permission string."""
    def __init__(self, obj_class, perms):
        self.obj_class = obj_class
        self.perms = perms

    def to_string(self):
        return self.obj_class + ": " + self.perms
class Comment:
    """A block of '#' comment lines attached to a policy statement."""

    def __init__(self, l=None):
        # Keep the passed-in list by reference; default to a fresh list.
        self.lines = l if l else []

    def to_string(self):
        # An empty comment acts as a spacer between policy statements and
        # renders as an empty string (i.e. just a new line when printed).
        if not self.lines:
            return ""
        return "\n".join("#" + line for line in self.lines)

    def merge(self, other):
        # Append the other comment's non-empty lines onto this one.
        if len(other.lines):
            for line in other.lines:
                if line != "":
                    self.lines.append(line)

    def __str__(self):
        return self.to_string()
| carlgao/lenga | images/lenny64-peon/usr/share/python-support/python-sepolgen/sepolgen/refpolicy.py | Python | mit | 24,797 |
from __future__ import absolute_import, division, print_function
import pandas as pd
from toolz import partial
from dask.base import compute
def _categorize_block(df, categories):
""" Categorize a dataframe with given categories
df: DataFrame
categories: dict mapping column name to iterable of categories
"""
df = df.copy()
for col, vals in categories.items():
df[col] = pd.Categorical(df[col], categories=vals, ordered=False)
return df
def categorize(df, columns=None, **kwargs):
    """
    Convert columns of dataframe to category dtype

    This aids performance, both in-memory and in spilling to disk

    df:      a dask DataFrame
    columns: a column name, list of names, or None (autodetect every
             object-dtype column)
    kwargs:  forwarded to dask's compute() when materialising the
             distinct values
    """
    if columns is None:
        # Default to all object ('O') dtype columns.
        dtypes = df.dtypes
        columns = [name for name, dt in zip(dtypes.index, dtypes.values)
                   if dt == 'O']
    if not isinstance(columns, (list, tuple)):
        columns = [columns]

    # One distinct-values graph per column, evaluated in a single
    # compute() pass so the input is only scanned once.
    distincts = [df[col].dropna().drop_duplicates() for col in columns]
    values = compute(*distincts, **kwargs)

    func = partial(_categorize_block, categories=dict(zip(columns, values)))
    # Apply the conversion to the empty meta frame to derive the output
    # metadata for map_partitions.
    meta = func(df._meta)
    return df.map_partitions(func, meta=meta)
| jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/dataframe/categorical.py | Python | mit | 1,158 |
#########################################################################
# Uniprot XML parser to parse phosphorylation info of proteins
#
# eg 29/07/2009
#########################################################################
#from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import iterparse
import TsvReader
def main():
file_name = "../data/disease/uniprot/humdisease.txt"
mim_to_mesh_values = get_mim_to_mesh(file_name)
print len(mim_to_mesh)
print mim_to_mesh["600807"]
return
from time import clock
parser = UniprotXMLParser("../data/Q12888.xml")
#parser = UniprotXMLParser("../../data/phosphorylation/uniprot/uniprot-phosphorylation-large-scale-analysis.xml")
#ids = parser.parse_ids()
#print map(len, ids)
#print ids[-1]
t1 = clock()
elements = parser.parse()
t2 = clock()
print len(elements), elements[-1]
print t2-t1
return
def get_uniprot_to_geneid(file_name, uniprot_ids=None, only_min=True, key_function=int):
    """
    To parse HUMAN_9606_idmapping.dat file (trimmed to two columns) from Uniprot
    only_min: Chooses the "min" defined by key_function used in min()
    key_function: int (geneids) | len (gene symbols)
    Creating the file
    wget ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz
    zgrep Gene_Name HUMAN_9606_idmapping.dat.gz | cut -f 1,3 > uniprot_to_symbol.txt
    zgrep GeneID HUMAN_9606_idmapping.dat.gz | cut -f 1,3 > idmapping.tab
    OR zcat HUMAN_9606_idmapping_selected.dat.gz | cut -f 1,3 > idmapping.tab

    Returns {uniprot: geneid} when only_min, otherwise
    {uniprot: set(geneids)}.
    """
    uniprot_to_geneids = {}
    f = open(file_name)
    f.readline()  # skip the header line
    for line in f:
        uniprot, geneid = line.split("\t")
        geneid = geneid.strip()
        uniprot = uniprot.strip()
        # Skip malformed / empty rows.
        if geneid == "" or uniprot == "":
            continue
        # Optional filter restricting which accessions are kept.
        if uniprot_ids is not None and uniprot not in uniprot_ids:
            continue
        uniprot_to_geneids.setdefault(uniprot, set()).add(geneid)
    f.close()
    if only_min:
        uniprot_to_geneid = {}
        # Fixed: dict.items() instead of the Python-2-only iteritems()
        # (same behaviour, also valid on Python 3).
        for uniprot, geneids in uniprot_to_geneids.items():
            uniprot_to_geneid[uniprot] = min(geneids, key=key_function)
        uniprot_to_geneids = uniprot_to_geneid
    return uniprot_to_geneids
def get_uniprot_to_geneid_from_idmapping_file(file_name, uniprot_ids=None):
    """
    To parse idmapping.tab from Uniprot
    Useful for id mapping of non-human species

    Returns {uniprot: geneid}; when an accession maps to several gene
    ids, the last value seen wins (see note below).
    """
    parser = TsvReader.TsvReader(file_name, delim="\t", inner_delim=";")
    column_to_index, id_to_values = parser.read(fields_to_include=["UniProtKB-AC", "GeneID (EntrezGene)"], keys_to_include=uniprot_ids, merge_inner_values=True)
    uniprot_to_geneid = {}
    # NOTE(review): iteritems() is Python-2-only, like the rest of this
    # module.
    for uniprot, values in id_to_values.iteritems():
        for val in values:
            geneid = val[column_to_index["geneid (entrezgene)"]]
            #if uniprot in uniprot_to_geneid:
            #    print "multiple gene id", uniprot
            #uniprot_to_geneid.setdefault(uniprot, set()).add(geneid)
            # Multiple gene ids silently overwrite each other here.
            uniprot_to_geneid[uniprot] = geneid
    return uniprot_to_geneid
def get_mim_to_mesh(file_name):
    """
    To parse humdisease.txt from Uniprot

    Returns a dict mapping an OMIM id to a list of
    (disease name, MeSH id) tuples.
    """
    mim_to_mesh_values = {}
    f = open(file_name)
    # Skip the file preamble until the first record ("ID" line).
    line = f.readline()
    while not line.startswith("ID"):
        line = f.readline()
    words = line.strip().split()
    disease = " ".join(words[1:]).rstrip(".")
    for line in f:
        words = line.strip().split()
        if words[0] == "ID":
            disease = " ".join(words[1:]).rstrip(".")
        if words[0] == "DR":
            # Cross-reference line: remember the latest MeSH / MIM ids.
            id_type = words[1].lower().rstrip(";")
            if id_type == "mesh":
                mesh = words[2].rstrip(".")
            elif id_type == "mim":
                mim = words[2].rstrip(";")
        if line.startswith("//"):
            # End of record: store the accumulated ids.
            # NOTE(review): mim/mesh are not reset per record, so a record
            # lacking a DR MIM or MeSH line silently reuses the previous
            # record's ids (and a first record without them would raise
            # NameError) -- assumes every record carries both lines.
            #if mim in mim_to_mesh_values and mim_to_mesh_values[mim][1] == mesh:
            #continue
            #if mim in mim_to_mesh_values: print mim, mim_to_mesh_values[mim], disease, mesh
            mim_to_mesh_values.setdefault(mim, []).append((disease, mesh))
    f.close()
    return mim_to_mesh_values
class UniprotXMLParser(object):
    """Stream-parses Uniprot XML and extracts phosphorylation annotations.

    NOTE(review): written for Python 2 (iterator .next(), dict.has_key,
    print statements); indentation reconstructed from a mangled source.
    """
    NS = "{http://uniprot.org/uniprot}"
    # Maps the Uniprot "modified residue" description to the one-letter
    # residue code used downstream.
    psiteDesc_to_psiteChar = { "Phosphoserine": "S",
                               "Phosphothreonine": "T",
                               "Phosphotyrosine": "Y",
                               "Phosphohistidine": "H" }

    def __init__(self, filename):
        self.file_name = filename
        #self.etree = ElementTree()
        return

    def parse_ids_high_mem(self):
        """Collect accession ids per entry by loading the whole document.

        NOTE(review): ElementTree is referenced here but its import is
        commented out at module level -- this method would raise
        NameError as shipped.
        """
        self.etree = ElementTree()
        tree = self.etree.parse(self.file_name)
        #ids = tree.findall(self.NS+"accession")
        ids = []
        sub_ids = None
        for e in tree.getiterator():
            if e.tag == self.NS+"entry":
                # Starting a new entry: flush the previous entry's ids.
                if sub_ids is not None:
                    ids.append(sub_ids)
                sub_ids = []
            if e.tag == self.NS+"accession":
                sub_ids.append(e.text)
        ids.append(sub_ids)
        return ids

    def parse_ids(self):
        """Collect accession ids per entry using incremental parsing
        (constant memory)."""
        ids = []
        sub_ids = []
        # get an iterable
        context = iterparse(self.file_name, ["start", "end"])
        # turn it into an iterator
        context = iter(context)
        # get the root element
        event, root = context.next()
        for (event, elem) in context:
            if event == "end":
                if elem.tag == self.NS+"accession":
                    sub_ids.append(elem.text)
                if elem.tag == self.NS+"entry":
                    ids.append(sub_ids)
                    sub_ids = []
                    # Free the processed subtree to keep memory flat.
                    elem.clear()
                    root.clear()
        return ids

    def parse(self):
        """Parse all entries, returning a list of UniprotXMLElement."""
        ignored_modification_types = set()
        context = iterparse(self.file_name, ["start", "end"])
        context = iter(context)
        event, root = context.next()
        elements = []
        current_element = None
        current_position = None
        for (event, elem) in context:
            if event == "start":
                if elem.tag == self.NS+"entry":
                    current_element = UniprotXMLElement()
            elif event == "end":
                if elem.tag == self.NS+"accession":
                    current_element.add_id(elem.text)
                elif elem.tag == self.NS+"organism":
                    db_elm = elem.find(self.NS+"dbReference") #only looks at sublevel - alternative: keep tag stack
                    if db_elm.get("type") == "NCBI Taxonomy":
                        current_element.set_tax(db_elm.get("id"))
                elif elem.tag == self.NS+"feature" and elem.get("type") == "modified residue":
                    #print elem.getchildren()
                    #pos_elm = elem.find(self.NS+"position")
                    #if elem.get("status") == "probable":
                    #    continue
                    # Find the (last) position element within the feature.
                    for sub_elm in elem.getiterator():
                        if sub_elm.tag == self.NS+"position":
                            pos_elm = sub_elm
                    pos = pos_elm.get("position")
                    # Description looks like "Phosphoserine; by CK2".
                    desc = elem.get("description")
                    vals = desc.split(";")
                    type = vals[0]
                    kinase = vals[1][vals[1].find("by")+2:].strip() if (len(vals) > 1) else None
                    if self.psiteDesc_to_psiteChar.has_key(type):
                        type = self.psiteDesc_to_psiteChar[type]
                        current_element.add_psite(pos, type, kinase)
                    else:
                        # Unrecognised modification types are collected and
                        # reported once at the end.
                        ignored_modification_types.add(type)
                elif elem.tag == self.NS+"entry":
                    seq_elm = elem.find(self.NS+"sequence")
                    current_element.set_sequence(seq_elm.text)
                    elements.append(current_element)
                    # Free the processed subtree to keep memory flat.
                    elem.clear()
                    root.clear()
        print "Ignored mofications: ", ignored_modification_types
        return elements
class UniprotXMLElement(object):
    """One parsed Uniprot <entry>: accessions, taxonomy id,
    phosphosites and sequence."""

    def __init__(self):
        self.ids = []            # accession numbers, in document order
        self.taxid = None        # NCBI taxonomy id
        self.phosphosites = []   # (position, type char, kinase) tuples
        self.sequence = None     # amino-acid sequence, newlines stripped

    def add_id(self, id):
        self.ids.append(id)

    def set_tax(self, taxid):
        self.taxid = taxid

    def add_psite(self, pos, type=None, kinase=None):
        self.phosphosites.append((pos, type, kinase))

    def set_sequence(self, seq):
        # The XML sequence element contains embedded newlines.
        self.sequence = seq.replace("\n", "")

    def get_ids(self):
        return self.ids

    def get_tax(self):
        return self.taxid

    def get_psites(self):
        return self.phosphosites

    def get_sequence(self):
        return self.sequence

    def __repr__(self):
        return "%s\t%s\t%s\t%s" % (
            self.ids, self.taxid, self.phosphosites, self.sequence)
if __name__ == "__main__":
main()
| quimaguirre/diana | diana/toolbox/parse_uniprot.py | Python | mit | 9,091 |
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2015
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import logging
import os
import platform
import random
import time
import copy
import pkg_resources
import networkx as nx
import numpy as np
from pyswarm import pso
from collections import defaultdict
from .utils.antichains import get_max_weighted_antichain
from ..common import dropdict, get_roots, DropType
logger = logging.getLogger(__name__)
DEBUG = 0
class SchedulerException(Exception):
    """Raised when a valid schedule cannot be produced
    (e.g. missing labels or no idle lane)."""
    pass
class Schedule(object):
    """
    The scheduling solution with schedule-related properties
    """

    def __init__(self, dag, max_dop):
        """
        dag:     a networkx DiGraph whose nodes will receive schedule
                 labels ('stt'/'edt')
        max_dop: maximum degree of parallelism; either an int or a dict
                 with a 'num_cpus' entry (defaults to 1 when absent)
        """
        self._dag = dag
        # Accept both the plain-int and the {'num_cpus': N} forms.
        self._max_dop = max_dop if type(max_dop) == int else max_dop.get('num_cpus', 1)
        # Attach start/end time labels to each node.
        DAGUtil.label_schedule(self._dag)
        # (longest path, total weight) of the critical path, computed once.
        self._lpl = DAGUtil.get_longest_path(self._dag, default_weight=0, show_path=True)
        self._wkl = None  # cached workload, computed lazily
        self._sma = None  # cached schedule matrix, computed lazily

    @property
    def makespan(self):
        # Total weight of the critical path.
        return self._lpl[1]

    @property
    def longest_path(self):
        # The node sequence of the critical path.
        return self._lpl[0]

    @property
    def schedule_matrix(self):
        """
        Return: a self._lpl x self._max_dop matrix
        (X - time, Y - resource unit / parallel lane)

        Each cell holds the id of the node occupying that lane at that
        time slot (0 = idle).  Raises SchedulerException when nodes lack
        schedule labels or no idle lane can be found.
        NOTE(review): uses the networkx 1.x G.node API.
        """
        if (self._sma is None):
            G = self._dag
            N = max(self.makespan, 1)
            if (DEBUG):
                # Dump the critical path and its accumulated weight.
                lpl_str = []
                lpl_c = 0
                for lpn in self.longest_path:
                    ww = G.node[lpn].get('num_cpus', 0)
                    lpl_str.append("{0}({1})".format(lpn, ww))
                    lpl_c += ww
                logger.debug("lpl: %s", " -> ".join(lpl_str))
                logger.debug("lplt = %d", int(lpl_c))
            M = self._max_dop
            #print("N (makespan) is ", N, "M is ", M)
            ma = np.zeros((M, N), dtype=int)
            pr = np.zeros((M), dtype=int)  # per-lane next-free time
            last_pid = -1
            prev_n = None
            topo_sort = nx.topological_sort(G)
            for n in topo_sort:
                node = G.node[n]
                try:
                    stt = node['stt']
                    edt = node['edt']
                except KeyError as ke:
                    raise SchedulerException("No schedule labels found: {0}".format(str(ke)))
                if (edt == stt):
                    # Zero-duration nodes occupy no lane time.
                    continue
                if (prev_n in G.predecessors(n)):
                    # Keep a direct successor on its predecessor's lane.
                    curr_pid = last_pid
                else:
                    # First lane that is idle by this node's start time.
                    found = None
                    for i in range(M):
                        if (pr[i] <= stt):
                            found = i
                            break
                    if (found is None):
                        raise SchedulerException("Cannot find a idle PID, max_dop provided: {0}, actual max_dop: {1}\n Graph: {2}".format(M,
                        'DAGUtil.get_max_dop(G)', G.nodes(data=True)))
                        #DAGUtil.get_max_dop(G), G.nodes(data=True)))
                    curr_pid = found
                ma[curr_pid, stt:edt] = n
                pr[curr_pid] = edt
                last_pid = curr_pid
                prev_n = n
            self._sma = ma
            #print(ma)
        return self._sma

    @property
    def workload(self):
        """
        Return: (integer)
        the mean # of resource units per time unit consumed by the graph/partition
        """
        if (self._wkl is None):
            ma = self.schedule_matrix
            # Count busy lanes per time slot, then average.
            c = []
            for i in range(ma.shape[1]):
                c.append(np.count_nonzero(ma[:, i]))
            self._wkl = int(np.mean(np.array(c)))  # since METIS only accepts integer
        return self._wkl

    @property
    def efficiency(self):
        """
        resource usage percentage (integer)
        """
        return int(float(self.workload) / self._max_dop * 100)
class Partition(object):
    """
    Logical partition, multiple (1 ~ N) of these can be placed onto a single
    physical resource unit
    Logical partition can be nested, and it somewhat resembles the `dlg.manager.drop_manager`

    A partition keeps its own sub-DAG of the global workflow graph and tracks
    its degree of parallelism (DoP) incrementally via maximal antichains.
    """
    def __init__(self, gid, max_dop):
        """
        gid:        cluster/partition id (string)
        max_dop:    maximum allowed degree of parallelism in this partition (int)
        """
        self._gid = gid
        self._dag = nx.DiGraph()
        self._ask_max_dop = max_dop  # requested capacity (upper bound)
        self._max_antichains = None # a list of max (width) antichains
        self._lpl = None
        self._schedule = None
        self._max_dop = None  # actual current DoP of this partition
        self._parent_id = None
        self._child_parts = None
        self._tmp_merge_dag = None  # staged DAG from a can_merge() probe
        self._tmp_new_ac = None  # staged antichains from a probe_max_dop() probe
        logger.debug("My dop = %r", self._ask_max_dop)
    @property
    def parent_id(self):
        return self._parent_id
    @parent_id.setter
    def parent_id(self, value):
        self._parent_id = value
    @property
    def partition_id(self):
        return self._gid
    @property
    def schedule(self):
        """
        Get the schedule assocaited with this partition (computed lazily)
        """
        if (self._schedule is None):
            self._schedule = Schedule(self._dag, self._max_dop)
        return self._schedule
    def recompute_schedule(self):
        # Invalidate the cached schedule and rebuild it.
        self._schedule = None
        return self.schedule
    def can_merge(self, that):
        # Conservative test: accept only if the summed DoPs fit the capacity.
        if (self._max_dop + that._max_dop <= self._ask_max_dop):
            return True
        else:
            return False
        #TODO re-implement this performance hog!
        #self._tmp_merge_dag = nx.compose(self._dag, that._dag)
        #return DAGUtil.get_max_dop(self._tmp_merge_dag) <= self._ask_max_dop
    def merge(self, that):
        # Use the DAG staged by a previous can_merge() probe if present,
        # otherwise compose the two sub-DAGs now.
        if (self._tmp_merge_dag is not None):
            self._dag = self._tmp_merge_dag
            self._tmp_merge_dag = None
        else:
            self._dag = nx.compose(self._dag, that._dag)
        #self._max_dop
        #TODO add this performance hog!
        #self._max_antichains = None
    def can_add(self, u, v, gu, gv):
        """
        Check if nodes u and/or v can join this partition
        A node may be rejected due to reasons such as: DoP overflow or
        completion time deadline overdue, etc.

        Returns a 3-tuple (can_join, u_is_new, v_is_new). The nodes/edge are
        added temporarily for the probe and removed again before returning.
        """
        uw = gu['weight']
        vw = gv['weight']
        if (len(self._dag.nodes()) == 0):
            # empty partition always accepts the first edge
            return (True, False, False)
        unew = u not in self._dag.node
        vnew = v not in self._dag.node
        if (DEBUG):
            slow_max = DAGUtil.get_max_antichains(self._dag)
            fast_max = self._max_antichains
            info = "Before: {0} - slow max: {1}, fast max: {2}, u: {3}, v: {4}, unew:{5}, vnew:{6}".format(self._dag.edges(),
            slow_max, fast_max, u, v, unew, vnew)
            logger.debug(info)
            if (len(slow_max) != len(fast_max)):
                raise SchedulerException("ERROR - {0}".format(info))
        self._dag.add_node(u, weight=uw)
        self._dag.add_node(v, weight=vw)
        self._dag.add_edge(u, v)
        if (unew and vnew):
            # both nodes are new: no incremental info available, full recompute
            mydop = DAGUtil.get_max_dop(self._dag)
        else:
            mydop = self.probe_max_dop(u, v, unew, vnew)
        #TODO - put the following code in a unit test!
        if (DEBUG):
            mydop_slow = DAGUtil.get_max_dop(self._dag)#
            if (mydop_slow != mydop):
                err_msg = "u = {0}, v = {1}, unew = {2}, vnew = {3}".format(u, v, unew, vnew)
                raise SchedulerException("{2}: mydop = {0}, mydop_slow = {1}".format(mydop, mydop_slow, err_msg))
        ret = False if mydop > self._ask_max_dop else True
        # roll back the temporary additions made for this probe
        if (unew):
            self.remove(u)
        if (vnew):
            self.remove(v)
        return (ret, unew, vnew)
    def add(self, u, v, gu, gv, sequential=False, global_dag=None):
        """
        Add nodes u and/or v to the partition
        if sequential is True, break antichains to sequential chains
        (extra zero-weight edges are also mirrored into global_dag, and
        reverted immediately if they would create a cycle there)
        """
        # if (self.partition_id == 180):
        #     logger.debug("u = ", u, ", v = ", v, ", partition = ", self.partition_id)
        uw = gu['weight']
        vw = gv['weight']
        unew = u not in self._dag.node
        vnew = v not in self._dag.node
        self._dag.add_node(u, weight=uw, num_cpus=gu['num_cpus'])
        self._dag.add_node(v, weight=vw, num_cpus=gv['num_cpus'])
        self._dag.add_edge(u, v)
        if (unew and vnew): # we know this is fast
            self._max_antichains = DAGUtil.get_max_antichains(self._dag)
            self._max_dop = 1
        else:
            if (sequential and (global_dag is not None)):
                # break potential antichain to sequential chain
                if (unew):
                    v_ups = nx.ancestors(self._dag, v)
                    for vup in v_ups:
                        if (u == vup):
                            continue
                        if (len(list(self._dag.predecessors(vup))) == 0):
                            # link u to "root" parent of v to break antichain
                            self._dag.add_edge(u, vup)
                            # change the original global graph
                            global_dag.add_edge(u, vup, weight=0)
                            if (not nx.is_directed_acyclic_graph(global_dag)):
                                global_dag.remove_edge(u, vup)
                else:
                    u_downs = nx.descendants(self._dag, u)
                    for udo in u_downs:
                        if (udo == v):
                            continue
                        if (len(list(self._dag.successors(udo))) == 0):
                            # link "leaf" children of u to v to break antichain
                            self._dag.add_edge(udo, v)
                            # change the original global graph
                            global_dag.add_edge(udo, v, weight=0)
                            if (not nx.is_directed_acyclic_graph(global_dag)):
                                global_dag.remove_edge(udo, v)
            self._max_dop = self.probe_max_dop(u, v, unew, vnew, update=True)
            #self._max_dop = DAGUtil.get_max_dop(self._dag)# this is too slow!
    def remove(self, n):
        """
        Remove node n from the partition
        """
        self._dag.remove_node(n)
    def add_node(self, u, weight):
        """
        Add a single node u to the partition
        """
        self._dag.add_node(u, weight=weight)
        self._max_dop = 1
    def probe_max_dop(self, u, v, unew, vnew, update=False):
        """
        An incremental antichain (which appears significantly more efficient than the networkx antichains)
        But only works for DoP, not for weighted width

        Exactly one of u/v must be new (unew xor vnew). The new node can
        extend any cached max antichain whose members are not comparable with
        it (i.e. not among its ancestors/descendants). Returns the new DoP;
        when `update` is False the recomputed antichains are staged in
        `self._tmp_new_ac` for a later updating call to consume.
        """
        if (self._max_antichains is None):
            new_ac = DAGUtil.get_max_antichains(self._dag)
            if (update):
                self._max_antichains = new_ac
            if (len(new_ac) == 0):
                if (update):
                    self._max_antichains = None
                return 0
            else:
                return len(new_ac[0])
        else:
            if (update and self._tmp_new_ac is not None):
                # consume the result staged by the preceding probe
                self._max_antichains, md = self._tmp_new_ac
                self._tmp_new_ac = None
                return md
            if (unew):
                ups = nx.descendants(self._dag, u)
                new_node = u
            elif (vnew):
                ups = nx.ancestors(self._dag, v)
                new_node = v
            else:
                raise SchedulerException("u v are both new/old")
            new_ac = []
            md = 1
            for ma in self._max_antichains: # missing elements in the current max_antichains!
                #incremental updates
                found = False
                for n in ma:
                    if (n in ups):
                        found = True
                        break
                if (not found):
                    # antichain members are incomparable with the new node:
                    # the antichain grows by one
                    mma = list(ma)
                    mma.append(new_node)
                    new_ac.append(mma)
                    if (len(mma) > md):
                        md = len(mma)
                elif (len(ma) > md):
                    md = len(ma)
                new_ac.append(ma) # carry over, then prune it
            if (len(new_ac) > 0):
                self._tmp_new_ac = (new_ac, md)
                if (update):
                    self._max_antichains = new_ac
                return md
            else:
                raise SchedulerException("No antichains")
    @property
    def cardinality(self):
        # Number of nodes currently in this partition.
        return len(self._dag.nodes())
class KFamilyPartition(Partition):
    """
    A special case (K = 1) of the Maximum Weighted K-families based on
    the Theorem 3.1 in
    http://fmdb.cs.ucla.edu/Treports/930014.pdf

    Unlike the plain ``Partition``, capacity is tracked per weight attribute
    (e.g. 'num_cpus'), and DoP is the maximum *weighted* antichain computed
    by ``get_max_weighted_antichain``.
    """
    def __init__(self, gid, max_dop, global_dag=None):
        """
        gid:        cluster/partition id
        max_dop:    dict with key:   resource_attributes (string)
                              value: resource_capacity (integer)
                    a plain int is accepted for backward compatibility and
                    is treated as {'num_cpus': max_dop}
        global_dag: the full workflow DAG whose node attributes (weight,
                    resource demands) are read when nodes join this partition
        """
        mtype = type(max_dop)
        if (mtype == int):
            # backward compatible
            max_dop = {'num_cpus': max_dop}
        elif (mtype == dict):
            pass
        else:
            raise SchedulerException('Invalid max_dop type: %r' % mtype)
        super(KFamilyPartition, self).__init__(gid, max_dop)
        self._bpg = nx.DiGraph()
        self._global_dag = global_dag
        self._check_global_dag = global_dag is not None
        self._w_attr = max_dop.keys()
        self._tc = defaultdict(set)
        # candidate DoP staged by the last can_merge()/add_node() call
        self._tmp_max_dop = None
    def add_node(self, u):
        """
        Add a single node u to the partition, copying its weight attributes
        from the global DAG, then recompute the weighted DoP per attribute.
        """
        kwargs = dict()
        if (self._tmp_max_dop is None):
            self._tmp_max_dop = dict()
        self_global_dag = self._global_dag
        for _w_attr in self._w_attr:
            u_aw = self_global_dag.node[u].get(_w_attr, 1)
            kwargs[_w_attr] = u_aw
        kwargs['weight'] = self_global_dag.node[u].get('weight', 5)
        self._dag.add_node(u, **kwargs)
        for k in self._w_attr:
            self._tmp_max_dop[k] = get_max_weighted_antichain(self._dag, w_attr=k)[0]
        self._max_dop = self._tmp_max_dop
    def can_merge(self, that, u, v):
        """
        Probe whether partition `that` can be merged into this one (optionally
        adding the edge u -> v) without exceeding the requested capacity for
        any weight attribute.

        On success the candidate DoP is staged in `self._tmp_max_dop` for a
        subsequent `merge()` to commit, and True is returned.
        """
        dag = nx.compose(self._dag, that._dag)
        if (u is not None):
            dag.add_edge(u, v)
        tmp_max_dop = copy.deepcopy(self._tmp_max_dop)
        for _w_attr in self._w_attr:
            mydop = get_max_weighted_antichain(dag, w_attr=_w_attr)[0]
            curr_max = max(self._max_dop[_w_attr], that._max_dop[_w_attr])
            if (mydop <= curr_max):
                # if you don't increase DoP, we accept that immediately
                tmp_max_dop[_w_attr] = curr_max
            elif (mydop > self._ask_max_dop[_w_attr]):
                return False
            else:
                tmp_max_dop[_w_attr] = mydop
        self._tmp_max_dop = tmp_max_dop # only change it when returning True
        return True
    def merge(self, that, u, v):
        """
        Commit a merge previously probed by `can_merge()`.

        Raises SchedulerException if `can_merge()` was not called first,
        since the staged DoP (`self._tmp_max_dop`) would be missing.
        """
        self._dag = nx.compose(self._dag, that._dag)
        if (u is not None):
            self._dag.add_edge(u, v)
        if (self._tmp_max_dop is not None):
            self._max_dop = self._tmp_max_dop
            #print("Gid %d just merged with DoP %d" % (self._gid, self._tmp_max_dop))
        else:
            # we could recalculate it again, but we are lazy!
            raise SchedulerException("can_merge() was not probed before merge()")
class Scheduler(object):
    """
    Static Scheduling consists of three steps:
    1. partition the DAG into an optimal number (M) of partitions
    goal - minimising execution time while maintaining intra-partition DoP
    2. merge partitions into a given number (N) of partitions (if M > N)
    goal - minimise logical communication cost while maintaining load balancing
    3. map each merged partition to a resource unit
    goal - minimise physical communication cost amongst resource units
    """
    def __init__(self, drop_list, max_dop=8, dag=None):
        """
        turn drop_list into DAG, and check its validity

        drop_list: list of drop dicts (see DAGUtil.build_dag_from_drops)
        max_dop:   per-partition degree-of-parallelism cap (int or dict)
        dag:       pre-built networkx DiGraph; built from drop_list if None
        """
        self._drop_list = drop_list
        if (dag is None):
            self._dag = DAGUtil.build_dag_from_drops(self._drop_list)
        else:
            self._dag = dag
        self._max_dop = max_dop
        self._parts = None # partitions
        self._part_dict = dict() #{gid : part}
        self._part_edges = [] # edges amongst all partitions
    def partition_dag(self):
        # Abstract: concrete scheduling algorithms implement this.
        raise SchedulerException("Not implemented. Try subclass instead")
    def merge_partitions(self, num_partitions, bal_cond=1):
        """
        Merge M partitions into N partitions where N < M
            implemented using METIS for now

        bal_cond:  load balance condition (integer):
                   0 - workload,
                   1 - CPU count (faster to evaluate than workload)

        Side effect: sets `parent_id` on each partition to its merged
        cluster id. Returns the METIS edge-cut count.
        """
        # 1. build the bi-directional graph (each partition is a node)
        metis = DAGUtil.import_metis()
        G = nx.Graph()
        st_gid = len(self._drop_list) + len(self._parts) + 1
        if (bal_cond == 0):
            G.graph['node_weight_attr'] = ['wkl', 'eff']
            for part in self._parts:
                sc = part.schedule
                G.add_node(part.partition_id, wkl=sc.workload, eff=sc.efficiency)
        else:
            G.graph['node_weight_attr'] = 'cc'
            for part in self._parts:
                #sc = part.schedule
                pdop = part._max_dop
                #TODO add memory as one of the LB condition too
                cc_eval = pdop if type(pdop) == int else pdop.get('num_cpus', 1)
                G.add_node(part.partition_id, cc=cc_eval)
        # accumulate inter-partition edge weights onto the partition graph
        for e in self._part_edges:
            u = e[0]
            v = e[1]
            ugid = self._dag.node[u].get('gid', None)
            vgid = self._dag.node[v].get('gid', None)
            G.add_edge(ugid, vgid) # repeating is fine
            ew = self._dag.adj[u][v]['weight']
            try:
                G[ugid][vgid]['weight'] += ew
            except KeyError:
                G[ugid][vgid]['weight'] = ew
        #DAGUtil.metis_part(G, 15)
        # since METIS does not allow zero edge weight, reset them to one
        for e in G.edges(data=True):
            if (e[2]['weight'] == 0):
                e[2]['weight'] = 1
        #logger.debug(G.nodes(data=True))
        (edgecuts, metis_parts) = metis.part_graph(G,
                                                   nparts=num_partitions,
                                                   ufactor=1)
        for node, pt in zip(G.nodes(), metis_parts): # note min(pt) == 0
            parent_id = pt + st_gid
            child_part = self._part_dict[node]
            child_part.parent_id = parent_id
            #logger.debug("Part {0} --> Cluster {1}".format(child_part.partition_id, parent_id))
        #parent_part = Partition(parent_id, None)
        #self._parts.append(parent_part)
        #logger.debug("Edgecuts of merged partitions: ", edgecuts)
        return edgecuts
    def map_partitions(self):
        """
        map logical partitions to physical resources
        (placeholder -- no-op in the base class)
        """
        pass
class MySarkarScheduler(Scheduler):
    """
    Based on "V. Sarkar, Partitioning and Scheduling Parallel Programs for Execution on
    Multiprocessors. Cambridge, MA: MIT Press, 1989."

    Main change
    We do not order independent tasks within the same cluster. This could blow the cluster, therefore
    we allow for a cost constraint on the number of concurrent tasks (e.g. # of cores) within each cluster

    Why
    1. we only need to topologically sort the DAG once since we do not add new edges in the cluster
    2. closer to requirements
    3. adjustable for local schedulers

    Similar ideas:
    http://stackoverflow.com/questions/3974731
    """
    def __init__(self, drop_list, max_dop=8, dag=None, dump_progress=False):
        super(MySarkarScheduler, self).__init__(drop_list, max_dop=max_dop, dag=dag)
        self._sspace = [3] * len(self._dag.edges()) # all edges are not zeroed
        self._dump_progress = dump_progress
    def override_cannot_add(self):
        """
        Whether this scheduler will override the False result from `Partition.can_add()`
        """
        return False
    def is_time_critical(self, u, uw, unew, v, vw, vnew, curr_lpl, ow, rem_el):
        """
        Whether the current edge is considered time critical.

        :return: True -- MySarkarScheduler unconditionally treats every edge
            as time critical; subclasses (e.g. MinNumPartsScheduler) override
            this with a real probability estimate.
        """
        logger.debug("MySarkar time criticality called")
        return True
    def _merge_two_parts(self, ugid, vgid,
                         u, v, gu, gv, g_dict, parts, G):
        """
        Merge two parts associated with u and v respectively

        Return: None if these two parts cannot be merged
                due to reasons such as DoP overflow
                A ``Part`` instance otherwise

        Side effects: the surviving partition keeps the smaller gid; all gids
        above the removed one are shifted down by one (both in `g_dict`,
        `parts` and in the 'gid' node labels of G) to keep ids contiguous.
        """
        # get the new part should we go ahead
        # the new part should be one of partu or partv
        #print("\nMerging ugid %d and vgid %d, u %d and v %d" % (ugid, vgid, u, v))
        l_gid = min(ugid, vgid)
        r_gid = max(ugid, vgid)
        part_new = g_dict[l_gid]
        part_removed = g_dict[r_gid]
        if (not part_new.can_merge(part_removed, u, v)):
            return None
        part_new.merge(part_removed, u, v)
        # Get hold of all gnodes that belong to "part_removed"
        # and re-assign them to the new partitions
        for n in part_removed._dag.nodes():
            G.node[n]['gid'] = l_gid
        index = None
        for i, part in enumerate(parts):
            p_gid = part._gid
            if (p_gid > r_gid):
                # shift higher gids down to fill the gap left by r_gid
                g_dict[p_gid - 1] = part
                part._gid -= 1
                for n in part._dag.nodes():
                    G.node[n]['gid'] = part._gid
            elif (p_gid == r_gid):
                #index = len(parts) - i - 1
                index = i
                del g_dict[p_gid]
        if (index is None):
            raise SchedulerException("Failed to find r_gid")
        parts[:] = parts[0:index] + parts[index + 1:]
        return part_new
    def reduce_partitions(self, parts, g_dict, G):
        """
        further reduce the number of partitions by merging partitions whose max_dop
        is less than capacity

        step 1 - sort partition list based on their
                 _max_dop of num_cpus as default
        step 2 - enumerate each partition p to see merging
                 between p and its neighbour is feasible
        """
        done_reduction = False
        num_reductions = 0
        #TODO consider other w_attrs other than CPUs!
        parts.sort(key=lambda x: x._max_dop['num_cpus'])
        while (not done_reduction):
            for i, partA in enumerate(parts):
                if (i < len(parts) - 1):
                    partB = parts[i + 1]
                    new_part = self._merge_two_parts(partA._gid, partB._gid, None, None,
                                                     None, None, g_dict, parts, G)
                    if (new_part is not None):
                        num_reductions += 1
                        break # force re-sorting
                        # NOTE(review): parts is only sorted once, before this
                        # loop -- the break restarts the scan but no re-sort
                        # actually happens; confirm whether that is intended
                else:
                    done_reduction = True
                    logger.info('Performed reductions %d times', num_reductions)
                    break
    def partition_dag(self):
        """
        Return a tuple of
            1. the # of partitions formed (int)
            2. the parallel time (longest path, int)
            3. partition time (seconds, float)
            4. the list of partitions
        """
        G = self._dag
        st_gid = len(self._drop_list) + 1
        init_c = st_gid
        # process edges heaviest-first (Sarkar's edge-zeroing order)
        el = sorted(G.edges(data=True), key=lambda ed: ed[2]['weight'] * -1)
        stt = time.time()
        topo_sorted = nx.topological_sort(G)
        g_dict = self._part_dict#dict() #{gid : Partition}
        curr_lpl = None
        parts = []
        plots_data = []
        dump_progress = self._dump_progress
        # start with one singleton partition per node
        for n in G.nodes(data=True):
            n[1]['gid'] = st_gid
            part = KFamilyPartition(st_gid, self._max_dop, global_dag=G)
            part.add_node(n[0])
            g_dict[st_gid] = part
            parts.append(part) # will it get rejected?
            st_gid += 1
        for i, e in enumerate(el):
            u = e[0]
            gu = G.node[u]
            v = e[1]
            gv = G.node[v]
            ow = G.adj[u][v]['weight']
            G.adj[u][v]['weight'] = 0 #edge zeroing
            ugid = gu.get('gid', None)
            vgid = gv.get('gid', None)
            if (ugid != vgid): # merge existing parts
                part = self._merge_two_parts(ugid, vgid,
                                             u, v, gu, gv, g_dict, parts, G)
                if (part is not None):
                    st_gid -= 1
                    self._sspace[i] = 1
                else:
                    # merge rejected: restore the edge weight, it becomes
                    # an inter-partition edge
                    G.adj[u][v]['weight'] = ow
                    self._part_edges.append(e)
            if (dump_progress):
                # NOTE(review): _tmp_max_dop is a dict for KFamilyPartition;
                # np.median over dicts looks fragile -- confirm before relying
                # on dump_progress
                bb = np.median([pp._tmp_max_dop for pp in parts])
                curr_lpl = DAGUtil.get_longest_path(G, show_path=False,
                                                    topo_sort=topo_sorted)[1]
                plots_data.append('%d,%d,%d' % (curr_lpl, len(parts), bb))
        self.reduce_partitions(parts, g_dict, G)
        edt = time.time() - stt
        self._parts = parts
        if (dump_progress):
            with open('/tmp/%.3f_lpl_parts.csv' % time.time(), 'w') as of:
                of.writelines(os.linesep.join(plots_data))
        if (curr_lpl is None):
            curr_lpl = DAGUtil.get_longest_path(G, show_path=False,
                                                topo_sort=topo_sorted)[1]
        return ((st_gid - init_c), curr_lpl, edt, parts)
class MinNumPartsScheduler(MySarkarScheduler):
    """
    A special type of partition that aims to schedule the DAG on time but at minimum cost.
    In this particular case, the cost is the number of partitions that will be generated.
    The assumption is # of partitions (with certain DoP) more or less represents resource footprint.
    """
    def __init__(self, drop_list, deadline, max_dop=8, dag=None, optimistic_factor=0.5):
        """
        deadline:          completion deadline, in the same (time) unit as
                           the node/edge weights of the DAG
        optimistic_factor: threshold in (0, 1) above which an edge is deemed
                           time critical (see is_time_critical)
        """
        super(MinNumPartsScheduler, self).__init__(drop_list, max_dop=max_dop, dag=dag)
        self._deadline = deadline
        self._optimistic_factor = optimistic_factor
    def override_cannot_add(self):
        # Always override a partition's rejection: minimising the number of
        # partitions takes precedence over intra-partition DoP limits.
        return True
    def is_time_critical(self, u, uw, unew, v, vw, vnew, curr_lpl, ow, rem_el):
        """
        This is called ONLY IF either can_add on partition has returned "False"
        or the new critical path is longer than the old one at each iteration

        Parameters:
            u - node u, v - node v, uw - weight of node u, vw - weight of node v
            curr_lpl - current longest path length, ow - original edge weight
            rem_el - remaining edges to be zeroed

        Returns:
            Boolean

        It looks ahead to compute the probability of time being critical
        and compares that with the _optimistic_factor:
            probability = (num of edges need to be zeroed to meet the deadline) /
                          (num of remaining unzeroed edges)
        """
        if (unew and vnew):
            return True
        # compute time criticality probability
        ttlen = float(len(rem_el))
        if (ttlen == 0):
            return False
        c = 0
        for i, e in enumerate(rem_el):
            c = i
            # use G.adj for edge attributes (G.edge was removed in
            # NetworkX 2.x); consistent with the rest of this module
            edge_weight = self._dag.adj[e[0]][e[1]]['weight']
            if ((curr_lpl - edge_weight) <= self._deadline):
                break
        # probability that remaining edges will be zeroed in order to meet the deadline
        prob = (c + 1) / ttlen
        time_critical = True if (prob > self._optimistic_factor) else False
        # For the sequentialisation rationale behind time criticality, see:
        # Gerasoulis, A. and Yang, T., 1993. On the granularity and clustering
        # of directed acyclic task graphs. IEEE TPDS, 4(6), pp.686-701.
        return time_critical
class PSOScheduler(Scheduler):
    """
    Use the Particle Swarm Optimisation to guide the Sarkar algorithm
    https://en.wikipedia.org/wiki/Particle_swarm_optimization

    The idea is to let "edgezeroing" becomes the search variable X
    The number of dimensions of X is the number of edges in DAG
    Possible values for each dimension is a discrete set {1, 2, 3}
    where:
    * 10 - no zero (2 in base10) + 1
    * 00 - zero w/o linearisation (0 in base10) + 1
    * 01 - zero with linearisation (1 in base10) + 1

    if (deadline is present):
        the objective function sets up a partition scheme such that
        (1) DoP constrints for each partiiton are satisfied
        based on X[i] value, reject or linearisation
        (2) returns num_of_partitions
        constrain function:
        1. makespan < deadline
    else:
        the objective function sets up a partition scheme such that
        (1) DoP constrints for each partiiton are satisfied
        based on X[i] value, reject or linearisation
        (2) returns makespan
    """
    def __init__(self, drop_list, max_dop=8, dag=None, deadline=None, topk=30, swarm_size=40):
        super(PSOScheduler, self).__init__(drop_list, max_dop=max_dop, dag=dag)
        self._deadline = deadline
        #search space: key - combination of X[i] (string),
        # val - a tuple of (critical_path (int), num_parts (int))
        self._sspace_dict = dict()
        self._topk = topk
        self._swarm_size = swarm_size
        # lightweight DAG (no embedded drop specs) used for repeated copying
        self._lite_dag = DAGUtil.build_dag_from_drops(self._drop_list, embed_drop=False)
        self._call_counts = 0
        leng = len(self._lite_dag.edges())
        self._leng = leng
        # cache key only uses the first topk dimensions of X
        self._topk = leng if self._topk is None or leng < self._topk else self._topk
    def partition_dag(self):
        """
        Returns a tuple of:
            1. the # of partitions formed (int)
            2. the parallel time (longest path, int)
            3. partition time (seconds, float)
            4. a list of partitions (Partition)
        """
        # trigger the PSO algorithm
        G = self._dag
        # bounds straddle the discrete set {1, 2, 3} so rounding maps back
        lb = [0.99] * self._leng
        ub = [3.01] * self._leng
        stt = time.time()
        if (self._deadline is None):
            xopt, fopt = pso(self.objective_func, lb, ub, swarmsize=self._swarm_size)
        else:
            xopt, fopt = pso(self.objective_func, lb, ub, ieqcons=[self.constrain_func], swarmsize=self._swarm_size)
        # apply the best found scheme to the real (non-lite) DAG
        curr_lpl, num_parts, parts, g_dict = self._partition_G(G, xopt)
        #curr_lpl, num_parts, parts, g_dict = self.objective_func(xopt)
        self._part_dict = g_dict
        edt = time.time()
        #print "PSO scheduler took {0} seconds".format(edt - stt)
        st_gid = len(self._drop_list) + 1 + num_parts
        # wrap any node left un-partitioned into its own singleton partition
        for n in G.nodes(data=True):
            if not 'gid' in n[1]:
                n[1]['gid'] = st_gid
                part = Partition(st_gid, self._max_dop)
                part.add_node(n[0], n[1].get('weight', 1))
                g_dict[st_gid] = part
                parts.append(part) # will it get rejected?
                num_parts += 1
        self._parts = parts
        #print "call counts ", self._call_counts
        return (num_parts, curr_lpl, edt - stt, parts)
    def _partition_G(self, G, x):
        """
        A helper function to partition G based on a given scheme x
        subject to constraints imposed by each partition's DoP

        Returns (longest_path_length, num_partitions, partitions, gid->part dict)
        """
        #print x
        st_gid = len(self._drop_list) + 1
        init_c = st_gid
        el = sorted(G.edges(data=True), key=lambda ed: ed[2]['weight'] * -1)
        #topo_sorted = nx.topological_sort(G)
        #g_dict = self._part_dict#dict() #{gid : Partition}
        g_dict = dict()
        parts = []
        for i, e in enumerate(el):
            pos = int(round(x[i]))
            if (pos == 3): #10 non_zero + 1
                continue
            elif (pos == 2):#01 zero with linearisation + 1
                linear = True
            elif (pos == 1): #00 zero without linearisation + 1
                linear = False
            else:
                raise SchedulerException("PSO position out of bound: {0}".format(pos))
            u = e[0]
            gu = G.node[u]
            v = e[1]
            gv = G.node[v]
            ow = G.adj[u][v]['weight']
            G.adj[u][v]['weight'] = 0 #edge zeroing
            recover_edge = False
            ugid = gu.get('gid', None)
            vgid = gv.get('gid', None)
            if (ugid and (not vgid)):
                part = g_dict[ugid]
            elif ((not ugid) and vgid):
                part = g_dict[vgid]
            elif (not ugid and (not vgid)):
                part = Partition(st_gid, self._max_dop)
                g_dict[st_gid] = part
                parts.append(part) # will it get rejected?
                st_gid += 1
            else: #elif (ugid and vgid):
                # cannot change Partition once is in!
                part = None
            #uw = gu['weight']
            #vw = gv['weight']
            if (part is None):
                recover_edge = True
            else:
                ca, unew, vnew = part.can_add(u, v, gu, gv)
                if (ca):
                    # ignore linear flag, add it anyway
                    part.add(u, v, gu, gv)
                    gu['gid'] = part._gid
                    gv['gid'] = part._gid
                else:
                    if (linear):
                        part.add(u, v, gu, gv, sequential=True, global_dag=G)
                        gu['gid'] = part._gid
                        gv['gid'] = part._gid
                    else:
                        recover_edge = True #outright rejection
            if (recover_edge):
                G.adj[u][v]['weight'] = ow
                self._part_edges.append(e)
        self._call_counts += 1
        #print "called {0} times, len parts = {1}".format(self._call_counts, len(parts))
        return (DAGUtil.get_longest_path(G, show_path=False)[1], len(parts), parts, g_dict)
    def constrain_func(self, x):
        """
        Deadline - critical_path >= 0
        (pyswarm treats values >= 0 as satisfying the inequality constraint)
        """
        if (self._deadline is None):
            raise SchedulerException("Deadline is None, cannot apply constraints!")
        # memoise on the rounded first-topk dimensions of x
        sk = ''.join([str(int(round(xi))) for xi in x[0:self._topk]])
        stuff = self._sspace_dict.get(sk, None)
        if (stuff is None):
            G = self._lite_dag.copy()
            stuff = self._partition_G(G, x)
            self._sspace_dict[sk] = stuff[0:2]
            del G
        return self._deadline - stuff[0]
    def objective_func(self, x):
        """
        x is a list of values, each taking one of the 3 integers: 0,1,2 for an edge
        indices of x is identical to the indices in G.edges().sort(key='weight')

        Returns the makespan when no deadline is set, otherwise the number of
        partitions (the deadline is enforced by constrain_func).
        """
        # first check if the solution is already available in the search space
        sk = ''.join([str(int(round(xi))) for xi in x[0:self._topk]])
        stuff = self._sspace_dict.get(sk, None) #TODO is this atomic operation?
        if (stuff is None):
            # make a deep copy to avoid mix up multiple particles,
            # each of which has multiple iterations
            G = self._lite_dag.copy()
            stuff = self._partition_G(G, x)
            self._sspace_dict[sk] = stuff[0:2]
            del G
        if (self._deadline is None):
            return stuff[0]
        else:
            return stuff[1]
class DAGUtil(object):
    """
    Helper functions dealing with DAG (static methods only)
    """
    @staticmethod
    def get_longest_path(G, weight='weight', default_weight=1, show_path=True, topo_sort=None):
        """
        Ported from:
        https://github.com/networkx/networkx/blob/master/networkx/algorithms/dag.py
        Added node weight

        Returns the longest path in a DAG
        If G has edges with 'weight' attribute the edge data are used as weight values.

        :param: G Graph (NetworkX DiGraph)
        :param: weight Edge data key to use for weight (string)
        :param: default_weight The weight of edges that do not have a weight attribute (integer)
        :param: show_path whether to reconstruct the node sequence (list) or
                return None for it
        :param: topo_sort optional pre-computed topological order of G
        :return: a tuple with two elements: `path` (list), the longest path, and
                `path_length` (float) the length of the longest path.
        """
        dist = {} # stores {v : (length, u)}
        if (topo_sort is None):
            topo_sort = nx.topological_sort(G)
        for v in topo_sort:
            us = [
                (dist[u][0] + #accumulate
                data.get(weight, default_weight) + #edge weight
                G.node[u].get(weight, 0) + # u node weight
                (G.node[v].get(weight, 0) if len(list(G.successors(v))) == 0 else 0), # v node weight if no successor
                u)
                for u, data in G.pred[v].items()]
            # Use the best predecessor if there is one and its distance is non-negative, otherwise terminate.
            maxu = max(us) if us else (0, v)
            dist[v] = maxu if maxu[0] >= 0 else (0, v)
        u = None
        v = max(dist, key=dist.get)
        lp = dist[v][0]
        if (not show_path):
            path = None
        else:
            # walk the predecessor links back from the terminal node
            path = []
            while u != v:
                path.append(v)
                u = v
                v = dist[v][1]
            path.reverse()
        return (path, lp)
    @staticmethod
    def get_max_width(G, weight='weight', default_weight=1):
        """
        Get the antichain with the maximum "weighted" width of this DAG
        weight: float (for example, it could be RAM consumption in GB)
        Return : float
        """
        max_width = 0
        for antichain in nx.antichains(G):
            t = 0
            for n in antichain:
                t += G.node[n].get(weight, default_weight)
            if (t > max_width):
                max_width = t
        return max_width
    @staticmethod
    def get_max_dop(G):
        """
        Get the maximum degree of parallelism of this DAG,
        i.e. the length of the longest antichain
        return : int
        """
        return max([len(antichain) for antichain in nx.antichains(G)])
    @staticmethod
    def get_max_antichains(G):
        """
        return a list of antichains with Top-2 lengths
        """
        return DAGUtil.prune_antichains(nx.antichains(G))
    @staticmethod
    def prune_antichains(antichains):
        """
        Prune a list of antichains to keep those with Top-2 lengths
        antichains is a Generator (not a list!)

        NOTE: currently returns ALL antichains sorted by length (descending);
        no actual pruning to the Top-2 lengths is performed.
        """
        todo = []
        for antichain in antichains:
            todo.append(antichain)
        todo.sort(key=lambda x : len(x), reverse=True)
        return todo
    @staticmethod
    def label_schedule(G, weight='weight', topo_sort=None):
        """
        for each node, label its start and end time ('stt'/'edt' attributes)
        """
        if (topo_sort is None):
            topo_sort = nx.topological_sort(G)
        for v in topo_sort:
            gv = G.node[v]
            parents = list(G.predecessors(v))
            if (len(parents) == 0):
                gv['stt'] = 0
            else:
                # get the latest end time of one of its parents
                ledt = -1
                for parent in parents:
                    pedt = G.node[parent]['edt'] + G.adj[parent][v].get(weight, 0)
                    if (pedt > ledt):
                        ledt = pedt
                gv['stt'] = ledt
            gv['edt'] = gv['stt'] + gv.get(weight, 0)
    @staticmethod
    def ganttchart_matrix(G, topo_sort=None):
        """
        Return a M (# of DROPs) by N (longest path length) matrix
        where row i is 1 in the time slots during which node i executes
        """
        lpl = DAGUtil.get_longest_path(G, show_path=True)
        #N = lpl[1] - (len(lpl[0]) - 1)
        N = lpl[1]
        M = G.number_of_nodes()
        # NOTE: np.int was removed in NumPy 1.20; use the builtin int,
        # consistent with Schedule.schedule_matrix
        ma = np.zeros((M, N), dtype=int)
        if (topo_sort is None):
            topo_sort = nx.topological_sort(G)
        for i, n in enumerate(topo_sort):
            node = G.node[n]
            try:
                stt = node['stt']
                edt = node['edt']
            except KeyError as ke:
                raise SchedulerException("No schedule labels found: {0}".\
                    format(str(ke)))
            #print i, n, stt, edt
            leng = edt - stt
            if (edt == stt):
                continue
            try:
                ma[i, stt:edt] = np.ones((1, leng))
            except Exception:
                logger.error("i, stt, edt, leng = %d, %d, %d, %d", i, stt, edt, leng)
                logger.error("N, M = %d, %d", M, N)
                raise
            #print ma[i, :]
        return ma
    @staticmethod
    def import_metis():
        """
        Import the metis binding, falling back to the bundled libmetis shared
        library when the system one is unavailable. The returned module's
        part_graph is patched (once) to log its run time.
        """
        try:
            import metis as mt
        except:
            pl = platform.platform()
            if (pl.startswith('Darwin')): # a clumsy way
                ext = 'dylib'
            else:
                ext = 'so' # what about Microsoft??!!
            os.environ["METIS_DLL"] = pkg_resources.resource_filename('dlg.dropmake', 'lib/libmetis.{0}'.format(ext))  # @UndefinedVariable
            import metis as mt
        if not hasattr(mt, '_dlg_patched'):
            mt._part_graph = mt.part_graph
            def logged_part_graph(*args, **kwargs):
                logger.info('Starting metis partitioning')
                start = time.time()
                ret = mt._part_graph(*args, **kwargs) # @UndefinedVariable
                logger.info('Finished metis partitioning in %.3f [s]', time.time() - start)
                return ret
            mt.part_graph = logged_part_graph
            mt._dlg_patched = True
        return mt
    @staticmethod
    def build_dag_from_drops(drop_list, embed_drop=True, fake_super_root=False):
        """
        return a networkx Digraph (DAG)

        :param: embed_drop whether to store each drop spec on its node
        :param: fake_super_root whether to create a fake super root node in the DAG
                If set to True, it enables edge zero-based scheduling agorithms to make
                more aggressive merging
        """
        # tw - task weight
        # dw - data weight / volume
        key_dict = dict() # {oid : node_id}
        drop_dict = dict() # {oid : drop}
        out_bound_keys = ['streamingConsumers', 'consumers', 'outputs']
        for i, drop in enumerate(drop_list):
            oid = drop['oid']
            key_dict[oid] = i + 1 # starting from 1
            drop_dict[oid] = drop
        G = nx.DiGraph()
        for i, drop in enumerate(drop_list):
            oid = drop['oid']
            myk = i + 1
            tt = drop["type"]
            if (DropType.PLAIN == tt):
                # data drops carry no task weight
                tw = 0
                dtp = 0
            elif (DropType.APP == tt):
                tw = int(drop['tw'])
                dtp = 1
            elif DropType.SERVICE_APP == tt:
                tw = int(drop['tw'])
                dtp = 1
            else:
                raise SchedulerException("Drop Type '{0}' not supported".\
                    format(tt))
            num_cpus = drop.get('num_cpus', 1)
            if (embed_drop):
                G.add_node(myk, weight=tw, text=drop['nm'], dt=dtp,
                           drop_spec=drop, num_cpus=num_cpus)
            else:
                G.add_node(myk, weight=tw, text=drop['nm'], dt=dtp,
                           num_cpus=num_cpus)
            # data drops contribute their own volume as edge weight; app
            # drops use the consumer data drop's volume instead
            for obk in out_bound_keys:
                if obk in drop:
                    for oup in drop[obk]:
                        if (DropType.PLAIN == tt):
                            G.add_weighted_edges_from([(myk, key_dict[oup], int(drop['dw']))])
                        elif (DropType.APP == tt):
                            G.add_weighted_edges_from([(myk, key_dict[oup], int(drop_dict[oup].get('dw', 5)))])
        if (fake_super_root):
            super_root = dropdict({'oid':'-92', "type": DropType.PLAIN, 'storage':'null'})
            super_k = len(drop_list) + 1
            G.add_node(super_k, weight=0, dtp=0, drop_spec=super_root,
                       num_cpus=0, text='fake_super_root')
            for oup in get_roots(drop_list):
                G.add_weighted_edges_from([(super_k, key_dict[oup], 1)])
        return G
    @staticmethod
    def metis_part(G, num_partitions):
        """
        Use metis binary executable (instead of library)
        This is used only for testing when libmetis halts unexpectedly

        Writes the graph in METIS input format to /tmp/mm.
        """
        outf = '/tmp/mm'
        lines = []
        part_id_line_dict = dict() # {part_id: line_num}
        line_part_id_dict = dict()
        for i, n in enumerate(G.nodes()):
            part_id_line_dict[n] = i + 1
            line_part_id_dict[i + 1] = n
        for i, node in enumerate(G.nodes(data=True)):
            n = node[0]
            line = []
            line.append(str(node[1]['wkl']))
            line.append(str(node[1]['eff']))
            for m in G.neighbors(n):
                line.append(str(part_id_line_dict[m]))
                a = G[m][n]['weight']
                if (0 == a):
                    logger.debug("G[%d][%d]['weight'] = %f", m, n, a)
                line.append(str(G[m][n]['weight']))
            lines.append(" ".join(line))
        # METIS header: <#vertices> <#edges> <fmt=011 (weighted)> <#vertex weights>
        header = "{0} {1} 011 2".format(len(G.nodes()), len(G.edges()))
        lines.insert(0, header)
        with open(outf, "w") as f:
            f.write("\n".join(lines))
if __name__ == "__main__":
    # Ad-hoc smoke test for DAGUtil: build a small weighted DAG and exercise
    # the analysis helpers (longest path, width, DOP, schedule labelling).
    G = nx.DiGraph()
    G.add_weighted_edges_from([(4,3,1), (3,2,4), (2,1,2), (5,3,1)])
    G.add_weighted_edges_from([(3,6,5), (6,7,2)])
    G.add_weighted_edges_from([(9,12,2)]) # testing independent nodes
    # NOTE(review): 'G.node[...]' and subscripting 'G.predecessors(...)' only
    # work with networkx 1.x; networkx >= 2.0 needs 'G.nodes[...]' and
    # 'list(G.predecessors(12))[0]' - confirm the pinned networkx version.
    G.node[3]['weight'] = 65
    print(G.pred[12].items())
    print(G.node[G.predecessors(12)[0]])
    # print "prepre"
    # print len(G.pred[7].items())
    # print G.predecessors(7)
    # print G.pred[7].items()
    # print ""
    #
    # print G.nodes(data=True)
    # print G.edges(data=True)
    print("topological sort\n")
    print(nx.topological_sort(G))
    # for i, v in enumerate(nx.topological_sort(G)):
    #     print i, v
    lp = DAGUtil.get_longest_path(G)
    print("The longest path is {0} with a length of {1}".format(lp[0], lp[1]))
    mw = DAGUtil.get_max_width(G)
    dop = DAGUtil.get_max_dop(G)
    print("The max (weighted) width = {0}, and the max degree of parallelism = {1}".format(mw, dop))
    DAGUtil.label_schedule(G)
    print(G.nodes(data=True))
    gantt_matrix = DAGUtil.ganttchart_matrix(G)
    print(gantt_matrix)
    print(gantt_matrix.shape)
    # sch = Schedule(G, 5)
    # sch_mat = sch.schedule_matrix
    # print sch_mat
    # print sch_mat.shape
    #print DAGUtil.prune_antichains([[], [64], [62], [62, 64], [61], [61, 64], [61, 62], [61, 62, 64], [5], [1]])
| steve-ord/daliuge | daliuge-translator/dlg/dropmake/scheduler.py | Python | lgpl-2.1 | 48,617 |
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import StringIO
import sys
import fixtures
import mock
from nova.cmd import manage
from nova import context
from nova import db
from nova.db import migration
from nova.db.sqlalchemy import migration as sqla_migration
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_network
from nova.tests.unit import test_flavors
class FixedIpCommandsTestCase(test.TestCase):
    """Tests for the ``nova-manage fixed`` command set."""

    def setUp(self):
        super(FixedIpCommandsTestCase, self).setUp()
        db_fakes.stub_out_db_network_api(self.stubs)
        self.commands = manage.FixedIpCommands()

    def _lookup(self, address):
        """Fetch the fixed-ip record for *address* as the admin user."""
        return db.fixed_ip_get_by_address(context.get_admin_context(),
                                          address)

    def _capture_stdout(self):
        """Redirect sys.stdout to a fresh StringIO for this test."""
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO()))

    def test_reserve(self):
        self.commands.reserve('192.168.0.100')
        record = self._lookup('192.168.0.100')
        self.assertEqual(record['reserved'], True)

    def test_reserve_nonexistent_address(self):
        ret = self.commands.reserve('55.55.55.55')
        self.assertEqual(2, ret)

    def test_unreserve(self):
        self.commands.unreserve('192.168.0.100')
        record = self._lookup('192.168.0.100')
        self.assertEqual(record['reserved'], False)

    def test_unreserve_nonexistent_address(self):
        ret = self.commands.unreserve('55.55.55.55')
        self.assertEqual(2, ret)

    def test_list(self):
        self._capture_stdout()
        self.commands.list()
        self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))

    def test_list_just_one_host(self):
        def fake_fixed_ip_get_by_host(*args, **kwargs):
            return [db_fakes.fixed_ip_fields]

        self.useFixture(fixtures.MonkeyPatch(
            'nova.db.fixed_ip_get_by_host',
            fake_fixed_ip_get_by_host))
        self._capture_stdout()
        self.commands.list('banana')
        self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
class FloatingIpCommandsTestCase(test.TestCase):
    """Tests for the ``nova-manage floating`` command set."""

    def setUp(self):
        super(FloatingIpCommandsTestCase, self).setUp()
        db_fakes.stub_out_db_network_api(self.stubs)
        self.commands = manage.FloatingIpCommands()

    def test_address_to_hosts(self):
        """address_to_hosts() must reject too-small (/32, /31) and huge (/12)
        subnets, and enumerate only the usable host addresses otherwise.
        """
        def assert_loop(result, expected):
            # every generated address must belong to the expected set
            for ip in result:
                self.assertIn(str(ip), expected)
        address_to_hosts = self.commands.address_to_hosts
        # /32 and /31
        self.assertRaises(exception.InvalidInput, address_to_hosts,
                          '192.168.100.1/32')
        self.assertRaises(exception.InvalidInput, address_to_hosts,
                          '192.168.100.1/31')
        # /30
        expected = ["192.168.100.%s" % i for i in range(1, 3)]
        result = address_to_hosts('192.168.100.0/30')
        # NOTE(review): if address_to_hosts returned a one-shot generator,
        # len(list(result)) would exhaust it and assert_loop would iterate
        # nothing; this presumably returns a re-iterable object - confirm.
        self.assertEqual(2, len(list(result)))
        assert_loop(result, expected)
        # /29
        expected = ["192.168.100.%s" % i for i in range(1, 7)]
        result = address_to_hosts('192.168.100.0/29')
        self.assertEqual(6, len(list(result)))
        assert_loop(result, expected)
        # /28
        expected = ["192.168.100.%s" % i for i in range(1, 15)]
        result = address_to_hosts('192.168.100.0/28')
        self.assertEqual(14, len(list(result)))
        assert_loop(result, expected)
        # /16
        result = address_to_hosts('192.168.100.0/16')
        self.assertEqual(65534, len(list(result)))
        # NOTE(dripton): I don't test /13 because it makes the test take 3s.
        # /12 gives over a million IPs, which is ridiculous.
        self.assertRaises(exception.InvalidInput, address_to_hosts,
                          '192.168.100.1/12')
class NetworkCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage network`` against the nova-network backend."""

    def setUp(self):
        super(NetworkCommandsTestCase, self).setUp()
        self.commands = manage.NetworkCommands()
        # canned network record shared by the list/delete/modify tests
        self.net = {'id': 0,
                    'label': 'fake',
                    'injected': False,
                    'cidr': '192.168.0.0/24',
                    'cidr_v6': 'dead:beef::/64',
                    'multi_host': False,
                    'gateway_v6': 'dead:beef::1',
                    'netmask_v6': '64',
                    'netmask': '255.255.255.0',
                    'bridge': 'fa0',
                    'bridge_interface': 'fake_fa0',
                    'gateway': '192.168.0.1',
                    'broadcast': '192.168.0.255',
                    'dns1': '8.8.8.8',
                    'dns2': '8.8.4.4',
                    'vlan': 200,
                    'vlan_start': 201,
                    'vpn_public_address': '10.0.0.2',
                    'vpn_public_port': '2222',
                    'vpn_private_address': '192.168.0.2',
                    'dhcp_start': '192.168.0.3',
                    'project_id': 'fake_project',
                    'host': 'fake_host',
                    'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}

        # The fake db accessors below verify they are called with an admin
        # context and the expected identifier.  Tests must assign
        # self.fake_net (and self.fake_update_value for updates) before
        # stubbing them in.
        def fake_network_get_by_cidr(context, cidr):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(cidr, self.fake_net['cidr'])
            return db_fakes.FakeModel(dict(test_network.fake_network,
                                           **self.fake_net))

        def fake_network_get_by_uuid(context, uuid):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(uuid, self.fake_net['uuid'])
            return db_fakes.FakeModel(dict(test_network.fake_network,
                                           **self.fake_net))

        def fake_network_update(context, network_id, values):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(network_id, self.fake_net['id'])
            self.assertEqual(values, self.fake_update_value)
        self.fake_network_get_by_cidr = fake_network_get_by_cidr
        self.fake_network_get_by_uuid = fake_network_get_by_uuid
        self.fake_network_update = fake_network_update

    def test_create(self):
        # the fake manager hook checks every CLI argument arrives converted
        # to its expected type/value (e.g. multi_host 'F' -> False)
        def fake_create_networks(obj, context, **kwargs):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(kwargs['label'], 'Test')
            self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
            self.assertEqual(kwargs['multi_host'], False)
            self.assertEqual(kwargs['num_networks'], 1)
            self.assertEqual(kwargs['network_size'], 256)
            self.assertEqual(kwargs['vlan'], 200)
            self.assertEqual(kwargs['vlan_start'], 201)
            self.assertEqual(kwargs['vpn_start'], 2000)
            self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
            self.assertEqual(kwargs['gateway'], '10.2.0.1')
            self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
            self.assertEqual(kwargs['bridge'], 'br200')
            self.assertEqual(kwargs['bridge_interface'], 'eth0')
            self.assertEqual(kwargs['dns1'], '8.8.8.8')
            self.assertEqual(kwargs['dns2'], '8.8.4.4')
        self.flags(network_manager='nova.network.manager.VlanManager')
        from nova.network import manager as net_manager
        self.stubs.Set(net_manager.VlanManager, 'create_networks',
                       fake_create_networks)
        self.commands.create(
            label='Test',
            cidr='10.2.0.0/24',
            num_networks=1,
            network_size=256,
            multi_host='F',
            vlan=200,
            vlan_start=201,
            vpn_start=2000,
            cidr_v6='fd00:2::/120',
            gateway='10.2.0.1',
            gateway_v6='fd00:2::22',
            bridge='br200',
            bridge_interface='eth0',
            dns1='8.8.8.8',
            dns2='8.8.4.4',
            uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')

    def test_list(self):
        # capture stdout and compare the full tab-separated table
        def fake_network_get_all(context):
            return [db_fakes.FakeModel(self.net)]
        self.stubs.Set(db, 'network_get_all', fake_network_get_all)
        output = StringIO()
        sys.stdout = output
        self.commands.list()
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
                          "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
                          "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
        head = _fmt % {'id': 'id',
                       'cidr': 'IPv4',
                       'cidr_v6': 'IPv6',
                       'dhcp_start': 'start address',
                       'dns1': 'DNS1',
                       'dns2': 'DNS2',
                       'vlan': 'VlanID',
                       'project_id': 'project',
                       'uuid': "uuid"}
        body = _fmt % {'id': self.net['id'],
                       'cidr': self.net['cidr'],
                       'cidr_v6': self.net['cidr_v6'],
                       'dhcp_start': self.net['dhcp_start'],
                       'dns1': self.net['dns1'],
                       'dns2': self.net['dns2'],
                       'vlan': self.net['vlan'],
                       'project_id': self.net['project_id'],
                       'uuid': self.net['uuid']}
        answer = '%s\n%s\n' % (head, body)
        self.assertEqual(result, answer)

    def test_delete(self):
        # deleting requires the network to be disassociated first
        self.fake_net = self.net
        self.fake_net['project_id'] = None
        self.fake_net['host'] = None
        self.stubs.Set(db, 'network_get_by_uuid',
                       self.fake_network_get_by_uuid)

        def fake_network_delete_safe(context, network_id):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(network_id, self.fake_net['id'])
        self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
        self.commands.delete(uuid=self.fake_net['uuid'])

    def test_delete_by_cidr(self):
        self.fake_net = self.net
        self.fake_net['project_id'] = None
        self.fake_net['host'] = None
        self.stubs.Set(db, 'network_get_by_cidr',
                       self.fake_network_get_by_cidr)

        def fake_network_delete_safe(context, network_id):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(network_id, self.fake_net['id'])
        self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
        self.commands.delete(fixed_range=self.fake_net['cidr'])

    def _test_modify_base(self, update_value, project, host, dis_project=None,
                          dis_host=None):
        # shared driver: fake_network_update asserts it receives exactly
        # self.fake_update_value
        self.fake_net = self.net
        self.fake_update_value = update_value
        self.stubs.Set(db, 'network_get_by_cidr',
                       self.fake_network_get_by_cidr)
        self.stubs.Set(db, 'network_update', self.fake_network_update)
        self.commands.modify(self.fake_net['cidr'], project=project, host=host,
                             dis_project=dis_project, dis_host=dis_host)

    def test_modify_associate(self):
        self._test_modify_base(update_value={'project_id': 'test_project',
                                             'host': 'test_host'},
                               project='test_project', host='test_host')

    def test_modify_unchanged(self):
        self._test_modify_base(update_value={}, project=None, host=None)

    def test_modify_disassociate(self):
        self._test_modify_base(update_value={'project_id': None, 'host': None},
                               project=None, host=None, dis_project=True,
                               dis_host=True)
class NeutronV2NetworkCommandsTestCase(test.TestCase):
    """With Neutron configured, the network sub-commands are unsupported
    and must exit with status 2.
    """

    def setUp(self):
        super(NeutronV2NetworkCommandsTestCase, self).setUp()
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        self.commands = manage.NetworkCommands()

    def test_create(self):
        ret = self.commands.create()
        self.assertEqual(2, ret)

    def test_list(self):
        ret = self.commands.list()
        self.assertEqual(2, ret)

    def test_delete(self):
        ret = self.commands.delete()
        self.assertEqual(2, ret)

    def test_modify(self):
        ret = self.commands.modify('192.168.0.1')
        self.assertEqual(2, ret)
class ProjectCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage project quota``."""

    def setUp(self):
        super(ProjectCommandsTestCase, self).setUp()
        self.commands = manage.ProjectCommands()

    def test_quota(self):
        captured = StringIO()
        sys.stdout = captured
        try:
            self.commands.quota(project_id='admin',
                                key='instances',
                                value='unlimited',
                                )
        finally:
            sys.stdout = sys.__stdout__
        expected_row = "%-36s %-10s" % ('instances', 'unlimited')
        self.assertIn(expected_row, captured.getvalue())

    def test_quota_update_invalid_key(self):
        ret = self.commands.quota('admin', 'volumes1', '10')
        self.assertEqual(2, ret)
class VmCommandsTestCase(test.TestCase):
    """Tests for the ``nova-manage vm list`` output."""

    def setUp(self):
        super(VmCommandsTestCase, self).setUp()
        self.commands = manage.VmCommands()
        self.fake_flavor = objects.Flavor(**test_flavors.DEFAULT_FLAVORS[0])

    def test_list_without_host(self):
        # capture stdout; the InstanceList patch must stay active while
        # self.commands.list() runs, hence the call inside the with block
        output = StringIO()
        sys.stdout = output
        with mock.patch.object(objects.InstanceList, 'get_by_filters') as get:
            get.return_value = objects.InstanceList(
                objects=[fake_instance.fake_instance_obj(
                    context.get_admin_context(), host='foo-host',
                    flavor=self.fake_flavor,
                    system_metadata={})])
            self.commands.list()
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        self.assertIn('node', result) # check the header line
        self.assertIn('m1.tiny', result) # flavor.name
        self.assertIn('foo-host', result)

    def test_list_with_host(self):
        output = StringIO()
        sys.stdout = output
        with mock.patch.object(objects.InstanceList, 'get_by_host') as get:
            get.return_value = objects.InstanceList(
                objects=[fake_instance.fake_instance_obj(
                    context.get_admin_context(),
                    flavor=self.fake_flavor,
                    system_metadata={})])
            self.commands.list(host='fake-host')
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        self.assertIn('node', result) # check the header line
        self.assertIn('m1.tiny', result) # flavor.name
        self.assertIn('fake-host', result)
class DBCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage db`` (targets the 'main' database)."""

    def setUp(self):
        super(DBCommandsTestCase, self).setUp()
        self.commands = manage.DbCommands()

    def test_archive_deleted_rows_negative(self):
        # a negative max_rows must be rejected with exit status 1
        self.assertEqual(1, self.commands.archive_deleted_rows(-1))

    @mock.patch.object(migration, 'db_null_instance_uuid_scan',
                       return_value={'foo': 0})
    def test_null_instance_uuid_scan_no_records_found(self, mock_scan):
        self.useFixture(fixtures.MonkeyPatch('sys.stdout',
                                             StringIO()))
        self.commands.null_instance_uuid_scan()
        self.assertIn("There were no records found", sys.stdout.getvalue())

    # NOTE: the patch decorator sits on this shared helper (not on the
    # test_* methods) so both the read-only and delete variants run against
    # the same fake scan result.
    @mock.patch.object(migration, 'db_null_instance_uuid_scan',
                       return_value={'foo': 1, 'bar': 0})
    def _test_null_instance_uuid_scan(self, mock_scan, delete):
        self.useFixture(fixtures.MonkeyPatch('sys.stdout',
                                             StringIO()))
        self.commands.null_instance_uuid_scan(delete)
        output = sys.stdout.getvalue()
        if delete:
            self.assertIn("Deleted 1 records from table 'foo'.", output)
            self.assertNotIn("Deleted 0 records from table 'bar'.", output)
        else:
            self.assertIn("1 records in the 'foo' table", output)
            self.assertNotIn("0 records in the 'bar' table", output)
        self.assertNotIn("There were no records found", output)

    def test_null_instance_uuid_scan_readonly(self):
        self._test_null_instance_uuid_scan(delete=False)

    def test_null_instance_uuid_scan_delete(self):
        self._test_null_instance_uuid_scan(delete=True)

    @mock.patch.object(sqla_migration, 'db_version', return_value=2)
    def test_version(self, sqla_migrate):
        self.commands.version()
        sqla_migrate.assert_called_once_with(database='main')

    @mock.patch.object(sqla_migration, 'db_sync')
    def test_sync(self, sqla_sync):
        self.commands.sync(version=4)
        sqla_sync.assert_called_once_with(version=4, database='main')
class ApiDbCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage api_db`` (targets the 'api' database)."""

    def setUp(self):
        super(ApiDbCommandsTestCase, self).setUp()
        self.commands = manage.ApiDbCommands()

    @mock.patch.object(sqla_migration, 'db_version', return_value=2)
    def test_version(self, mock_db_version):
        self.commands.version()
        mock_db_version.assert_called_once_with(database='api')

    @mock.patch.object(sqla_migration, 'db_sync')
    def test_sync(self, mock_db_sync):
        self.commands.sync(version=4)
        mock_db_sync.assert_called_once_with(version=4, database='api')
class ServiceCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage service`` argument validation."""

    def setUp(self):
        super(ServiceCommandsTestCase, self).setUp()
        self.commands = manage.ServiceCommands()

    def test_service_enable_invalid_params(self):
        ret = self.commands.enable('nohost', 'noservice')
        self.assertEqual(2, ret)

    def test_service_disable_invalid_params(self):
        ret = self.commands.disable('nohost', 'noservice')
        self.assertEqual(2, ret)
class CellCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage cell`` transport-URL handling."""

    def setUp(self):
        super(CellCommandsTestCase, self).setUp()
        self.commands = manage.CellCommands()

    def test_create_transport_hosts_multiple(self):
        """Test the _create_transport_hosts method
        when broker_hosts is set.
        """
        brokers = "127.0.0.1:5672,127.0.0.2:5671"
        thosts = self.commands._create_transport_hosts(
            'guest', 'devstack',
            broker_hosts=brokers)
        self.assertEqual(2, len(thosts))
        self.assertEqual('127.0.0.1', thosts[0].hostname)
        self.assertEqual(5672, thosts[0].port)
        self.assertEqual('127.0.0.2', thosts[1].hostname)
        self.assertEqual(5671, thosts[1].port)

    def test_create_transport_hosts_single(self):
        """Test the _create_transport_hosts method when hostname is passed."""
        thosts = self.commands._create_transport_hosts('guest', 'devstack',
                                                       hostname='127.0.0.1',
                                                       port=80)
        self.assertEqual(1, len(thosts))
        self.assertEqual('127.0.0.1', thosts[0].hostname)
        self.assertEqual(80, thosts[0].port)

    def test_create_transport_hosts_single_broker(self):
        """Test the _create_transport_hosts method for single broker_hosts."""
        thosts = self.commands._create_transport_hosts(
            'guest', 'devstack',
            broker_hosts='127.0.0.1:5672')
        self.assertEqual(1, len(thosts))
        self.assertEqual('127.0.0.1', thosts[0].hostname)
        self.assertEqual(5672, thosts[0].port)

    def test_create_transport_hosts_both(self):
        """Test the _create_transport_hosts method when both broker_hosts
        and hostname/port are passed.

        broker_hosts takes precedence over hostname/port.
        """
        thosts = self.commands._create_transport_hosts(
            'guest', 'devstack',
            broker_hosts='127.0.0.1:5672',
            hostname='127.0.0.2', port=80)
        self.assertEqual(1, len(thosts))
        self.assertEqual('127.0.0.1', thosts[0].hostname)
        self.assertEqual(5672, thosts[0].port)

    def test_create_transport_hosts_wrong_val(self):
        """Test the _create_transport_hosts method when broker_hosts
        is wrongly specified (second entry lacks a port).
        """
        self.assertRaises(ValueError,
                          self.commands._create_transport_hosts,
                          'guest', 'devstack',
                          broker_hosts='127.0.0.1:5672,127.0.0.1')

    def test_create_transport_hosts_wrong_port_val(self):
        """Test the _create_transport_hosts method when port in
        broker_hosts is wrongly specified (empty port).
        """
        self.assertRaises(ValueError,
                          self.commands._create_transport_hosts,
                          'guest', 'devstack',
                          broker_hosts='127.0.0.1:')

    def test_create_transport_hosts_wrong_port_arg(self):
        """Test the _create_transport_hosts method when the port
        argument is wrongly specified (non-numeric).
        """
        self.assertRaises(ValueError,
                          self.commands._create_transport_hosts,
                          'guest', 'devstack',
                          hostname='127.0.0.1', port='ab')

    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(db, 'cell_create')
    def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt):
        """Test the create function when broker_hosts is
        passed
        """
        cell_tp_url = "fake://guest:devstack@127.0.0.1:5432"
        cell_tp_url += ",guest:devstack@127.0.0.2:9999/"
        # NOTE(review): 'mock.sentinel' here is the sentinel *factory* object
        # itself (not a named sentinel like mock.sentinel.ctxt); the equality
        # in assert_called_once_with holds because both names reference the
        # same object.
        ctxt = mock.sentinel
        mock_ctxt.return_value = mock.sentinel
        self.commands.create("test",
                             broker_hosts='127.0.0.1:5432,127.0.0.2:9999',
                             woffset=0, wscale=0,
                             username="guest", password="devstack")
        exp_values = {'name': "test",
                      'is_parent': False,
                      'transport_url': cell_tp_url,
                      'weight_offset': 0.0,
                      'weight_scale': 0.0}
        mock_db_cell_create.assert_called_once_with(ctxt, exp_values)

    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(db, 'cell_create')
    def test_create_broker_hosts_with_url_decoding_fix(self,
                                                       mock_db_cell_create,
                                                       mock_ctxt):
        """Test the create function when broker_hosts is passed and the
        credentials contain '=' characters, which must survive in the
        resulting transport_url.
        """
        cell_tp_url = "fake://the=user:the=password@127.0.0.1:5432/"
        ctxt = mock.sentinel
        mock_ctxt.return_value = mock.sentinel
        self.commands.create("test",
                             broker_hosts='127.0.0.1:5432',
                             woffset=0, wscale=0,
                             username="the=user",
                             password="the=password")
        exp_values = {'name': "test",
                      'is_parent': False,
                      'transport_url': cell_tp_url,
                      'weight_offset': 0.0,
                      'weight_scale': 0.0}
        mock_db_cell_create.assert_called_once_with(ctxt, exp_values)

    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(db, 'cell_create')
    def test_create_hostname(self, mock_db_cell_create, mock_ctxt):
        """Test the create function when hostname and port is
        passed
        """
        cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/"
        ctxt = mock.sentinel
        mock_ctxt.return_value = mock.sentinel
        self.commands.create("test",
                             hostname='127.0.0.1', port="9999",
                             woffset=0, wscale=0,
                             username="guest", password="devstack")
        exp_values = {'name': "test",
                      'is_parent': False,
                      'transport_url': cell_tp_url,
                      'weight_offset': 0.0,
                      'weight_scale': 0.0}
        mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
| scripnichenko/nova | nova/tests/unit/test_nova_manage.py | Python | apache-2.0 | 25,186 |
# BUG FIX: the original read "from future import __annotations__", which
# tries to import a name from the third-party 'future' package and fails at
# runtime.  The intended statement is the postponed-annotations future
# import, which must also be the first statement in the module.
from __future__ import annotations

import functools
import os
import re
from pathlib import Path
from typing import Optional, Union

from .classes import *
class ValidateBusinessMails:
    '''
    Validate e-mail syntax and tell business addresses apart from
    free-provider ones.

    This class provides 3 properties and 2 independent functions
        - Properties: email, email_verifier, business_email_verifier
        - Independent Functions: validate_email, validate_business_email
    Usage:
        - validate_email:
            ValidateBusinessMails.validate_email('examplemail@domain.com')
        - validate_business_email:
            ValidateBusinessMails.validate_business_email('examplemail@domain.com')
        - email_verifier:
            ValidateBusinessMails('examplemail@domain.com').email_verifier
        - business_email_verifier:
            ValidateBusinessMails('examplemail@domain.com').business_email_verifier
    '''

    # Only real instance attributes belong in __slots__.  The original also
    # listed the property names, which makes class creation fail with
    # "ValueError: 'email_verifier' in __slots__ conflicts with class
    # variable".
    __slots__ = ['email']

    # A full match against this pattern means "syntactically valid e-mail".
    _EMAIL_REGEX = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'

    # Bundled list of free e-mail provider domains (one domain per line),
    # looked up relative to the package's parent directory.
    _FREE_PROVIDERS_FILE = 'freeEmailService.txt'

    def __init__(self, email: str) -> None:
        self.email = email

    def __str__(self) -> str:
        return f'<Validate Business Mails | {self.email}>'

    @staticmethod
    def _load_free_provider_domains() -> list:
        '''Read the free-provider data set and return its domains, lower-cased.'''
        base_dir = Path(__file__).resolve().parent.parent
        data_set_path = base_dir / ValidateBusinessMails._FREE_PROVIDERS_FILE
        with open(data_set_path, 'r') as f:
            return [domain.strip('\n').strip(' ').lower()
                    for domain in f.readlines()]

    @staticmethod
    @functools.lru_cache(maxsize=2)
    def validate_email(email: str) -> 'Optional[Union[ValidEmail, InvalidEmail]]':
        '''
        This is function which validate if is it a email or not.
        It raises `Union[ValidEmail, InvalidEmail]` (the project's
        signal-by-exception convention): ValidEmail when the address
        matches the pattern, InvalidEmail otherwise.
        The function is also cached using lru_cache.
        '''
        # BUG FIX: the original raised InvalidEmail on a *successful* regex
        # match - the outcome was inverted.
        if re.fullmatch(ValidateBusinessMails._EMAIL_REGEX, email):
            raise ValidEmail(email)
        raise InvalidEmail(email)

    @staticmethod
    @functools.lru_cache(maxsize=2)
    def validate_business_email(email: str) -> 'Optional[Union[ValidBusinessEmail, InvalidBusinessMail]]':
        '''
        This is function which validate if is it a business email or not.
        It raises `Union[ValidBusinessEmail, InvalidBusinessMail]`;
        InvalidEmail propagates for syntactically broken addresses.
        The function is also cached using lru_cache.
        '''
        try:
            # BUG FIX: staticmethods must be referenced through the class;
            # the original's bare validate_email(...) was a NameError.
            ValidateBusinessMails.validate_email(email)
        except ValidEmail:
            pass  # syntactically valid - continue with the domain check
        domain = email.strip('\n').strip(' ').lower().split('@')[-1]
        if domain in ValidateBusinessMails._load_free_provider_domains():
            raise InvalidBusinessMail(email)
        raise ValidBusinessEmail(email)

    @property
    def email_verifier(self) -> bool:
        '''
        This is function which validate if is it a email or not (property).
        It returns True for a syntactically valid address.
        '''
        # BUG FIX: result was inverted (False on a valid address).
        # No lru_cache here: caching a method on 'self' keeps every instance
        # alive for the lifetime of the cache (flake8-bugbear B019).
        return re.fullmatch(ValidateBusinessMails._EMAIL_REGEX,
                            self.email) is not None

    @property
    def business_email_verifier(self) -> bool:
        '''
        This is function which validate if is it a business email or not
        (property).  Returns True when the address is valid and its domain
        is not a free e-mail provider; InvalidEmail propagates for
        syntactically broken addresses.
        '''
        try:
            ValidateBusinessMails.validate_email(self.email)
        except ValidEmail:
            pass
        # BUG FIX: the original referenced the undefined name 'email'
        # instead of self.email (NameError at runtime).
        domain = self.email.strip('\n').strip(' ').lower().split('@')[-1]
        return domain not in ValidateBusinessMails._load_free_provider_domains()
| LoginRadius/business-email-validator | dist/python-wrapper/business-email-validator/validate_class.py | Python | gpl-2.0 | 3,872 |
#coding=utf-8
import time
import sys
import requests
sys.path.append('/home/workDir/hubi/auto-btc')
from Util import *
from key import *
'''
获取账号详情
'''
def getAccountInfo(method):
    """Return account details (balances etc.) from the Huobi trade API.

    :param method: API method name, e.g. 'get_account_info'.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    # NOTE: Python 2 only ('long' builtin, urllib.urlencode).
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"method":method}
    sign=signature(params)
    params['sign']=sign
    # the secret key is only used for signing; it must never be transmitted
    del params['secret_key']
    payload = urllib.urlencode(params)
    # NOTE(review): 'params=' sends the signed payload as the query string;
    # confirm against the Huobi API docs whether it belongs in the POST body
    # ('data=') instead.
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
下单接口
@param coinType
@param price
@param amount
@param tradePassword
@param tradeid
@param method
'''
def buy(coinType,price,amount,tradePassword,tradeid,method):
    """Place a limit buy order.

    :param coinType: market/coin identifier (e.g. 1 for BTC).
    :param price: limit price.
    :param amount: order amount.
    :param tradePassword: optional trade password; sent only if truthy.
    :param tradeid: optional client-side trade id; sent only if truthy.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"price":price,"coin_type":coinType,"amount":amount,"method":method}
    sign=signature(params)
    params['sign']=sign
    del params['secret_key']
    # optional parameters are added after signing, i.e. they are not part of
    # the signature
    if tradePassword:
        params['trade_password']=tradePassword
    if tradeid:
        params['trade_id']=tradeid
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
提交市价单接口
@param coinType
@param amount
@param tradePassword
@param tradeid
'''
def buyMarket(coinType,amount,tradePassword,tradeid,method):
    """Place a market buy order (no price; executed at market).

    :param coinType: market/coin identifier.
    :param amount: order amount.
    :param tradePassword: optional trade password; sent only if truthy.
    :param tradeid: optional client-side trade id; sent only if truthy.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"coin_type":coinType,"amount":amount,"method":method}
    sign=signature(params)
    params['sign']=sign
    # optional parameters are excluded from the signature
    if tradePassword:
        params['trade_password']=tradePassword
    if tradeid:
        params['trade_id']=tradeid
    del params['secret_key']
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
撤销订单
@param coinType
@param id
'''
def cancelOrder(coinType,id,method):
    """Cancel an open order.

    :param coinType: market/coin identifier.
    :param id: order id to cancel.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"coin_type":coinType,"id":id,"method":method}
    sign=signature(params)
    params['sign']=sign
    del params['secret_key']
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
查询个人最新10条成交订单
@param coinType
'''
def getNewDealOrders(coinType,method):
    """Query the caller's 10 most recent filled orders.

    :param coinType: market/coin identifier.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"coin_type":coinType,"method":method}
    sign=signature(params)
    params['sign']=sign
    del params['secret_key']
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
根据trade_id查询oder_id
@param coinType
@param tradeid
'''
def getOrderIdByTradeId(coinType,tradeid,method):
    """Look up an order id by its client-side trade id.

    :param coinType: market/coin identifier.
    :param tradeid: client-side trade id.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"coin_type":coinType,"method":method,"trade_id":tradeid}
    sign=signature(params)
    params['sign']=sign
    del params['secret_key']
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
获取所有正在进行的委托
@param coinType
'''
def getOrders(coinType,method):
    """List all currently open (pending) orders.

    :param coinType: market/coin identifier.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"coin_type":coinType,"method":method}
    sign=signature(params)
    params['sign']=sign
    del params['secret_key']
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
获取订单详情
@param coinType
@param id
'''
def getOrderInfo(coinType,id,method):
    """Fetch the details of a single order.

    :param coinType: market/coin identifier.
    :param id: order id.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"coin_type":coinType,"method":method,"id":id}
    sign=signature(params)
    params['sign']=sign
    del params['secret_key']
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
限价卖出
@param coinType
@param price
@param amount
@param tradePassword
@param tradeid
'''
def sell(coinType,price,amount,tradePassword,tradeid,method):
    """Place a limit sell order.

    :param coinType: market/coin identifier.
    :param price: limit price.
    :param amount: order amount.
    :param tradePassword: optional trade password; sent only if truthy.
    :param tradeid: optional client-side trade id; sent only if truthy.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"price":price,"coin_type":coinType,"amount":amount,"method":method}
    sign=signature(params)
    params['sign']=sign
    del params['secret_key']
    # optional parameters are excluded from the signature
    if tradePassword:
        params['trade_password']=tradePassword
    if tradeid:
        params['trade_id']=tradeid
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
市价卖出
@param coinType
@param amount
@param tradePassword
@param tradeid
'''
def sellMarket(coinType,amount,tradePassword,tradeid,method):
    """Place a market sell order (no price; executed at market).

    :param coinType: market/coin identifier.
    :param amount: order amount.
    :param tradePassword: optional trade password; sent only if truthy.
    :param tradeid: optional client-side trade id; sent only if truthy.
    :param method: API method name.
    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    timestamp = long(time.time())
    params = {"access_key": ACCESS_KEY,"secret_key": SECRET_KEY, "created": timestamp,"coin_type":coinType,"amount":amount,"method":method}
    sign=signature(params)
    params['sign']=sign
    # optional parameters are excluded from the signature
    if tradePassword:
        params['trade_password']=tradePassword
    if tradeid:
        params['trade_id']=tradeid
    del params['secret_key']
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_SERVICE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
'''
获取实时价格
'''
def get_realtime_price():
    """Fetch the current market ticker from the Huobi realtime API.

    :return: decoded JSON response on HTTP 200, otherwise None.
    """
    r = requests.post(HUOBI_REALTIME_API)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
| bupt075225/auto-btc | HuobiService.py | Python | mit | 6,500 |
from bs4 import BeautifulSoup as BS4
import json
# Parse the saved dataset index page and print one "<title>, <url>" line
# for every <li> entry it contains.
with open('list.html', 'r', encoding='utf-8') as html_file:
    soup = BS4(html_file, 'lxml')

for item in soup.find_all('li'):
    print(item.a.text + ', http://data.taichung.gov.tw/wSite/' + item.a.get('href'))
| OpenData-TW/DataPackage-DGTW | Datasets/taichung.gov.tw/getlist.py | Python | mit | 281 |
# -*- coding: utf-8 -*-
#
# Citizens Agenda documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 23 14:11:37 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# One import per line (PEP 8); `import sys, os` was a single statement.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Citizens Agenda'
copyright = u'2011, Oxys SARL'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CitizensAgendadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'CitizensAgenda.tex', u'Citizens Agenda Documentation',
   u'Oxys SARL', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'citizensagenda', u'Citizens Agenda Documentation',
     [u'Oxys SARL'], 1)
]
| oysnet/czagenda-api | docs/conf.py | Python | bsd-3-clause | 7,106 |
# URL configuration: maps request paths to views for the gametracker app.
from django.contrib import admin
from django.urls import include, path  # NOTE(review): `include` is imported but unused here
from . import views
# Django matches patterns top-down; both home routes deliberately share the
# URL name "home".
urlpatterns = [
    # Landing page with no game selected.
    path("", views.home, name="home"),
    # Same view scoped to one game. The <str:> converter matches a single
    # non-empty path segment (no "/"), so this does not shadow "admin/".
    path("<str:game_name>", views.home, name="home"),
    # Built-in Django admin site.
    path("admin/", admin.site.urls),
]
| GoogleCloudPlatform/serverless-expeditions | cloud-run-django-terraform/gametracker/urls.py | Python | apache-2.0 | 240 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Lucas Huber, Copyright CoĐoo Project
# based on account_wallet by Yannick Buron, Copyright Yannick Buron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import fields, orm
import openerp.addons.decimal_precision as dp
class ExchangeConfigSettings(orm.Model):
    """Exchange-wide configuration parameters.

    Extends (``_inherit``) the ``exchange.config.settings`` model with the
    exchange's identity (name/code), its accounting journal, the account
    templates, and switches for balance visibility and account numbering.
    """
    # Add Exchange configuration (parameters) to Exchange settings
    _inherit = 'exchange.config.settings'
    _columns = {
        'name': fields.char(
            'Exchange Name',
            required=True,
            size=21,
            help='Name of the Exchange'
        ),
        # Short unique identifier; format documented in the help text below.
        'code': fields.char(
            'Exchange Code',
            required=False,
            default="GB WXYZ",
            size=7,
            help="Unique Exchange Code (EC)"
            "First part of the 20 digits Account Code CC BBBB"
            "CC country code -> DE Germany"
            "BBBB Exchange code"
        ),
        'display_balance': fields.boolean('Everyone can see balances?'),
        'journal_id': fields.many2one(
            'account.journal', 'Community Journal', required=True
        ),
        # Account templates defined for this exchange (see AccountTypesConfig).
        'account_ids': fields.one2many(
            'exchange.config.accounts', 'config_id', 'Accounts templates'
            # domain=lambda self: [('name', '=', self._name)],
            # auto_join=True, string='Lines'
        ),
        # TODO is ev. not used anymore in the future
        'default_currency_id': fields.many2one(
            'res.currency', 'Default currency',
            # domain=[('wallet_currency', '=', True)], required=False
        ),
        'use_account_numbers': fields.boolean(
            'Use of Account Numbering System',
            help="Use of the 20 digits Account Numbering Code 'CC BBBB DDDDDDDD XXXX-KK'"
        ),
    }
class AccountTypesType(orm.Model):
    """List of available exchange account types.

    Each record pairs a two-letter type key (``name``, e.g. ``PD``, ``SY``)
    with a translatable account name and an optional free-form description.
    Both the key and the name must be unique.
    """
    # Lines containing the a list accounts types
    _name = 'exchange.account.type'
    _description = 'Exchange Accounts Types list'
    _columns = {
        'name': fields.char(
            'Account Type Key', required=True, size=2, default="XX",
            help="Account key examples"
            "PD Private User Default account"
            "PU Private User sub-account"
            "BD Business User Default account"
            "BC Business User Credit account"
            "SY System account"
        ),
        'account_name': fields.char(
            'Account name', size=32, required=True,
            translate=True, default='XY User account',
            help="Name of the Account"
        ),
        'account_desc': fields.char(
            'Account Type Description', required=False,
            help='Description'
        ),
    }
    # BUG FIX: the two violation messages were swapped relative to the
    # fields they guard -- 'name' holds the type *key* ("Account Type Key")
    # while 'account_name' holds the *name*.
    _sql_constraints = [
        (
            'typename_unique', 'unique(name)',
            'We can only have one line per key'
        ),
        (
            'account_name_unique', 'unique(account_name)',
            'We can only have one line per name'
        )
    ]
class AccountTypesConfig(orm.Model):
    """Account type/template configuration for an exchange.

    A template describes how user/system/clearing/rating accounts are
    created: numbering prefix, currency, balance limits, initial credit
    and the related Odoo account used for bookkeeping.
    """
    # Lines containing the general configuration for accounts types
    _name = 'exchange.config.accounts'
    _description = 'Exchange Account Type/Template configuration'
    _columns = {
        'name': fields.char(
            'Account Name', required=True, size=40, translate=True,
            help='Name of the Account'),
        'account_type': fields.selection([
            ('user', 'User account'),
            ('system', 'System account'),
            ('clearing', 'Clearing account'),
            ('rating', 'Rating account'),
            ], 'Account Type', readonly=False,
            required=True, default='user',
            help="Type of account /n"
            "User Account, belongs to a user"
            "System Account, belongs to the system or organisation"
            "Clearing Account, belongs to the system or organisation"),
        'type_prefix': fields.many2one(
            'exchange.account.type', 'Account Number Prefix/Type', required=False, size=2,
            help="Prefix for Number of the Accounts"
            "in last part of the 21 digits Account Code"),
        'config_id': fields.many2one(
            'exchange.config.settings', 'Config ID', required=True),
        'accounts_ids': fields.one2many(
            'exchange.accounts', 'template_id', 'Related accounts',
            help='Related accounts for transactions'),
        'hidden': fields.boolean(
            'Hidden Account',
            help='Account is hidden to users'),
        # TODO Filter on many2one about 'product.public.category' = Membership
        'membership_type': fields.many2one(
            'product.product', 'Type of membership', required=False,
            help='For this of membership the accounts will be used'),
        'default_account': fields.boolean(
            'Default Account',
            default=False,
            help='This account will be used/attached for new users of the group'),
        'currency_id': fields.many2one(
            'res.currency', 'Currency', required=True),
        'limit_negative': fields.boolean('Limit - ?'),
        # BUG FIX: label read 'ValueLimit -' (missing space), inconsistent
        # with the 'Value Limit +' label below.
        'limit_negative_value': fields.float(
            'Value Limit -', digits_compute=dp.get_precision('Product Price'),
            default=-500.0),
        'limit_positive': fields.boolean('Limit + ?'),
        'limit_positive_value': fields.float(
            'Value Limit +', digits_compute=dp.get_precision('Product Price'),
            default=500.0),
        'account_id': fields.many2one(
            'account.account', 'Related account', required=False,
            help='Related account for Odoo Accounting purpose'),
        'exchange_provider_id': fields.boolean(
            'External DB',
            help='Check if an outside transaction engine exists'),
        'external_ledger_id': fields.many2one(
            'distributed.db.list', 'External Ledger ID'),
        'initcredit': fields.float(
            'Initial amount of currency',
            help='Initial amount currency of User gets on this account'),
        'initcredit_type': fields.many2one(
            'exchange.transaction.type',
            'Initial credit transaction type'),
        # Related fields (not stored in DB)
        # NOTE(review): 'symbol' on res.currency is a char field, so
        # type='many2one' here looks wrong (should presumably be 'char');
        # kept as-is pending confirmation against the views using it.
        'currency_symbol': fields.related('currency_id',
            'symbol', string='Currency Symbol', type='many2one',
            relation='res.currency', readonly=True),
    }
    _sql_constraints = [
        (
            'name', 'unique(name)',
            'We can only have one line per name'
        )
    ]
# NOTE: dead code retained from the account_wallet module this file is
# based on. The whole region is wrapped in a module-level string literal,
# so none of it executes; kept only as a reference for a future port of
# the partner-balance update hooks.
'''
    def update_all_partners(self, cr, uid, context=None):
        # Update balances on all partners
        partner_obj = self.pool.get('res.partner')
        partner_ids = partner_obj.search(cr, uid, [], context=context)
        partner_obj.update_wallet_balance(
            cr, uid, partner_ids, context=context
        )
    def create(self, cr, uid, vals, context=None):
        # Mark the currency as wallet and then
        # update balance on all partners at creation
        self.pool.get('res.currency').write(
            cr, uid, [vals['currency_id']], {'wallet_currency': True},
            context=context
        )
        res = super(AccountTypesConfig, self).create(
            cr, uid, vals, context=context
        )
        self.update_all_partners(cr, uid, context=context)
        return res
    def write(self, cr, uid, ids, vals, context=None):
        # Update balance on all partners when modified
        res = super(AccountTypesConfig, self).write(
            cr, uid, ids, vals, context=context
        )
        self.update_all_partners(cr, uid, context=context)
        return res
    def unlink(self, cr, uid, ids, context=None):
        # Remove the wallet flag on the currency
        # and then update balance on all partners
        for currency in self.browse(cr, uid, ids, context=context):
            self.pool.get('res.currency').write(
                cr, uid, [currency.currency_id.id],
                {'wallet_currency': False}, context=context
            )
        res = super(AccountTypesConfig, self).unlink(
            cr, uid, ids, context=context
        )
        self.update_all_partners(cr, uid, context=context)
        return res
'''
| moneygrid/vertical-exchange | exchange/__old/res_config.py | Python | gpl-3.0 | 9,110 |
#!/usr/bin/env python
## @package artnet_light_control
# Documentation for this module.
#
# More details.
"""
To activate a specific settings for the application, run:
source env/bin/activate
export APP_SETTINGS="config.DevelopmentConfig"
OR export APP_SETTINGS="config.ProductionConfig"
OR export APP_SETTINGS="config.StagingConfig"
OR export APP_SETTINGS="config.TestingConfig"
"""
# Run in development mode... for now
import os
import config
from flask import Flask, url_for
from flask import render_template, request, flash, redirect
import time
import logging
from colour import Color
from artnet import shared
from artnet import dmx_rig
from artnet import dmx_fixture
from artnet import dmx_frame
from artnet import dmx_chase
from artnet import dmx_show
from artnet import dmx_cue
from artnet import dmx_controller
from artnet import dmx_effects
#logging.basicConfig(format='%(levelname)s:%(message)s', filename='artNet_controller.log', level=logging.DEBUG)
#log = logging.getLogger(__name__)
# BUG FIX: os.system('export ...') runs in a throw-away child shell and
# cannot change this process's environment; set the variable directly.
os.environ['APP_SETTINGS'] = "config.ProductionConfig"
# Create the application
app = Flask(__name__)
# Load app configuration
#app.config.from_object(os.environ['APP_SETTINGS'])
#app.config.from_object(config.DevelopmentConfig)
app.config.from_object(config.ProductionConfig)
#import dmx_NBRLib
#import colorsys
# Hue offsets expressed as fractions of a full turn: the colour library's
# HSL hue channel runs over [0, 1) rather than [0, 360).
# Explicit float literals keep the values correct even under a Python 2
# interpreter, where 30/360 would truncate to 0 and silently disable all
# colour rotation.
DEG30 = 30.0 / 360.0
DEG180 = 180.0 / 360.0
DEG120 = 120.0 / 360.0
# Calculate colors on the color wheel: shifting colors based on the current color given in parameter
def adjacent_color(c, d=DEG30):
    """Return a new Color whose hue is rotated by *d* around the wheel.

    :param c: a ``colour.Color`` instance.
    :param d: hue offset as a fraction of a full turn (may be negative);
              defaults to a 30-degree rotation.
    :return: a new ``Color`` with the same saturation/luminance and the
             rotated hue.
    """
    (h, s, l) = c.hsl
    # Wrap around at 1.0 so the hue stays inside [0, 1).
    h = (h + d) % 1
    return Color(hue=h, saturation=s, luminance=l)
# Default colors, based on the main color
mainColor = Color('black')
sideColor1 = adjacent_color(mainColor, DEG30)
sideColor2 = adjacent_color(mainColor, -DEG30)
triadColor1 = adjacent_color(mainColor, DEG120)
triadColor2 = adjacent_color(mainColor, -DEG120)
complementColor = adjacent_color(mainColor, DEG180)
sideComplementColor1 = adjacent_color(complementColor, DEG30)
sideComplementColor2 = adjacent_color(complementColor, -DEG30)
# NOTE(review): the first three entries are hex strings (.hex) while the
# last five are Color objects -- presumably all eight should be .hex
# strings; confirm how the template consumes colorList before normalising.
colorList = [mainColor.hex, sideColor1.hex, sideColor2.hex, complementColor, triadColor1, triadColor2, sideComplementColor1, sideComplementColor2]
# Default program name
currentProgramName = ""
# Number of HTML pages
MAX_PAGES = 2
currentPage = 1
# Description of the HTML pages
title = "Salle de bain"
introduction="<h1>Quelle couleur pour obtenir quels effets ?</h1><p>S'il existe une symbolique arbitraire construite par les différentes cultures et civilisations, une signification réelle est intrinsèque aux couleurs, par les effets et mouvements de la nature, les phénomènes universels de la vie et le profond inconscient commun à toute l'espèce humaine. On distingue plusieurs catégories de couleurs selon leurs vertus.</p>"
imageList = ['../static/douche1.jpg', '../static/douche6.jpg', '../static/douche3.jpg', '../static/douche4.jpg']
pageTitle = ['Les programmes', 'Les couleurs', '', '']
# Descriptions of the choices in the HTML page - Colors
# Each (name, css colour, description) tuple is shown once on page 1 and
# once on page 2; defining the table a single time removes the duplicated
# literals the two page blocks used to carry.
_COLOR_CHOICES = [
    ('Vert', 'green', "Anti-allergiques et antibiotiques"),
    ('Turquoise', 'turquoise', "Mobilise les cycles chronobiologiques"),
    ('Magenta', 'magenta', "Représente la fusion, l'amour, le rêve mais également la vulnérabilité"),
    ('Violet', 'purple', "Est un immunostimulant, pour la circulation veineuse et contre les migraines"),
    ('Bleu Foncé', 'darkblue', "On le préconise comme somnifère et antibactérien, détuméfiant. Il participe de la synchronisation entre les deux hémisphères cérébraux"),
    ('BleuCiel', 'lightskyblue', "Symbolise le souffle, la communication, l'échange, le partage. Il est anti-inflammatoire et rafraîchissant"),
    # BUG FIX: 'Outremer' had image='mediumblue' and color='green'; the CSS
    # colour belongs in 'color' and 'image' is empty like every other entry.
    ('Outremer', 'mediumblue', "Inhibiteur et antistress. Il porte la tempérance, le calme et l'introspection"),
    ('Rouge', 'red', "Il permet de tonifier et dynamiser par ses vertus anti-anémiques"),
    ('Orange', 'orange', "On l'utilise en anti-dépresseur ou stimulant neurosensoriel"),
    ('Jaune', 'yellow', "On s'en sert comme stimulant digestif et lymphatique, pour l'estomac et les glandes exocrines"),
]
# Page-1 entries first, then the identical page-2 entries (same order as
# the original append sequence).
choiceList = [
    dict(name=name, image='', color=color, description=description, page=page)
    for page in (1, 2)
    for (name, color, description) in _COLOR_CHOICES
]
# Descriptions of the choices in the HTML page - Programs.
# Built as a single list literal instead of repeated append() calls; the
# resulting dictionaries are identical to the original construction.
programList = [
    dict(name='Tonic 5mn', image='', color='', description="Programme tonic rapide (5mn)", page=1),
    dict(name='Tonic 10mn', image='', color='', description="Programme tonic de 10mn", page=1),
    dict(name='Tonic 20mn', image='', color='', description="Programme tonic de 20mn", page=1),
    dict(name='Paisible 5mn', image='', color='', description="Programme relaxant rapide (5mn)", page=1),
    dict(name='Paisible 10mn', image='', color='', description="Programme relaxant de 10mn", page=1),
    dict(name='Paisible 20mn', image='', color='', description="Programme relaxant de 20 mn", page=1),
    dict(name='Paisible 30mn', image='', color='', description="Programme relaxant de 30 mn", page=1),
    dict(name='Fin', image='', color='', description="Fin de programme", page=1),
    dict(name='Nuit', image='', color='', description="Eclairage tamisé", page=1),
    # Page-2 entries carry long HTML descriptions of the colour families.
    dict(name='Neutralisantes, nettoyantes et équilibrantes', image='', color='', description="<p><b>Vert:</b> A des vertus anti-allergiques et antibiotiques. Il représente l'indépendance, la nature, la liberté mais aussi la solitude et l'apprentissage.</p><p><b>Turquoise:</b> Mobilise les cycles chronobiologiques. Il symbolise la transformation, l'évolution, l'élimination et la purification.</p>", page=2),
    dict(name='Cicatrisantes, protectrices et régénératrices', image='', color='', description="<p><b>Écarlate</b> Porte la fertilité, la féminité et l'enracinement, la terre. Ses vertus sont régénératrices et régulatrices.</p><p><b>Magenta:</b> Représente la fusion, l'amour, le rêve mais également la vulnérabilité. On l'utilise comme rajeunissant et équilibrant cardiovasculaire, ou aphrodisiaque.</p><p><b>Violet:</b> Est un immunostimulant, pour la circulation veineuse et contre les migraines. Il image l'esprit, la connaissance et la spiritualité.</p>", page=2),
    dict(name='Calmantes, dispersantes et sédatives', image='', color='', description="<p><b>Bleu foncé:</b> Représente la nuit, l'inconscient, la méditation et la profondeur. On le préconise comme somnifère et antibactérien, détuméfiant. Il participe de la synchronisation entre les deux hémisphères cérébraux.</p><p><b>Bleu ciel:</b> Symbolise le souffle, la communication, l'échange, le partage. Il est anti-inflammatoire et rafraîchissant.</p><p><b>Outremer:</b> Est un inhibiteur et antistress. Il porte la tempérance, le calme et l'introspection.</p>", page=2),
    dict(name='Énergisantes et tonifiantes', image='', color='', description="<p><b>Rouge:</b> Symbolise la chaleur, la vitalité, l'engagement et le courage. Il permet de tonifier et dynamiser par ses vertus anti-anémiques.</p><p><b>Orange:</b> Est un excitant qui stimule l'adrénaline. Il caractérise les mouvements, les rythmes et symbolise l'émotion, le contact. On l'utilise en anti-dépresseur ou stimulant neurosensoriel.</p><p><b>Jaune:</b> Représente le soleil, la conscience, la lucidité, le rayonnement personnel. On s'en sert comme stimulant digestif et lymphatique, pour l'estomac et les glandes exocrines.</p>", page=2),
]
TIMEOUT = 5*60  # Time in second before blackout when no action is done
hostIP = "192.168.0.82"  # Target address for the ArtNet frames, or empty for broadcast
# Create and load the current rig parameters
myRig = dmx_rig.Rig()
myRig.load("rigs/my-rig_3.yaml")
myRig.printRig()
shared.log.debug("Configure DMX controller")
# q = dmx_controller.Controller(hostIP, bpm=30, fps=20, nodaemon=True, runout=False, universe=1)
# Shared DMX controller queue; every Flask view below pushes frames/chases
# into it.
q = dmx_controller.Controller(hostIP, bpm=30, fps=20, timeout=TIMEOUT, nodaemon=True, runout=False, universe=1)
# Definition of the HTML routes for Flask framework
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the single-page UI with the current page state and choice lists."""
    print('main_layout')
    template_args = dict(
        title=title,
        pageTitle=pageTitle,
        buttonList=choiceList,
        imageList=imageList,
        page=currentPage,
        maxPage=MAX_PAGES,
        introduction=introduction,
        programList=programList,
        colorList=colorList,
    )
    return render_template('main_layout.html', **template_args)
@app.route('/scene', methods = ['POST'])
def scene():
    """Apply the colour scene picked in the UI.

    Looks the submitted name up in choiceList, recomputes the derived
    palette from the chosen main colour and, if a light program is
    currently active, restarts it so it picks up the new palette.
    """
    global colorList
    global mainColor, sideColor1, sideColor2, triadColor1, triadColor2, complementColor, sideComplementColor1, sideComplementColor2
    global currentProgramName
    print('scene')
    shared.log.debug("POST - scene")
    sceneName = request.form['Scene']
    shared.log.debug(sceneName)
    for i in choiceList:
        if (i['name'] == sceneName):
            currentScene = i
            shared.log.debug('-->FOUND: %s - %s' % (sceneName, currentScene))
            print('-->FOUND: %s - %s' % (sceneName, currentScene))
            # Recompute the full palette from the selected main colour.
            mainColor = Color(currentScene['color'])
            sideColor1 = adjacent_color(mainColor, DEG30)
            sideColor2 = adjacent_color(mainColor, -DEG30)
            triadColor1 = adjacent_color(mainColor, DEG120)
            triadColor2 = adjacent_color(mainColor, -DEG120)
            complementColor = adjacent_color(mainColor, DEG180)
            sideComplementColor1 = adjacent_color(complementColor, DEG30)
            sideComplementColor2 = adjacent_color(complementColor, -DEG30)
            colorList = [mainColor.hex, sideColor1.hex, sideColor2.hex, complementColor, triadColor1, triadColor2, sideComplementColor1, sideComplementColor2]
            shared.log.debug(' MainColor: %s | %s | %s - %s ** %s' % (currentScene['color'], mainColor.hex, sideColor1.hex, sideColor2.hex, complementColor.hex))
            print(' MainColor: %s | %s | %s - %s ** %s' % (currentScene['color'], mainColor.hex, sideColor1.hex, sideColor2.hex, complementColor.hex))
            # Restart the active light program (if any) with the new colours.
            # BUG FIX: this used to reference an undefined name `chaseList`
            # (NameError as soon as a program was active); the chase table
            # actually lives on the rig, exactly as program() uses it.
            if currentProgramName != "" and currentProgramName in myRig.chases:
                q.removeAll()
                q.add(dmx_effects.create_chaseRun(q.get_clock(), myRig, myRig.chases[currentProgramName]))
                shared.log.debug('Start light program [%s] with color %s' % (currentProgramName, mainColor))
                print('Start light program [%s] with color %s' % (currentProgramName, mainColor))
            break
    return redirect('/')
@app.route('/program', methods = ['POST'])
def program():
    """Start the light program picked in the UI.

    Looks the submitted name up in programList, remembers it as the
    current program and, when a chase with the same name exists on the
    rig, replaces the controller's queue with that chase.
    """
    global myRig
    global colorList
    global mainColor, sideColor1, sideColor2, triadColor1, triadColor2, complementColor, sideComplementColor1, sideComplementColor2
    global currentProgramName
    global q
    print('program')
    shared.log.debug("POST - program")
    programName = request.form['Program']
    shared.log.debug(programName)
    for i in programList:
        if (i['name'] == programName):
            currentProgram = i
            currentProgramName = programName
            shared.log.debug('-->FOUND: %s - %s' % (programName, currentProgram))
            print('-->FOUND: %s - %s' % (programName, currentProgram))
            # Start the light program
            if (currentProgramName != ""):
                print("Check if currentProgramName [%s] is in rig.chases" % currentProgramName)
                if (currentProgramName in myRig.chases):
                    # BUG FIX: the debug dump of the chase used to run
                    # *before* this membership test, raising KeyError for
                    # any program (e.g. page-2 entries) without a chase.
                    print("---%s" % myRig.chases[currentProgramName])
                    print("OK; continue")
                    q.removeAll()
                    print("OK; removed")
                    print("theChase.chase: %s" % myRig.chases[currentProgramName].chase)
                    q.add(dmx_effects.create_chaseRun(q.get_clock(), myRig, myRig.chases[currentProgramName]))
                    print("OK; added")
                    shared.log.debug('Start light program [%s] with color %s' % (currentProgramName, mainColor))
                    print('Start light program [%s] with color %s' % (currentProgramName, mainColor))
            break
    return redirect('/')
@app.route('/next', methods = ['GET'])
def next():  # NOTE: shadows builtin next(); kept -- Flask uses the name as the endpoint
    """Advance the UI to the following page, clamped to MAX_PAGES."""
    global currentPage
    print('next')
    if currentPage < MAX_PAGES:
        currentPage += 1
    shared.log.debug("GET - next - %s" % currentPage)
    return redirect('/')
@app.route('/previous', methods = ['GET'])
def previous():
    """Move the UI back one page, never below page 1."""
    global currentPage
    print('previous')
    if currentPage > 1:
        currentPage -= 1
    shared.log.debug("GET - previous - %s" % currentPage)
    return redirect('/')
@app.route('/Toilette')
def toilette():
    """Full-brightness preset: drive all 512 DMX channels to 255."""
    print('Toilette')
    shared.log.debug("Toilette")
    full_frame = [255] * 512
    q.removeAll()
    q.add(iter([full_frame]))
    return redirect('/')
@app.route('/Douche')
def douche():
    """Shower preset: drive all 512 DMX channels to level 180."""
    print('Douche')
    shower_frame = [180] * 512
    q.removeAll()
    q.add(iter([shower_frame]))
    return redirect('/')
@app.route('/Bain')
def bain():
    """Bath preset: drive all 512 DMX channels to half level (128)."""
    print('Bain')
    bath_frame = [128] * 512
    q.removeAll()
    q.add(iter([bath_frame]))
    return redirect('/')
@app.route('/OFF')
def OFF():
    """Blackout: drive all 512 DMX channels to zero."""
    # BUG FIX: the debug print said 'Bain' (copy/paste from bain()); it now
    # reports the route actually taken.
    print('OFF')
    q.removeAll()
    q.add(iter([[0] * 512]))
    return redirect('/')
#######################################################
if __name__ == '__main__':
    # Display HTML pages to control lights using ArtNet messages
    # Queue a default chase so the lights do something before the first
    # user interaction, then start the DMX controller.
    q.add(dmx_effects.create_chaseRun(q.get_clock(), myRig, myRig.chases["Tonic 5mn"]))
    shared.log.info("Start DMX controller with other effect")
    q.start()
    # Run the web server on port 5000
    # NOTE(review): app.run() blocks; presumably Controller.start() runs in
    # its own thread so both can operate -- confirm.
    print("Run the web server on port 5000")
    shared.log.debug("Run the web server on port 5000")
    app.run('0.0.0.0')
    shared.log.debug("End of processing.")
| nbremond77/Artnet_dmx_controler | Artnet_light_control.py | Python | gpl-3.0 | 17,388 |
# Portions Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# sshserver.py - ssh protocol server support for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import os
import socket
import sys
from . import encoding, error, hook, util, wireproto
from .i18n import _
from .pycompat import decodeutf8, encodeutf8, range
class sshserver(wireproto.abstractserverproto):
    """Serve the Mercurial wire protocol over an ssh session's stdio streams.

    Commands arrive on ``ui.fin`` and replies are written to ``ui.fout``
    as simple length-prefixed frames; hook output and normal ui output are
    redirected to stderr so they cannot corrupt the protocol stream.
    """
    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo
        self.lock = None
        self.fin = ui.fin
        self.fout = ui.fout
        self.name = "ssh"
        # Send hook output to stderr; stdout is reserved for the protocol.
        hook.redirect(True)
        ui.fout = repo.ui.fout = ui.ferr
        # Prevent insertion/deletion of CRs
        util.setbinary(self.fin)
        util.setbinary(self.fout)
    def getargs(self, args):
        """Read the framed arguments named in *args* (a space-separated
        string) and return their values in that order.

        Each argument arrives as a ``"<name> <len>"`` line followed by
        ``<len>`` bytes of value. The special name ``*`` is followed by
        ``<len>`` extra name/value pairs, collected into a dict under
        the key ``"*"``. Unexpected names abort the session.
        """
        data = {}
        keys = args.split()
        for n in range(len(keys)):
            argline = decodeutf8(self.fin.readline()[:-1])
            arg, l = argline.split()
            if arg not in keys:
                raise error.Abort(_("unexpected parameter %r") % arg)
            if arg == "*":
                star = {}
                for k in range(int(l)):
                    argline = decodeutf8(self.fin.readline()[:-1])
                    arg, l = argline.split()
                    val = decodeutf8(self.fin.read(int(l)))
                    star[arg] = val
                data["*"] = star
            else:
                val = decodeutf8(self.fin.read(int(l)))
                data[arg] = val
        return [data[k] for k in keys]
    def getarg(self, name):
        """Read a single framed argument and return its value."""
        return self.getargs(name)[0]
    def getfile(self, fpout):
        """Receive a counted-chunk upload from the client into *fpout*.

        Sends an empty response first; the client then sends a count line
        followed by that many bytes, repeatedly, terminating with count 0.
        """
        self.sendresponse("")
        count = int(self.fin.readline())
        while count:
            fpout.write(self.fin.read(count))
            count = int(self.fin.readline())
    def redirect(self):
        # Output was already redirected to stderr in __init__; nothing to do.
        pass
    def sendresponse(self, v):
        """Send a text response, encoding it to UTF-8 bytes first."""
        self.sendbytesresponse(encodeutf8(v))
    def sendbytesresponse(self, v):
        """Send *v* (bytes) as a length-prefixed frame and flush."""
        self.fout.write(b"%d\n" % len(v))
        self.fout.write(v)
        self.fout.flush()
    def sendstream(self, source):
        """Write a streaming response chunk by chunk, then flush."""
        write = self.fout.write
        if source.reader:
            # Read fixed-size chunks until the reader is exhausted.
            gen = iter(lambda: source.reader.read(4096), "")
        else:
            gen = source.gen
        for chunk in gen:
            write(chunk)
        self.fout.flush()
    def sendpushresponse(self, rsp):
        """Send a push result: an empty frame, then the numeric result."""
        self.sendresponse("")
        self.sendresponse(str(rsp.res))
    def sendpusherror(self, rsp):
        """Send a push error message as a normal framed response."""
        self.sendresponse(rsp.res)
    def sendooberror(self, rsp):
        """Report an out-of-band error on stderr, then write an empty
        line on the protocol stream so the client's framing stays in sync."""
        self.ui.ferr.write("%s\n-\n" % rsp.message)
        self.ui.ferr.flush()
        self.fout.write(b"\n")
        self.fout.flush()
    def serve_forever(self):
        """Dispatch commands until the client disconnects, then exit(0),
        releasing any repository lock still held."""
        try:
            while self.serve_one():
                pass
        finally:
            if self.lock is not None:
                self.lock.release()
        sys.exit(0)
    # Dispatch table mapping a wireproto response type (or a plain
    # bytes/str payload) to the unbound method that serializes it;
    # used by serve_one() as self.handlers[rsp.__class__](self, rsp).
    handlers = {
        bytes: sendbytesresponse,
        str: sendresponse,
        wireproto.streamres: sendstream,
        wireproto.pushres: sendpushresponse,
        wireproto.pusherr: sendpusherror,
        wireproto.ooberror: sendooberror,
    }
    def serve_one(self):
        """Read and dispatch one command line; return False on EOF."""
        cmd = self.fin.readline()[:-1]
        cmd = decodeutf8(cmd)
        if cmd:
            # Make `ps` output useful: tag the process with repo/cmd/client.
            if util.safehasattr(util, "setprocname"):
                client = encoding.environ.get("SSH_CLIENT", "").split(" ")[0]
                # Resolve IP to hostname
                try:
                    client = socket.gethostbyaddr(client)[0]
                except (socket.error, IndexError):
                    pass
                reponame = os.path.basename(self.repo.root)
                title = "hg serve (%s)" % " ".join(
                    filter(None, [reponame, cmd, client])
                )
                util.setprocname(title)
            if cmd in wireproto.commands:
                rsp = wireproto.dispatch(self.repo, self, cmd)
                self.handlers[rsp.__class__](self, rsp)
            else:
                # Not a registered wire command: fall back to a do_<cmd>
                # method, or answer with an empty frame so the client
                # is not left waiting.
                impl = getattr(self, "do_" + cmd, None)
                if impl:
                    r = impl()
                    if r is not None:
                        self.sendresponse(r)
                else:
                    self.sendresponse("")
        return cmd != ""
    def _client(self):
        """Return a 'remote:ssh:<ip>' label identifying the peer."""
        client = encoding.environ.get("SSH_CLIENT", "").split(" ", 1)[0]
        return "remote:ssh:" + client
| facebookexperimental/eden | eden/hg-server/edenscm/mercurial/sshserver.py | Python | gpl-2.0 | 4,778 |
# Packaging script; targets Python 2.7 per the classifiers below.
from setuptools import setup, find_packages
# No console scripts are installed; the commented example shows the shape.
entry_points = {} # {'console_scripts': ['wanderbits = wanderbits:main']}
# The README doubles as the long description shown on PyPI.
with open('readme.md', 'r') as fi:
    long_description = fi.read()
# Do it.
setup(name='HackingForMovieTrends',
      description='Quick examples of playing with movie data.',
      long_description=long_description,
      version='0.1.0',
      url='https://github.com/Who8MyLunch/HackingForMovieTrends',
      author='Pierre V. Villeneuve',
      author_email='pierre.villeneuve@gmail.com',
      packages=find_packages(),
      entry_points=entry_points,
      package_data={'': ['*.txt', '*.md', '*.yml']},
      license='MIT',
      classifiers=['Development Status :: 4 - Beta',
                   'Environment :: Console',
                   'License :: OSI Approved :: MIT License',
                   'Operating System :: POSIX',
                   'Programming Language :: Python :: 2.7'],
      zip_safe=False)
| Who8MyLunch/HackingForMovieTrends | setup.py | Python | mit | 953 |
import json
import aiohttp
async def version_check(ev):
    """Fetch the official build stamp and warn if this Sigma is outdated."""
    version_url = 'https://api.lucia.moe/data/version'
    async with aiohttp.ClientSession() as session:
        async with session.get(version_url) as response:
            raw = await response.read()
    data = json.loads(raw)
    official_stamp = data['build_date']
    current_stamp = ev.bot.info.version.timestamp
    if official_stamp <= current_stamp:
        return
    local = ev.bot.info.version
    current = f'{local.major}.{local.minor}.{local.patch} {local.codename}'
    remote = data['version']
    latest = f'{remote["major"]}.{remote["minor"]}.{remote["patch"]} {data["codename"]}'
    for warning_line in (
        '---------------------------------',
        'Your Sigma version is outdated.',
        f'CURRENT: {current}',
        f'LATEST: {latest}',
        'Updating is strongly suggested.',
        '---------------------------------',
    ):
        ev.log.warning(warning_line)
| AXAz0r/apex-sigma-core | sigma/modules/core_functions/system/version_check.py | Python | gpl-3.0 | 1,064 |
"""
RefererMiddleware: populates Request referer field, based on the Response which
originated it.
"""
from scrapy.http import Request
class RefererMiddleware(object):

    def process_spider_output(self, response, result, spider):
        """Lazily attach the originating response's URL as the Referer
        header of every Request the spider produced (items and other
        results pass through untouched)."""
        def _with_referer(entry):
            # Only Requests carry headers; do not overwrite an explicit one.
            if isinstance(entry, Request):
                entry.headers.setdefault('Referer', response.url)
            return entry

        return (_with_referer(entry) for entry in result or ())
| mzdaniel/oh-mainline | vendor/packages/scrapy/scrapy/contrib/spidermiddleware/referer.py | Python | agpl-3.0 | 440 |
import sys
from django.apps import apps
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.dispatch import Signal
from ..utils.autoupdate import Element, inform_changed_elements
# This signal is send when the migrate command is done. That means it is sent
# after post_migrate sending and creating all Permission objects. Don't use it
# for other things than dealing with Permission objects.
post_permission_creation = Signal()
# This signal is sent if a permission is changed (e. g. a group gets a new
# permission). Connected receivers may yield Collections.
permission_change = Signal()
def delete_django_app_permissions(sender, **kwargs):
    """
    Deletes the permissions Django creates by default for the auth,
    contenttypes and sessions apps, which are not used here.
    """
    unwanted_labels = ("auth", "contenttypes", "sessions")
    query = Q(app_label=unwanted_labels[0])
    for label in unwanted_labels[1:]:
        query |= Q(app_label=label)
    contenttypes = ContentType.objects.filter(query)
    Permission.objects.filter(content_type__in=contenttypes).delete()
def get_permission_change_data(sender, permissions, **kwargs):
    """
    Yields all necessary Cachables if the respective permissions change.
    """
    core_app = apps.get_app_config(app_label="core")
    for permission in permissions:
        # Only permissions belonging to the core app affect these models.
        if permission.content_type.app_label == core_app.label:
            if permission.codename == "can_see_projector":
                yield core_app.get_model("Projector")
            elif permission.codename == "can_manage_projector":
                # Managing the projector also exposes messages and countdowns.
                yield core_app.get_model("ProjectorMessage")
                yield core_app.get_model("Countdown")
            elif permission.codename == "can_use_chat":
                yield core_app.get_model("ChatMessage")
def autoupdate_for_many_to_many_relations(sender, instance, **kwargs):
    """
    Send autoupdate for many-to-many related objects if the other side
    is deleted.
    """
    # Hotfix for #4501: Skip autoupdate for many-to-many related objects
    # during migrations.
    if "migrate" in sys.argv:
        return
    # Auto-created hidden fields are the reverse side of M2M relations.
    m2m_fields = (
        field
        for field in instance._meta.get_fields(include_hidden=True)
        if field.many_to_many and field.auto_created
    )
    for field in m2m_fields:
        queryset = getattr(instance, field.get_accessor_name()).all()
        for related_instance in queryset:
            if hasattr(related_instance, "get_root_rest_element"):
                # The related instance is or has a root rest element.
                # So lets send it via autoupdate.
                root_rest_element = related_instance.get_root_rest_element()
                inform_changed_elements(
                    [
                        Element(
                            collection_string=root_rest_element.get_collection_string(),
                            id=root_rest_element.pk,
                            full_data=None,
                            reload=True,
                        )
                    ]
                )
| emanuelschuetze/OpenSlides | openslides/core/signals.py | Python | mit | 3,069 |
#!/usr/bin/env python
from email.Utils import formatdate
from string import Template
from argparse import ArgumentParser
from hashlib import sha1
import subprocess
import base64
parser = ArgumentParser(description='Create an appcast XML file from a template')
parser.add_argument('--bundle_file','-f',
required=True,
help='The bundle file to generate the appcast for')
parser.add_argument('--build_number','-n',
required=True,
help='The build number of the app')
parser.add_argument('--signing_key',
help='The DSA key to sign the update with')
parser.add_argument('--template_file',
required=True,
help='The template XML file')
parser.add_argument('--output','-o',
help='Write the output to this file instead of STDOUT')
args = parser.parse_args()
bundle = open(args.bundle_file, 'r').read()
templateSource = open(args.template_file, 'r').read()
attrs = dict()
attrs['build_number'] = args.build_number
attrs['pub_date'] = formatdate()
attrs['file_size'] = len(bundle)
if args.signing_key:
hash = sha1(bundle).digest()
keyFile = open(args.signing_key, 'r')
signProc = subprocess.Popen(['openssl',
'dgst',
'-dss1',
'-sign',
args.signing_key],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
signProc.stdin.write(hash)
binsig = signProc.communicate()[0]
attrs['file_sig'] = base64.b64encode(binsig)
result = Template(templateSource).substitute(attrs)
if args.output:
outputFile = open(args.output, 'w')
outputFile.write(result)
else:
print result
| bevacqua/gitx | updates/appcast.py | Python | gpl-2.0 | 1,571 |
from test_support import *

# Run the proof twice: once with extra -gnato13 compiler switches
# (NOTE(review): presumably a GNAT overflow-checking mode -- confirm),
# then once with the defaults; both runs must succeed.
prove_all(opt=["-cargs", "-gnato13"])
prove_all()
| ptroja/spark2014 | testsuite/gnatprove/tests/N122-016__switches/test.py | Python | gpl-3.0 | 78 |
# Copyright 2020 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Runs all 3 steps to go from input DNA reads to output VCF/gVCF files.
This script currently provides the most common use cases and standard models.
If you want to access more flags that are available in `make_examples`,
`call_variants`, and `postprocess_variants`, you can also call them separately
using the binaries in the Docker image.
For more details, see:
https://github.com/google/deepvariant/blob/r1.3/docs/deepvariant-quick-start.md
"""
import os
import subprocess
import sys
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
FLAGS = flags.FLAGS
# Required flags.
flags.DEFINE_enum(
'model_type', None, ['WGS', 'WES', 'PACBIO', 'HYBRID_PACBIO_ILLUMINA'],
'Required. Type of model to use for variant calling. Set this flag '
'to use the default model associated with each type, and it will set '
'necessary flags corresponding to each model. If you want to use a '
'customized model, add --customized_model flag in addition to this flag.')
flags.DEFINE_string(
'ref', None,
'Required. Genome reference to use. Must have an associated FAI index as '
'well. Supports text or gzipped references. Should match the reference '
'used to align the BAM file provided to --reads.')
flags.DEFINE_string(
'reads', None,
'Required. Aligned, sorted, indexed BAM file containing the reads we want '
'to call. Should be aligned to a reference genome compatible with --ref.')
flags.DEFINE_string('output_vcf', None,
'Required. Path where we should write VCF file.')
# Optional flags.
flags.DEFINE_boolean(
'dry_run', False,
'Optional. If True, only prints out commands without executing them.')
flags.DEFINE_string(
'intermediate_results_dir', None,
'Optional. If specified, this should be an existing '
'directory that is visible insider docker, and will be '
'used to to store intermediate outputs.')
flags.DEFINE_string(
'logging_dir', None, 'Optional. Directory where we should write log files '
'for each stage and optionally runtime reports.')
flags.DEFINE_boolean(
'runtime_report', False, 'Output make_examples runtime metrics '
'and create a visual runtime report using runtime_by_region_vis. '
'Only works with --logging_dir.')
flags.DEFINE_boolean(
'version',
None,
'Optional. If true, print out version number and exit.',
allow_hide_cpp=True)
# Optional flags for call_variants.
flags.DEFINE_string(
'customized_model', None,
'Optional. A path to a model checkpoint to load for the `call_variants` '
'step. If not set, the default for each --model_type will be used')
# Optional flags for make_examples.
flags.DEFINE_integer('num_shards', 1,
'Optional. Number of shards for make_examples step.')
flags.DEFINE_string(
'regions', None,
'Optional. Space-separated list of regions we want to process. Elements '
'can be region literals (e.g., chr20:10-20) or paths to BED/BEDPE files.')
flags.DEFINE_string(
'sample_name', None,
'Sample name to use instead of the sample name from the input reads BAM '
'(SM tag in the header). This flag is used for both make_examples and '
'postprocess_variants.')
flags.DEFINE_boolean(
'use_hp_information', None,
'Optional. If True, corresponding flags will be set to properly use the HP '
'information present in the BAM input.')
flags.DEFINE_string(
'make_examples_extra_args', None,
'A comma-separated list of flag_name=flag_value. "flag_name" has to be '
'valid flags for make_examples.py. If the flag_value is boolean, it has to '
'be flag_name=true or flag_name=false.')
flags.DEFINE_string(
'call_variants_extra_args', None,
'A comma-separated list of flag_name=flag_value. "flag_name" has to be '
'valid flags for call_variants.py. If the flag_value is boolean, it has to '
'be flag_name=true or flag_name=false.')
flags.DEFINE_string(
'postprocess_variants_extra_args', None,
'A comma-separated list of flag_name=flag_value. "flag_name" has to be '
'valid flags for postprocess_variants.py. If the flag_value is boolean, '
'it has to be flag_name=true or flag_name=false.')
# Optional flags for postprocess_variants.
flags.DEFINE_string('output_gvcf', None,
'Optional. Path where we should write gVCF file.')
flags.DEFINE_boolean(
'vcf_stats_report', True, 'Optional. Output a visual report (HTML) of '
'statistics about the output VCF.')
# Default model checkpoint shipped in the Docker image for each --model_type.
MODEL_TYPE_MAP = {
    'WGS': '/opt/models/wgs/model.ckpt',
    'WES': '/opt/models/wes/model.ckpt',
    'PACBIO': '/opt/models/pacbio/model.ckpt',
    'HYBRID_PACBIO_ILLUMINA': '/opt/models/hybrid_pacbio_illumina/model.ckpt',
}

# Current release version of DeepVariant.
# Should be the same in dv_vcf_constants.py.
DEEP_VARIANT_VERSION = '1.3.0'
def _is_quoted(value):
if value.startswith('"') and value.endswith('"'):
return True
if value.startswith("'") and value.endswith("'"):
return True
return False
def _add_quotes(value):
if isinstance(value, str) and _is_quoted(value):
return value
return '"{}"'.format(value)
def _extra_args_to_dict(extra_args):
"""Parses comma-separated list of flag_name=flag_value to dict."""
args_dict = {}
if extra_args is None:
return args_dict
for extra_arg in extra_args.split(','):
(flag_name, flag_value) = extra_arg.split('=')
flag_name = flag_name.strip('-')
# Check for boolean values.
if flag_value.lower() == 'true':
flag_value = True
elif flag_value.lower() == 'false':
flag_value = False
args_dict[flag_name] = flag_value
return args_dict
def _extend_command_by_args_dict(command, extra_args):
"""Adds `extra_args` to the command string."""
for key in sorted(extra_args):
value = extra_args[key]
if value is None:
continue
if isinstance(value, bool):
added_arg = '' if value else 'no'
added_arg += key
command.extend(['--' + added_arg])
else:
command.extend(['--' + key, _add_quotes(value)])
return command
def _update_kwargs_with_warning(kwargs, extra_args, conflict_args=None):
  """Updates `kwargs` with `extra_args`; crashes if `conflict_args` changed.

  Args:
    kwargs: Dict to update (mutated in place and also returned).
    extra_args: Dict whose entries override the ones in `kwargs`.
    conflict_args: Optional list of keys that must not be overridden with a
      different value.

  Returns:
    The updated `kwargs` dict.

  Raises:
    ValueError: If a key listed in `conflict_args` would change value.
  """
  for k, v in extra_args.items():
    if k in kwargs:
      if conflict_args is not None and k in conflict_args and kwargs[k] != v:
        raise ValueError(
            'The extra_args "{}" might have conflicts with other flags. '
            'See '
            'https://github.com/google/deepvariant/blob/r1.3/docs/'
            'deepvariant-pacbio-model-case-study.md#clarification-'
            'of-the---use_hp_information-flag '
            'for an explanation, or report this issue on '
            'https://github.com/google/deepvariant/issues.'.format(k))
      if kwargs[k] != v:
        # Allowed override: tell the user their extra arg replaced a default.
        print('\nWarning: --{} is previously set to {}, now to {}.'.format(
            k, kwargs[k], v))
    kwargs[k] = v
  return kwargs
def make_examples_command(ref,
                          reads,
                          examples,
                          extra_args,
                          runtime_by_region_path=None,
                          **kwargs):
  """Returns a make_examples (command, logfile) for subprocess.

  Args:
    ref: Input FASTA file.
    reads: Input BAM file.
    examples: Output tfrecord file containing tensorflow.Example files.
    extra_args: Comma-separated list of flag_name=flag_value.
    runtime_by_region_path: Output path for runtime by region metrics.
    **kwargs: Additional arguments to pass in for make_examples.

  Returns:
    (string, string) A command to run, and a log file to output to.
  """
  # Fan the work out over --num_shards parallel make_examples processes
  # via GNU parallel.
  command = [
      'time', 'seq 0 {} |'.format(FLAGS.num_shards - 1),
      'parallel -q --halt 2 --line-buffer', '/opt/deepvariant/bin/make_examples'
  ]
  command.extend(['--mode', 'calling'])
  command.extend(['--ref', '"{}"'.format(ref)])
  command.extend(['--reads', '"{}"'.format(reads)])
  command.extend(['--examples', '"{}"'.format(examples)])
  if runtime_by_region_path is not None:
    command.extend(
        ['--runtime_by_region', '"{}"'.format(runtime_by_region_path)])
  conflict_args = None
  if FLAGS.model_type == 'PACBIO':
    # PACBIO-specific defaults; extra_args may still override them (with a
    # warning), except for the conflict_args guarded below.
    special_args = {}
    special_args['pileup_image_width'] = 199
    special_args['realign_reads'] = False
    special_args['vsc_min_fraction_indels'] = 0.12
    special_args['alt_aligned_pileup'] = 'diff_channels'
    special_args['add_hp_channel'] = True
    special_args['sort_by_haplotypes'] = special_args[
        'parse_sam_aux_fields'] = bool(FLAGS.use_hp_information)
    kwargs = _update_kwargs_with_warning(kwargs, special_args)
    conflict_args = ['sort_by_haplotypes', 'parse_sam_aux_fields']
  # Extend the command with all items in kwargs and extra_args.
  kwargs = _update_kwargs_with_warning(kwargs, _extra_args_to_dict(extra_args),
                                       conflict_args)
  command = _extend_command_by_args_dict(command, kwargs)
  # Literal '{}' placeholder: replaced by `parallel` with the shard index.
  command.extend(['--task {}'])
  logfile = None
  if FLAGS.logging_dir:
    logfile = '{}/make_examples.log'.format(FLAGS.logging_dir)
  return (' '.join(command), logfile)
def call_variants_command(outfile, examples, model_ckpt,
                          intermediate_results_dir, extra_args):
  """Returns a call_variants (command, logfile) for subprocess.

  Args:
    outfile: Output tfrecord path for call_variants results.
    examples: Input examples tfrecord produced by make_examples.
    model_ckpt: Model checkpoint path to load.
    intermediate_results_dir: Directory reused as the OpenVINO model dir.
    extra_args: Comma-separated flag_name=flag_value string, or None.

  Returns:
    (string, string) A command to run, and a log file (or None).
  """
  command = ['time', '/opt/deepvariant/bin/call_variants']
  command.extend(['--outfile', '"{}"'.format(outfile)])
  command.extend(['--examples', '"{}"'.format(examples)])
  command.extend(['--checkpoint', '"{}"'.format(model_ckpt)])
  # --openvino_model_dir will only be used if use_openvino is set to true for
  # call_variants. But it won't hurt to set it anyway, so setting it to the
  # intermediate_results_dir.
  command.extend(
      ['--openvino_model_dir', '"{}"'.format(intermediate_results_dir)])
  # Extend the command with all items in extra_args.
  command = _extend_command_by_args_dict(command,
                                         _extra_args_to_dict(extra_args))
  logfile = None
  if FLAGS.logging_dir:
    logfile = '{}/call_variants.log'.format(FLAGS.logging_dir)
  return (' '.join(command), logfile)
def postprocess_variants_command(ref,
                                 infile,
                                 outfile,
                                 extra_args,
                                 nonvariant_site_tfrecord_path=None,
                                 gvcf_outfile=None,
                                 vcf_stats_report=True,
                                 sample_name=None):
  """Returns a postprocess_variants (command, logfile) for subprocess.

  Args:
    ref: Reference FASTA path.
    infile: Input tfrecord produced by call_variants.
    outfile: Output VCF path.
    extra_args: Comma-separated flag_name=flag_value string, or None.
    nonvariant_site_tfrecord_path: gVCF records from make_examples, if any.
    gvcf_outfile: Output gVCF path, if requested.
    vcf_stats_report: Whether to produce the HTML stats report.
    sample_name: Overrides the sample name from the input, if set.

  Returns:
    (string, string) A command to run, and a log file (or None).
  """
  command = ['time', '/opt/deepvariant/bin/postprocess_variants']
  command.extend(['--ref', '"{}"'.format(ref)])
  command.extend(['--infile', '"{}"'.format(infile)])
  command.extend(['--outfile', '"{}"'.format(outfile)])
  if nonvariant_site_tfrecord_path is not None:
    command.extend([
        '--nonvariant_site_tfrecord_path',
        '"{}"'.format(nonvariant_site_tfrecord_path)
    ])
  if gvcf_outfile is not None:
    command.extend(['--gvcf_outfile', '"{}"'.format(gvcf_outfile)])
  if not vcf_stats_report:
    # The report defaults to on in postprocess_variants; pass the negated
    # boolean flag only when it is disabled.
    command.extend(['--novcf_stats_report'])
  if sample_name is not None:
    command.extend(['--sample_name', '"{}"'.format(sample_name)])
  # Extend the command with all items in extra_args.
  command = _extend_command_by_args_dict(command,
                                         _extra_args_to_dict(extra_args))
  logfile = None
  if FLAGS.logging_dir:
    logfile = '{}/postprocess_variants.log'.format(FLAGS.logging_dir)
  return (' '.join(command), logfile)
def runtime_by_region_vis_command(runtime_by_region_path: str):
  """Returns a runtime_by_region_vis (command, logfile=None) for subprocess."""
  # The HTML report is written next to the per-stage logs in --logging_dir.
  runtime_report = os.path.join(FLAGS.logging_dir,
                                'make_examples_runtime_by_region_report.html')
  command = ['time', '/opt/deepvariant/bin/runtime_by_region_vis']
  command.extend(['--input', '"{}"'.format(runtime_by_region_path)])
  command.extend(['--title', '"{}"'.format('DeepVariant')])
  command.extend(['--output', '"{}"'.format(runtime_report)])
  # No logfile for this step: its output is not captured.
  return (' '.join(command), None)
def check_or_create_intermediate_results_dir(intermediate_results_dir):
  """Returns a directory for intermediate results, creating it if needed.

  When no directory is given, a fresh temporary one is created.
  """
  if intermediate_results_dir is None:
    intermediate_results_dir = tempfile.mkdtemp()
  if os.path.isdir(intermediate_results_dir):
    logging.info('Re-using the directory for intermediate results in %s',
                 intermediate_results_dir)
  else:
    logging.info('Creating a directory for intermediate results in %s',
                 intermediate_results_dir)
    os.makedirs(intermediate_results_dir)
  return intermediate_results_dir
def check_flags():
  """Additional logic to make sure flags are set appropriately."""
  if FLAGS.customized_model is not None:
    # A checkpoint is three files (.data-*, .index, .meta); all must exist.
    if (not tf.compat.v1.gfile.Exists(FLAGS.customized_model +
                                      '.data-00000-of-00001') or
        not tf.compat.v1.gfile.Exists(FLAGS.customized_model + '.index') or
        not tf.compat.v1.gfile.Exists(FLAGS.customized_model + '.meta')):
      raise RuntimeError('The model files {}* do not exist. Potentially '
                         'relevant issue: '
                         'https://github.com/google/deepvariant/blob/r1.3/docs/'
                         'FAQ.md#why-cant-it-find-one-of-the-input-files-eg-'
                         'could-not-open'.format(FLAGS.customized_model))
    logging.info(
        'You set --customized_model. Instead of using the default '
        'model for %s, `call_variants` step will load %s* '
        'instead.', FLAGS.model_type, FLAGS.customized_model)
  if FLAGS.use_hp_information and FLAGS.model_type != 'PACBIO':
    # The HP-information flags are only supported for the PACBIO model type.
    raise ValueError('--use_hp_information can only be used with '
                     '--model_type="PACBIO"')
def get_model_ckpt(model_type, customized_model):
  """Returns the checkpoint path, preferring a user-supplied model.

  A non-None `customized_model` wins; otherwise the default checkpoint
  registered for `model_type` is used.
  """
  if customized_model is not None:
    return customized_model
  return MODEL_TYPE_MAP[model_type]
def create_all_commands_and_logfiles(intermediate_results_dir):
  """Creates 3 (command, logfile) to be executed later.

  A fourth runtime-report command is appended when --logging_dir and
  --runtime_report are both set.
  """
  check_flags()
  commands = []
  # make_examples
  nonvariant_site_tfrecord_path = None
  if FLAGS.output_gvcf is not None:
    nonvariant_site_tfrecord_path = os.path.join(
        intermediate_results_dir,
        'gvcf.tfrecord@{}.gz'.format(FLAGS.num_shards))
  examples = os.path.join(
      intermediate_results_dir,
      'make_examples.tfrecord@{}.gz'.format(FLAGS.num_shards))
  if FLAGS.logging_dir and FLAGS.runtime_report:
    runtime_directory = os.path.join(FLAGS.logging_dir,
                                     'make_examples_runtime_by_region')
    if not os.path.isdir(runtime_directory):
      logging.info('Creating a make_examples runtime by region directory in %s',
                   runtime_directory)
      os.makedirs(runtime_directory)
    # The path to runtime metrics output is sharded just like the examples.
    runtime_by_region_path = os.path.join(
        runtime_directory,
        'make_examples_runtime@{}.tsv'.format(FLAGS.num_shards))
  else:
    runtime_by_region_path = None
  commands.append(
      make_examples_command(
          ref=FLAGS.ref,
          reads=FLAGS.reads,
          examples=examples,
          runtime_by_region_path=runtime_by_region_path,
          extra_args=FLAGS.make_examples_extra_args,
          # kwargs:
          gvcf=nonvariant_site_tfrecord_path,
          regions=FLAGS.regions,
          sample_name=FLAGS.sample_name))
  # call_variants
  call_variants_output = os.path.join(intermediate_results_dir,
                                      'call_variants_output.tfrecord.gz')
  model_ckpt = get_model_ckpt(FLAGS.model_type, FLAGS.customized_model)
  commands.append(
      call_variants_command(call_variants_output, examples, model_ckpt,
                            intermediate_results_dir,
                            FLAGS.call_variants_extra_args))
  # postprocess_variants
  commands.append(
      postprocess_variants_command(
          FLAGS.ref,
          call_variants_output,
          FLAGS.output_vcf,
          FLAGS.postprocess_variants_extra_args,
          nonvariant_site_tfrecord_path=nonvariant_site_tfrecord_path,
          gvcf_outfile=FLAGS.output_gvcf,
          vcf_stats_report=FLAGS.vcf_stats_report,
          sample_name=FLAGS.sample_name))
  # runtime-by-region
  if FLAGS.logging_dir and FLAGS.runtime_report:
    commands.append(runtime_by_region_vis_command(runtime_by_region_path))
  return commands
def main(_):
  """Validates flags, then runs the pipeline stages in sequence."""
  if FLAGS.version:
    print('DeepVariant version {}'.format(DEEP_VARIANT_VERSION))
    return
  # These flags are required, but are checked manually (instead of being
  # marked required) so that --version works without them.
  for flag_key in ['model_type', 'ref', 'reads', 'output_vcf']:
    if FLAGS.get_flag_value(flag_key, None) is None:
      sys.stderr.write('--{} is required.\n'.format(flag_key))
      sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n')
      sys.exit(1)
  intermediate_results_dir = check_or_create_intermediate_results_dir(
      FLAGS.intermediate_results_dir)
  if FLAGS.logging_dir and not os.path.isdir(FLAGS.logging_dir):
    logging.info('Creating a directory for logs in %s', FLAGS.logging_dir)
    os.makedirs(FLAGS.logging_dir)
  commands_logfiles = create_all_commands_and_logfiles(intermediate_results_dir)
  print('\n***** Intermediate results will be written to {} '
        'in docker. ****\n'.format(intermediate_results_dir))
  for command, logfile in commands_logfiles:
    print('\n***** Running the command:*****\n{}\n'.format(command))
    if not FLAGS.dry_run:
      fp = open(logfile, 'w') if logfile is not None else None
      # Stream each stage's combined stdout/stderr line by line to stdout
      # (and to the per-stage log file when logging is enabled).
      with subprocess.Popen(
          command,
          stdout=subprocess.PIPE,
          stderr=subprocess.STDOUT,
          bufsize=1,
          shell=True,
          executable='/bin/bash',
          universal_newlines=True) as proc:
        for line in proc.stdout:
          print(line, end='')
          if fp is not None:
            print(line, end='', file=fp)
      if fp is not None:
        fp.close()
      # Abort the whole pipeline as soon as one stage fails.
      if proc.returncode != 0:
        sys.exit(proc.returncode)
if __name__ == '__main__':
app.run(main)
| google/deepvariant | scripts/run_deepvariant.py | Python | bsd-3-clause | 19,921 |
import os, sys
from fabric.api import task
from management.shell import cpR, stream
from management.settings import settings
conf = settings(__package__,strip_leading=1)
from .stylesheets import buildcss
#def compile_js(src,dst=None):
#if dst is not None:
##stream("browserify "+src,program=True).pipe("babel --presets es2015").save(dst)
##stream("browserify -t babelify --presets es2015 "+src,program=True).save(dst)
#stream("browserify "+src,program=True).save(dst)
#else:
##return stream("browserify "+src,program=True).pipe("babel --presets es2015")
##return stream("browserify -t babelify --presets es2015 "+src,program=True)
#return stream("browserify "+src,program=True)
#import jinja2
#from jinja2.filters import environmentfilter
#jinja_env=jinja2.Environment(extensions=['jinja2.ext.autoescape'])
#jinja_env.loader=jinja2.FileSystemLoader(["."])
#def render_tpl(tpl,context,dst):
#stream(jinja_env.get_template(tpl).render(context)).save(dst)
@task
def copy_assets():
    # Copy every configured asset tree (source -> target, filtered by
    # pattern) into place, creating parent directories as needed.
    assets = conf.assets
    for asset in assets.values():
        cpR(asset['source'],asset['target'],pattern=asset['pattern'],create_parents=True)
@task
def deploy():
    # Full deployment: rebuild stylesheets, then copy static assets.
    buildcss()
    copy_assets()
@task
def serve():
    # Serve the built site from www/ on http://localhost:8000.
    # Python 2 only (SimpleHTTPServer/SocketServer, print statement).
    import SimpleHTTPServer
    import SocketServer
    os.chdir('www')
    PORT = 8000
    Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    httpd = SocketServer.TCPServer(("", PORT), Handler)
    print "serving at port", PORT
    httpd.serve_forever()
| jonathanverner/circular | management/web/deploy.py | Python | mit | 1,545 |
from django.conf import settings
from django.contrib.auth.hashers import get_hasher
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.core.validators import RegexValidator
from django.db import models
from era import random_str
from era.utils.translation import _, verbose_choices
from .communicators import CommunicationMixin
class Confirm(models.Model):
    """Confirmation record binding a unique hashed code to a user action."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    key = models.CharField(max_length=30)
    code = models.CharField(max_length=128, verbose_name=_('code'), unique=True)
    sign = models.CharField(max_length=128, verbose_name=_('confirmation'))
    @classmethod
    def gen_code(cls, salt=None):
        """Return a (plain, encoded) code pair whose encoded form is unique.

        Regenerates until the encoded value does not yet exist in the
        table, satisfying the unique constraint on `code`.
        """
        hasher = get_hasher()
        generate = True
        while generate:
            code = random_str()
            # With a fixed salt, uniqueness comes solely from random_str();
            # otherwise a fresh salt is drawn per attempt.
            encoded = hasher.encode(code, salt or hasher.salt())
            generate = bool(cls.objects.filter(code=encoded).count())
        return code, encoded
class BaseUser(CommunicationMixin, AbstractBaseUser):
    """Abstract user model: email login, display name, role and access flag."""
    class Meta:
        abstract = True
        verbose_name = _('user')
        verbose_name_plural = _('users')
    # Authentication happens against the email field, not `name`.
    USERNAME_FIELD = 'email'
    objects = BaseUserManager()
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('date joined'))
    access = models.BooleanField(_('access'), default=True)
    email = models.EmailField(verbose_name=_('email'), unique=True, null=True)
    name = models.CharField(verbose_name=_('username'), max_length=20, null=True)
    role = models.CharField(
        verbose_name=_('role'),
        max_length=10,
        choices=verbose_choices(*settings.USER_ROLES))
    @property
    def username_value(self):
        # Value of the login field (the email address).
        return getattr(self, self.USERNAME_FIELD)
    @property
    def username_dict(self):
        # {'email': <value>} -- convenient for queryset lookups by login field.
        return {self.USERNAME_FIELD: self.username_value}
    def __str__(self):
        return '{0} <{1}>'.format(self.name, self.username_value)
    def get_short_name(self):
        # Part of Django's custom-user contract.
        return self.name
| doctorzeb8/django-era | era/apps/user/models.py | Python | mit | 1,997 |
#! /usr/bin/python
#
# Copyright 2009, 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import errno
import grp
import os
import pwd
import socket
import subprocess
import sys
import tempfile
from lazr.config import as_username_groupname
from lp.services.config import config
from lp.services.mailman.config import (
configure_prefix,
configure_siteowner,
)
from lp.services.mailman.monkeypatches import monkey_patch
basepath = [part for part in sys.path if part]
def build_mailman():
    """Configure, build and install Mailman when enabled in the config.

    Returns 0 when Mailman is disabled or already fully installed (or the
    install succeeds); returns non-zero / exits on failure.  Python 2.
    """
    # Build and install Mailman if it is enabled and not yet built.
    if not config.mailman.build:
        # There's nothing to do.
        return 0
    mailman_path = configure_prefix(config.mailman.build_prefix)
    mailman_bin = os.path.join(mailman_path, 'bin')
    var_dir = os.path.abspath(config.mailman.build_var_dir)
    # If we can import the package, we assume Mailman is properly built at
    # the least. This does not catch re-installs that might be necessary
    # should our copy in sourcecode be updated. Do that manually.
    sys.path.append(mailman_path)
    try:
        import Mailman
    except ImportError:
        # sys.path_importer_cache is a mapping of elements of sys.path to
        # importer objects used to handle them. In Python2.5+ when an element
        # of sys.path is found to not exist on disk, a NullImporter is created
        # and cached - this causes Python to never bother re-inspecting the
        # disk for that path element. We must clear that cache element so that
        # our second attempt to import MailMan after building it will actually
        # check the disk.
        del sys.path_importer_cache[mailman_path]
        need_build = need_install = True
    else:
        need_build = need_install = False
        # Also check for Launchpad-specific bits stuck into the source tree by
        # monkey_patch(), in case this is half-installed. See
        # <https://bugs.launchpad.net/launchpad-registry/+bug/683486>.
        try:
            from Mailman.Queue import XMLRPCRunner
            from Mailman.Handlers import LPModerate
        except ImportError:
            # Monkey patches not present, redo install and patch steps.
            need_install = True
    # Make sure the target directories exist and have the correct
    # permissions, otherwise configure will complain.
    user, group = as_username_groupname(config.mailman.build_user_group)
    # Now work backwards to get the uid and gid
    try:
        uid = pwd.getpwnam(user).pw_uid
    except KeyError:
        print >> sys.stderr, 'No user found:', user
        sys.exit(1)
    try:
        gid = grp.getgrnam(group).gr_gid
    except KeyError:
        print >> sys.stderr, 'No group found:', group
        sys.exit(1)
    # Ensure that the var_dir exists, is owned by the user:group, and has
    # the necessary permissions. Set the mode separately after the
    # makedirs() call because some platforms ignore mkdir()'s mode (though
    # I think Linux does not ignore it -- better safe than sorry).
    try:
        os.makedirs(var_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    else:
        # Just created the var directory, will need to install mailmain bits.
        need_install = True
    os.chown(var_dir, uid, gid)
    os.chmod(var_dir, 02775)
    # Skip mailman setup if nothing so far has shown a reinstall needed.
    if not need_install:
        return 0
    mailman_source = os.path.join('sourcecode', 'mailman')
    if config.mailman.build_host_name:
        build_host_name = config.mailman.build_host_name
    else:
        build_host_name = socket.getfqdn()
    # Build and install the Mailman software. Note that we don't care about
    # --with-cgi-gid because we're not going to use that Mailman subsystem.
    executable = os.path.abspath('bin/py')
    configure_args = (
        './configure',
        '--prefix', mailman_path,
        '--with-var-prefix=' + var_dir,
        '--with-python=' + executable,
        '--with-username=' + user,
        '--with-groupname=' + group,
        '--with-mail-gid=' + group,
        '--with-mailhost=' + build_host_name,
        '--with-urlhost=' + build_host_name,
        )
    if need_build:
        # Configure.
        retcode = subprocess.call(configure_args, cwd=mailman_source)
        if retcode:
            print >> sys.stderr, 'Could not configure Mailman:'
            sys.exit(retcode)
        # Make.
        retcode = subprocess.call(('make', ), cwd=mailman_source)
        if retcode:
            print >> sys.stderr, 'Could not make Mailman.'
            sys.exit(retcode)
    # Install is always re-run once we know an install is needed.
    retcode = subprocess.call(('make', 'install'), cwd=mailman_source)
    if retcode:
        print >> sys.stderr, 'Could not install Mailman.'
        sys.exit(retcode)
    # Try again to import the package.
    try:
        import Mailman
    except ImportError:
        print >> sys.stderr, 'Could not import the Mailman package'
        return 1
    # Check to see if the site list exists. The output can go to /dev/null
    # because we don't really care about it. The site list exists if
    # config_list returns a zero exit status, otherwise it doesn't
    # (probably). Before we can do this however, we must monkey patch
    # Mailman, otherwise mm_cfg.py won't be set up correctly.
    monkey_patch(mailman_path, config)
    import Mailman.mm_cfg
    retcode = subprocess.call(
        ('./config_list', '-o', '/dev/null',
         Mailman.mm_cfg.MAILMAN_SITE_LIST),
        cwd=mailman_bin,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if retcode:
        addr, password = configure_siteowner(
            config.mailman.build_site_list_owner)
        # The site list does not yet exist, so create it now.
        retcode = subprocess.call(
            ('./newlist', '--quiet',
             '--emailhost=' + build_host_name,
             Mailman.mm_cfg.MAILMAN_SITE_LIST,
             addr, password),
            cwd=mailman_bin)
        if retcode:
            print >> sys.stderr, 'Could not create site list'
            return retcode
        retcode = configure_site_list(
            mailman_bin, Mailman.mm_cfg.MAILMAN_SITE_LIST)
        if retcode:
            print >> sys.stderr, 'Could not configure site list'
            return retcode
    # Create a directory to hold the gzip'd tarballs for the directories of
    # deactivated lists.
    try:
        os.mkdir(os.path.join(Mailman.mm_cfg.VAR_PREFIX, 'backups'))
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    return 0
def configure_site_list(mailman_bin, site_list_name):
    """Configure the site list.

    Currently, the only thing we want to set is to not advertise the
    site list.

    :param mailman_bin: directory holding Mailman's ``config_list`` script.
    :param site_list_name: name of the already-created site mailing list.
    :return: exit status of ``config_list`` (0 on success).
    """
    # Write the settings into a temp file, feed it to config_list, then
    # always remove the temp file, even on failure.
    fd, config_file_name = tempfile.mkstemp()
    try:
        os.close(fd)
        config_file = open(config_file_name, 'w')
        try:
            print >> config_file, 'advertised = False'
        finally:
            config_file.close()
        return subprocess.call(
            ('./config_list', '-i', config_file_name, site_list_name),
            cwd=mailman_bin)
    finally:
        os.remove(config_file_name)
def main():
    """Prepend this tree's library directories to sys.path, then build."""
    # Compute paths relative to the location of this script.
    program = sys.argv[0]
    src = 'lib'
    here = os.path.dirname(os.path.abspath(program))
    srcdir = os.path.join(here, src)
    # NOTE(review): ``basepath`` and ``build_mailman`` are defined elsewhere
    # in this module (outside this excerpt) -- confirm they exist at runtime.
    sys.path = [srcdir, here] + basepath
    return build_mailman()
if __name__ == '__main__':
    # Propagate the build's exit status to the invoking shell.
    return_code = main()
    sys.exit(return_code)
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/buildmailman.py | Python | agpl-3.0 | 7,591 |
"""
models
~~~~~~
Module containing all of our models that are typically
accessed in a CRUD like manner.
"""
from ..models.base import Model as BaseModel
from ..models.default_schema import Model as DefaultSchemaModel
from ..models.login import Model as LoginModel
# All CRUD-style model classes exposed by this package, in registration order.
MODELS = [
    BaseModel,
    DefaultSchemaModel,
    LoginModel,
]
| sassoo/goldman | goldman/models/__init__.py | Python | mit | 353 |
# -*- coding: utf-8 -*-
# __author__ = '磊'
from django.db import models
import datetime
class User(models.Model):
    """Application account with credentials and contact details."""
    username = models.CharField(max_length=50)
    password = models.CharField(max_length=200)
    nickname = models.CharField(max_length=10)
    email = models.EmailField()
    phone = models.CharField(max_length=20)
    # BUG FIX: ``default=datetime.datetime.now()`` called now() once at
    # import time, stamping every new row with the server start time.
    # Passing the callable makes Django evaluate it per-instance.
    create_time = models.DateTimeField(default=datetime.datetime.now)
    last_login_time = models.DateTimeField(null=True)
    is_active = models.BooleanField(default=True)
    def __unicode__(self):
        return self.username
    def is_authenticated(self):
        # Real users are always authenticated (cf. AnonymousUser below).
        return True
class PermissionGroup(models.Model):
    """Named bundle of permissions belonging to one application module."""
    name = models.CharField(max_length=200)
    module = models.CharField(max_length=50)
    def __unicode__(self):
        return self.name
class Permission(models.Model):
    """A single named action, grouped under a PermissionGroup."""
    name = models.CharField(max_length=200)
    # Identifier of the action this permission guards (e.g. a view name).
    action = models.CharField(max_length=200)
    action_group = models.ForeignKey(PermissionGroup)
    def __unicode__(self):
        return self.name
class Menu(models.Model):
    """Navigation menu entry arranged as a tree via ``parent_menu`` ids."""
    name = models.CharField(max_length=50)
    description = models.CharField(max_length=255)
    # Plain integer id of the parent menu (not a ForeignKey) -- 0/None
    # semantics for roots are decided by the callers. TODO confirm.
    parent_menu = models.IntegerField()
    url = models.CharField(max_length=255)
    # Display order among siblings.
    sort = models.IntegerField()
    # NOTE: field name is a typo for "is_leaf"; kept because renaming would
    # require a schema migration and touch every caller.
    is_leaft = models.BooleanField(default=False)
    is_available = models.BooleanField(default=True)
    def __unicode__(self):
        return self.name
class Group(models.Model):
    """User group carrying its members, permissions and visible menus."""
    name = models.CharField(max_length=50)
    description = models.CharField(max_length=255)
    create_username = models.CharField(max_length=50)
    # BUG FIX: ``default=datetime.datetime.now()`` froze the timestamp at
    # import time; pass the callable so it is evaluated per new row.
    create_time = models.DateTimeField(default=datetime.datetime.now)
    users = models.ManyToManyField(User)
    permission = models.ManyToManyField(Permission)
    menu = models.ManyToManyField(Menu)
    is_available = models.BooleanField(default=True)
    def __unicode__(self):
        return self.name
class AnonymousUser(object):
    """Stand-in for a visitor who has not logged in.

    Mirrors the subset of the User interface the auth code relies on:
    empty identity attributes and a falsy ``is_authenticated()``.
    """
    id = None
    pk = None
    username = ''

    def __init__(self):
        # No per-instance state; everything lives on the class.
        pass

    def is_authenticated(self):
        """Anonymous visitors are never authenticated."""
        return False
| GavinZhuLei/GavinsDjango | mysite/gauth/models.py | Python | apache-2.0 | 2,086 |
#!/usr/bin/env python
"""
Parse vt100 input and print keys.
For testing terminal input.
(This does not use the `Input` implementation, but only the `Vt100Parser`.)
"""
import sys
from prompt_toolkit.input.vt100 import raw_mode
from prompt_toolkit.input.vt100_parser import Vt100Parser
from prompt_toolkit.keys import Keys
def callback(key_press):
    """Echo each parsed key press; terminate the demo on Ctrl-C."""
    print(key_press)
    should_exit = key_press.key == Keys.ControlC
    if should_exit:
        sys.exit(0)
def main():
    """Put the terminal in raw mode and feed stdin, byte by byte, to a
    Vt100Parser, which reports decoded key presses via ``callback``."""
    parser = Vt100Parser(callback)
    with raw_mode(sys.stdin.fileno()):
        while True:
            parser.feed(sys.stdin.read(1))
if __name__ == "__main__":
    # Run the interactive key-press echo loop until Ctrl-C.
    main()
| jonathanslenders/python-prompt-toolkit | tools/debug_vt100_input.py | Python | bsd-3-clause | 642 |
"""
The objects used by the site module to add custom builtins.
"""
# Those objects are almost immortal and they keep a reference to their module
# globals. Defining them in the site module would keep too many references
# alive.
# Note this means this module should also avoid keep things alive in its
# globals.
import sys
class Quitter(object):
    """Callable bound to the ``exit``/``quit`` builtins.

    Printing the object explains how to leave the interpreter; calling it
    closes stdin and raises SystemExit.
    """

    def __init__(self, name, eof):
        self.name = name
        self.eof = eof

    def __repr__(self):
        return 'Use {0}() or {1} to exit'.format(self.name, self.eof)

    def __call__(self, code=None):
        # Close stdin first: shells like IDLE catch the SystemExit but do
        # notice when their stdin wrapper is closed.
        try:
            sys.stdin.close()
        except:
            pass
        raise SystemExit(code)
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice.

    The text is loaded lazily: on first use it tries each candidate file
    built from *dirs* x *files*, falling back to the in-memory *data*.
    """
    # Number of lines shown per page when paging interactively.
    MAXLINES = 23
    def __init__(self, name, data, files=(), dirs=()):
        import os
        self.__name = name
        self.__data = data
        self.__lines = None
        # Candidate files to read the text from, tried in order.
        self.__filenames = [os.path.join(dir, filename)
                            for dir in dirs
                            for filename in files]
    def __setup(self):
        # Load the text once; subsequent calls are no-ops.
        if self.__lines:
            return
        data = None
        for filename in self.__filenames:
            try:
                with open(filename, "r") as fp:
                    data = fp.read()
                break
            except OSError:
                pass
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)
    def __repr__(self):
        # Short texts are shown inline; long ones just advertise the call.
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)
    def __call__(self):
        # Page through the text MAXLINES at a time; 'q' quits, Return pages.
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                # Ran off the end of the text: done.
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help that provides a helpful message
when 'help' is typed at the Python interactive prompt.
Calling help() at the Python prompt starts an interactive help session.
Calling help(thing) prints help for the python object 'thing'.
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
| Orav/kbengine | kbe/src/lib/python/Lib/_sitebuiltins.py | Python | lgpl-3.0 | 3,218 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add Product.tagline and make Product.slug a unique
    AutoSlugField (populated from title)."""
    def forwards(self, orm):
        # Adding field 'Product.tagline'
        db.add_column(u'product_product', 'tagline',
                      self.gf('django.db.models.fields.CharField')(default='<<enter tagline>>', max_length=300),
                      keep_default=False)
        # Changing field 'Product.slug'
        db.alter_column(u'product_product', 'slug', self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, populate_from='title', unique=True, null=True))
        # Adding unique constraint on 'Product', fields ['slug']
        db.create_unique(u'product_product', ['slug'])
    def backwards(self, orm):
        # Removing unique constraint on 'Product', fields ['slug']
        db.delete_unique(u'product_product', ['slug'])
        # Deleting field 'Product.tagline'
        db.delete_column(u'product_product', 'tagline')
        # User chose to not deal with backwards NULL issues for 'Product.slug'
        raise RuntimeError("Cannot reverse this migration. 'Product.slug' and its values cannot be restored.")
        # NOTE: the statement below is intentionally unreachable South
        # scaffolding, kept as a template for a hand-written reversal.
        # The following code is provided here to aid in writing a correct migration
        # Changing field 'Product.slug'
        db.alter_column(u'product_product', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=50))
    # Frozen ORM snapshot used by South; do not edit by hand.
    models = {
        u'product.product': {
            'Meta': {'object_name': 'Product'},
            'demo_video_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'description': ('ckeditor.fields.RichTextField', [], {}),
            'documentation_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product_category': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'blank': 'True', 'populate_from': "'title'", 'unique': 'True', 'null': 'True'}),
            'tagline': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'product.productimage': {
            'Meta': {'object_name': 'ProductImage'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['product.Product']"})
        }
    }
    complete_apps = ['product']
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ReplicationControllerList(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps pythonic attribute name -> declared type; drives to_dict().
    swagger_types = {
        'api_version': 'str',
        'items': 'list[V1ReplicationController]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }
    # Maps pythonic attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }
    def __init__(self, api_version=None, items=None, kind=None, metadata=None):
        """
        V1ReplicationControllerList - a model defined in Swagger
        """
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None
        # 'items' is the only required property: its setter rejects None,
        # so it is assigned unconditionally.
        if api_version is not None:
          self.api_version = api_version
        self.items = items
        if kind is not None:
          self.kind = kind
        if metadata is not None:
          self.metadata = metadata
    @property
    def api_version(self):
        """
        Gets the api_version of this V1ReplicationControllerList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
        :return: The api_version of this V1ReplicationControllerList.
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1ReplicationControllerList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
        :param api_version: The api_version of this V1ReplicationControllerList.
        :type: str
        """
        self._api_version = api_version
    @property
    def items(self):
        """
        Gets the items of this V1ReplicationControllerList.
        List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
        :return: The items of this V1ReplicationControllerList.
        :rtype: list[V1ReplicationController]
        """
        return self._items
    @items.setter
    def items(self, items):
        """
        Sets the items of this V1ReplicationControllerList.
        List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
        :param items: The items of this V1ReplicationControllerList.
        :type: list[V1ReplicationController]
        """
        # Required property: reject None explicitly.
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")
        self._items = items
    @property
    def kind(self):
        """
        Gets the kind of this V1ReplicationControllerList.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
        :return: The kind of this V1ReplicationControllerList.
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1ReplicationControllerList.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
        :param kind: The kind of this V1ReplicationControllerList.
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """
        Gets the metadata of this V1ReplicationControllerList.
        Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
        :return: The metadata of this V1ReplicationControllerList.
        :rtype: V1ListMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1ReplicationControllerList.
        Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
        :param metadata: The metadata of this V1ReplicationControllerList.
        :type: V1ListMeta
        """
        self._metadata = metadata
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        # Recursively serialise nested models (anything with to_dict()).
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1ReplicationControllerList):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
# Build configuration for this encoder variant: shellcode source folders
# to include when assembling the getpc stub.
build_config = {
    "folders": [
        u'call'
    ]
}
| ohio813/alpha3 | x86/latin_1/mixedcase/getpc/build_config.py | Python | bsd-3-clause | 55 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import datetime
import logging
from six import python_2_unicode_compatible
from django.core import validators
from django.db import models
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils.encoding import smart_str
from excel_data_sync.validators import THIS_COL, RuleEngine
from xlsxwriter.worksheet import convert_cell_args
logger = logging.getLogger(__name__)
# Excel's built-in number formats, in built-in index order.
# NOTE(review): this list appears unused within this module -- presumably
# kept as a reference for valid ``num_format`` strings; confirm before removal.
fmts = [
    'general',
    '0',
    '0.00',
    '#,##0',
    '#,##0.00',
    '"$"#,##0_);("$"#,##',
    '"$"#,##0_);[Red]("$"#,##',
    '"$"#,##0.00_);("$"#,##',
    '"$"#,##0.00_);[Red]("$"#,##',
    '0%',
    '0.00%',
    '0.00E+00',
    '# ?/?',
    '# ??/??',
    'M/D/YY',
    'D-MMM-YY',
    'D-MMM',
    'MMM-YY',
    'h:mm AM/PM',
    'h:mm:ss AM/PM',
    'h:mm',
    'h:mm:ss',
    'M/D/YY h:mm',
    '_(#,##0_);(#,##0)',
    '_(#,##0_);[Red](#,##0)',
    '_(#,##0.00_);(#,##0.00)',
    '_(#,##0.00_);[Red](#,##0.00)',
    '_("$"* #,##0_);_("$"* (#,##0);_("$"* "-"_);_(@_)',
    '_(* #,##0_);_(* (#,##0);_(* "-"_);_(@_)',
    '_("$"* #,##0.00_);_("$"* (#,##0.00);_("$"* "-"??_);_(@_)',
    '_(* #,##0.00_);_(* (#,##0.00);_(* "-"??_);_(@_)',
    'mm:ss',
    '[h]:mm:ss',
    'mm:ss.0',
    '##0.0E+0',
    '@'
]
class Header(object):
    """Header-row cell for a column: bold, locked, centred on pale yellow."""

    format = {
        'bold': True, 'locked': 1, 'align': 'center', 'valign': 'top',
        'bg_color': '#FFFFB8', 'shrink': True, 'bottom': 2, 'left': 1,
        'text_wrap': True, 'num_format': '',
    }
    num_format = ''

    def __init__(self, column):
        # Attach ourselves to the column so the sheet can find its header.
        self.column = column
        column.header = self
        self.title = column.verbose_name.title()

    def _get_format(self):
        # Merge the class-level spec with this header's number format and
        # register it on the workbook.
        spec = dict(self.format, num_format=self.num_format)
        return self.column._sheet._book.add_format(spec)
@python_2_unicode_compatible
class Column(object):
    """Base mapper from one Django model field to one worksheet column:
    writes cell values and builds the matching Excel data validation.

    NOTE(review): ``self._sheet`` and ``self.number`` are not set here --
    presumably assigned by the owning sheet when columns are registered;
    confirm against the sheet implementation.
    """
    format = {'locked': 0}
    num_format = ''
    # Validator rule names always applied for this column type (RuleEngine).
    main_validators = None
    validate = 'custom'
    # When True, validators declared on the Django field are ignored.
    ignore_field_validators = False
    length = None
    need_vba = False
    col_width = None
    def __init__(self, field, options=None):
        self.field = field
        self.options = options or {}
        self.field_type = type(field)
        self.rule_parser = RuleEngine(self.main_validators)
        self.default = field.default
        self.max_length = field.max_length
        # Bounds are filled in later by process_field_validators()/subclasses.
        self.min_length = None
        self.max_value = None
        self.min_value = None
    @convert_cell_args
    def write_cell(self, row, col, record, *args):
        # Write this column's value for ``record`` into cell (row, col).
        v = self._get_value_from_object(record)
        self._sheet.write(row, col, v, self._get_format())
    @convert_cell_args
    def format_column(self):
        # Apply width and cell format to the whole column.
        # NOTE(review): convert_cell_args normally converts leading (row, col)
        # arguments; this method takes none -- confirm the decorator is needed.
        self._sheet.set_column(self.number,
                               self.number,
                               width=self.get_col_width(),
                               cell_format=self._get_format())
    def get_col_width(self):
        return self.col_width
    def _get_value_from_object(self, record):
        # For choice fields export the human-readable label, not the raw value.
        if self.field.choices:
            getter = 'get_{}_display'.format(self.field.name)
            return getattr(record, getter)()
        return getattr(record, self.field.name)
    # def to_xls(self, value):
    #     """
    #     Converts the input value into the expected Python data type, raising
    #     django.core.exceptions.ValidationError if the data can't be converted.
    #     Returns the converted value. Subclasses should override this.
    #     """
    #     return value
    #
    # def to_python(self, value):
    #     """
    #     Converts the input value into the expected Python data type, raising
    #     django.core.exceptions.ValidationError if the data can't be converted.
    #     Returns the converted value. Subclasses should override this.
    #     """
    #     return value
    def _get_format(self, **kwargs):
        # Build this column's cell format; non-editable fields are locked.
        fmt = dict(self.format)
        fmt['num_format'] = self.num_format
        fmt['locked'] = not self.field.editable
        fmt.update(kwargs)
        return self._sheet._book.add_format(fmt)
    def process_field_validators(self):
        """Translate the Django field's validators into RuleEngine rules."""
        if self.ignore_field_validators:
            return
        if self.max_length is not None:
            self.rule_parser.append("max_length")
        for validator in self.field.validators:
            if isinstance(validator, validators.MaxLengthValidator):
                self.max_length = validator.limit_value
                self.rule_parser.append("max_length")
            elif isinstance(validator, validators.MaxValueValidator):
                self.max_value = validator.limit_value
                self.rule_parser.append("max")
            elif isinstance(validator, validators.MinValueValidator):
                self.min_value = validator.limit_value
                self.rule_parser.append("min")
    def add_data_validation(self):
        # Apply the validation to every data row of this column (row 1 down).
        self._sheet.data_validation(1, self.number,
                                    65000, self.number,
                                    self._get_validation())
    def _get_validation(self):
        """Build the xlsxwriter data_validation options dict for this column."""
        rule = {"validate": "custom", "criteria": ""}
        try:
            if self.field.unique:
                self.rule_parser.append("unique")
            if not self.field.blank:
                self.rule_parser.append("required")
                rule["ignore_blank"] = False
            # Choice fields become a dropdown of their display labels.
            if self.field.choices:
                return {"validate": "list",
                        "dropdown": True,
                        "value": [x[1] for x in self.field.choices]}
            self.process_field_validators()
            context = dict(current_column=THIS_COL,
                           max_value=self.max_value,
                           min_value=self.min_value,
                           length=self.length,
                           min_length=self.min_length,
                           max_length=self.max_length)
            # All rules must hold at once -> AND() of the rendered formulas.
            if self.rule_parser:
                formula = "=AND(%s)" % ",".join(self.rule_parser.get_rule(context))
            else:
                rule["validate"] = "any"
                formula = ""
            rule["value"] = formula
            rule["error_message"] = "\n".join(self.rule_parser.get_messages(context))
            return rule
        except Exception as e: # pragma: no cover
            # Never let a broken rule block the export: log and skip validation.
            logger.exception(e)
            return {"validate": "any"}
    def __getattr__(self, item):
        # Delegate a whitelisted set of attributes to the wrapped field.
        if item in ('blank', 'null', 'max_length', 'name', 'related_model',
                    'choices', 'unique', 'verbose_name',):
            return getattr(self.field, item)
        raise AttributeError(item) # pragma: no cover
    def __repr__(self):
        return smart_str("<{0.__class__.__name__} '{0.verbose_name}'>".format(self))
    def __str__(self):
        return """<Column {0.verbose_name}>""".format(self)
class DateColumn(Column):
    """Date field: uses the workbook's shared date format and a 'date'
    data validation whose bounds come from the field's validators."""
    _format_attr = 'default_date_format'
    validate = "date"
    # Excel's day zero -- the lower bound when no validator narrows it.
    epoch = datetime.datetime(1900, 1, 1)
    col_width = 10

    def _get_format(self):
        book = self._sheet._book
        return getattr(book, self._format_attr)

    @convert_cell_args
    def write_cell(self, row, col, record, *args):
        value = self._get_value_from_object(record)
        self._sheet.write_datetime(row, col, value, self._get_format())

    def _get_validation(self):
        self.process_field_validators()
        criteria, value, maximum = ">=", self.epoch, None
        if self.rule_parser:
            has_min = "min" in self.rule_parser
            has_max = "max" in self.rule_parser
            if has_min and has_max:
                criteria = "between"
                value, maximum = self.min_value, self.max_value
            elif has_min:
                value = self.min_value
            elif has_max:
                criteria, value = "<=", self.max_value
        return {"validate": self.validate,
                "criteria": criteria,
                "value": value,
                "maximum": maximum}
class DateTimeColumn(DateColumn):
    """Datetime column: workbook datetime format, wider cells."""
    _format_attr = 'default_datetime_format'
    validate = "date"
    col_width = 20
    def _get_value_from_object(self, record):
        v = super(DateColumn, self)._get_value_from_object(record)
        # Convert to the workbook's timezone, then strip tzinfo because
        # xlsxwriter can only serialise naive datetimes.
        return v.astimezone(self._sheet._book.timezone).replace(tzinfo=None)
class TimeColumn(DateColumn):
    """Time-of-day column using the workbook's default time format."""
    _format_attr = 'default_time_format'
    validate = "time"
class NumberColumn(Column):
    """Integer column whose bounds come from the database backend's
    declared range for the field's internal type."""
    num_format = '#,##'
    main_validators = ["number"]

    def __init__(self, field, options=None):
        super(NumberColumn, self).__init__(field, options)
        bounds = BaseDatabaseOperations.integer_field_ranges[field.get_internal_type()]
        self.min_value, self.max_value = bounds

    def process_field_validators(self):
        super(NumberColumn, self).process_field_validators()
        # Always enforce the backend's integer range on top of field rules.
        for rule in ("min", "max"):
            self.rule_parser.append(rule)
class SmallIntegerColumn(NumberColumn):
    """SmallIntegerField; bounds resolved via integer_field_ranges."""
    pass
class IntegerColumn(NumberColumn):
    """IntegerField; bounds resolved via integer_field_ranges."""
    pass
class BigIntegerColumn(NumberColumn):
    """BigIntegerField; bounds resolved via integer_field_ranges."""
    pass
class PositiveSmallIntegerColumn(NumberColumn):
    """PositiveSmallIntegerField; bounds resolved via integer_field_ranges."""
    pass
class PositiveIntegerColumn(NumberColumn):
    """PositiveIntegerField; bounds resolved via integer_field_ranges.

    The previous ``__init__`` override only delegated to ``super()`` with
    the same arguments, so it was redundant and has been removed.
    """
class AutoColumn(NumberColumn):
    """Auto-increment primary key: always written locked."""
    # AutoField is not a key in integer_field_ranges, so the range is looked
    # up via this internal type name instead of field.get_internal_type().
    as_internal_type = "IntegerField"
    format = {'locked': 1}
    def __init__(self, field, options=None):
        # Deliberately ``super(NumberColumn, ...)``: skips NumberColumn.__init__
        # (whose get_internal_type() lookup would KeyError for AutoField)
        # and calls Column.__init__ directly.
        super(NumberColumn, self).__init__(field, options)
        self.min_value, self.max_value = BaseDatabaseOperations.integer_field_ranges[self.as_internal_type]
    @convert_cell_args
    def write_cell(self, row, col, record, *args):
        v = self._get_value_from_object(record)
        # Force locked=1 regardless of field.editable: users must not edit PKs.
        self._sheet.write(row, col, v, self._get_format(locked=1))
class BigAutoColumn(AutoColumn):
    """BigAutoField primary key: AutoColumn with 64-bit integer bounds."""
    as_internal_type = "BigIntegerField"
class DecimalColumn(Column):
    """DecimalField: two-decimal thousands format plus numeric validation."""
    num_format = '#,##0.00'
    main_validators = ["number"]
class FloatColumn(Column):
    """FloatField: two-decimal thousands format plus numeric validation."""
    num_format = '#,##0.00'
    main_validators = ["number"]
class BooleanColumn(Column):
    """Required boolean: dropdown limited to the strings True/False."""

    def _get_validation(self):
        choices = ["True", "False"]
        return {"validate": "list", "dropdown": True, "value": choices}
class NullBooleanColumn(Column):
    """Nullable boolean: dropdown of empty string, True and False."""

    def _get_validation(self):
        choices = ["", "True", "False"]
        return {"validate": "list", "dropdown": True, "value": choices}
class IpAddressColumn(Column):
    """IP address field validated by the RuleEngine "ip" rule."""
    main_validators = ["ip"]
class UUIDColumn(Column):
    """UUID exported and validated as its 32-character hex form."""
    num_format = 'general'
    main_validators = ["uuid", "length"]
    length = 32
    # The hex form has a fixed shape, so the Django field's own validators
    # would be misleading here.
    ignore_field_validators = True

    def _get_value_from_object(self, record):
        value = getattr(record, self.field.name)
        return value.hex
class TextColumn(Column):
    """Plain text column (CharField/TextField/URLField)."""
    num_format = 'general'
class EmailColumn(Column):
    """EmailField validated by the RuleEngine "email" rule."""
    main_validators = ["email"]
class ForeignKeyColumn(Column):
    """Foreign key rendered as a string; valid choices come from a companion
    lookup worksheet (col A = pk, col B = label) created per related model."""

    def _get_value_from_object(self, record):
        return str(getattr(record, self.field.name))

    def add_data_validation(self):
        # One hidden lookup sheet per related model, named "app.model".
        sheet_name = '{0.app_label}.{0.model_name}'.format(self.field.related_model._meta)
        fksheet = self._sheet._book.add_worksheet(sheet_name)
        if self._sheet._book.hide:
            fksheet.hide()
        # NOTE(review): uses the deprecated ``field.rel.model`` here but
        # ``field.related_model`` above -- presumably for older-Django
        # compatibility; confirm before unifying.
        for i, opt in enumerate([[x.pk, str(x)] for x in self.field.rel.model.objects.all()]):
            id, label = opt
            fksheet.write(i, 0, id)
            fksheet.write(i, 1, label)
        # BUG FIX: the dropdown source was hard-coded to "example.option",
        # which only worked for that one demo model; reference the lookup
        # sheet that was actually created above.
        self._sheet.data_validation(1, self.number,
                                    65000, self.number,
                                    {"validate": "list",
                                     "dropdown": True,
                                     "value": "='{}'!$B:$B".format(sheet_name)
                                     }
                                    )

    def _get_validation(self):
        # Validation is installed directly by add_data_validation().
        return {}
# Default Field-class -> Column-class dispatch table. More specific entries
# keyed by dotted "app.model.field" strings can be added via register_column().
mapping = {models.Field: Column,
           models.SmallIntegerField: SmallIntegerColumn,
           models.IntegerField: IntegerColumn,
           models.BigIntegerField: BigIntegerColumn,
           models.PositiveSmallIntegerField: PositiveSmallIntegerColumn,
           models.PositiveIntegerField: PositiveIntegerColumn,
           models.GenericIPAddressField: IpAddressColumn,
           models.AutoField: AutoColumn,
           models.ForeignKey: ForeignKeyColumn,
           models.BooleanField: BooleanColumn,
           models.NullBooleanField: NullBooleanColumn,
           models.DecimalField: DecimalColumn,
           models.FloatField: FloatColumn,
           models.DateField: DateColumn,
           models.DateTimeField: DateTimeColumn,
           models.TimeField: TimeColumn,
           models.EmailField: EmailColumn,
           models.CharField: TextColumn,
           models.TextField: TextColumn,
           models.UUIDField: UUIDColumn,
           models.URLField: TextColumn,
           }
# BigAutoField only exists on newer Django versions; register it when present.
try:
    mapping[models.BigAutoField] = BigAutoColumn
except AttributeError:
    pass
def register_column(key, col):
    """Register *col* as the Column class for *key*.

    *key* may be a Field class, a dotted "app.model.field" string, or a
    bound field instance (resolved to its dotted path).
    """
    if isinstance(key, models.Field):
        meta = key.model._meta
        target = "{0}.{1}.{2}".format(meta.app_label, meta.model_name,
                                      key.name).lower()
    else:
        target = key
    mapping[target] = col
def unregister_column(key):
    """Remove the Column registration for *key* (see register_column)."""
    if isinstance(key, models.Field):
        meta = key.model._meta
        target = "{0}.{1}.{2}".format(meta.app_label, meta.model_name,
                                      key.name).lower()
    else:
        target = key
    del mapping[target]
def get_column(field, options=None):
    """Instantiate the Column registered for *field*.

    Field-specific dotted registrations win over the field-class mapping;
    anything unknown falls back to the generic Column.
    """
    fallback = mapping.get(type(field), Column)
    try:
        meta = field.model._meta
        target = "{0}.{1}.{2}".format(meta.app_label, meta.model_name,
                                      field.name).lower()
        klass = mapping.get(target, fallback)
    except AttributeError:
        # Unbound field (no .model): use the class-level mapping only.
        klass = fallback
    try:
        return klass(field, options)
    except TypeError:  # pragma: no cover
        raise ValueError("unknown field {}".format(field))
| saxix/django-excel-data-sync | src/excel_data_sync/columns.py | Python | mit | 14,232 |
"""
This is the config file for GSEA, containing various parameters the user may
wish to modify.
"""
# Directories (relative to the working directory) for input and output data.
path = dict(
    input = 'data',
    output = 'data',
)
# Analysis parameters: ranking metric ('s2n' = signal-to-noise),
# number of permutations, and the p-weighting exponent.
analysis = dict(
    rankby = 's2n',
    permut = 1000,
    p_weight = 1.0
)
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from wordnik import *
import settings
class Keyword(models.Model):
    """A unique, lower-cased keyword extracted from documents."""
    name = models.CharField(max_length=200, unique=True) # case sensitive - lowercased on save()
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return self.name
    def clean(self):
        # Normalise to lower case so uniqueness is effectively case-insensitive.
        self.name = self.name.lower()
    def save(self, *args, **kwargs):
        # full_clean() runs clean() above before every save.
        self.full_clean()
        return super(Keyword, self).save(*args, **kwargs)
    def definition(self):
        """Return the first Wordnik definition for this word, or None."""
        try:
            client = swagger.ApiClient(settings.WORDNIK_API_KEY, settings.WORDNIK_API_URL)
            wordApi = WordApi.WordApi(client)
            return wordApi.getDefinitions(self.name)[0].text
        # BUG FIX: the bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; keep the best-effort None, but only for real errors.
        except Exception:
            return None
class Category(models.Model):
    """A named category that keywords can be reviewed into."""
    name = models.CharField(max_length=200, unique=True)
    description = models.TextField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name_plural = "categories"
    def __unicode__(self):
        return self.name
class Document(models.Model):
    """Imported document (a product search or a product response)."""
    PRODUCT_SEARCH = "S"
    PRODUCT_RESPONSE = "R"
    # (stored value, display label) pairs for ``of_type``.
    DOCUMENT_TYPES = (
        (PRODUCT_SEARCH, "Product Search"),
        (PRODUCT_RESPONSE, "Product Response"),
    )
    of_type = models.CharField(max_length=10, choices=DOCUMENT_TYPES, verbose_name="Type")
    # Id of the corresponding record in the external Architizer system.
    architizer_id = models.IntegerField(null=True, verbose_name="Architizer ID")
    title = models.CharField(max_length=200, blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        return self.title
class Context(models.Model):
    """An occurrence of a keyword within a document, with surrounding text."""
    document = models.ForeignKey(Document)
    keyword_given = models.ForeignKey(Keyword)
    # Character offsets of the keyword within the document text.
    position_from = models.IntegerField()
    position_to = models.IntegerField()
    text = models.TextField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    # post to context with an array of categories maybe keyword...
    #create review objects that do not exist (if there is a category in the array)
    #delete Review objects that do not happen ( )
    def __unicode__(self):
        return self.text
    def next_context_id(self):
        """Return the id of the context with id+1, or None.

        NOTE(review): assumes contiguous primary keys -- a deleted row makes
        this return None even when later contexts exist; confirm acceptable.
        """
        # BUG FIX: the bare ``except:`` clauses below also trapped
        # KeyboardInterrupt/SystemExit; catch only real errors.
        try:
            return Context.objects.get(id=self.id + 1).id
        except Exception:
            return None
    def prev_context_id(self):
        """Return the id of the context with id-1, or None."""
        try:
            return Context.objects.get(id=self.id - 1).id
        except Exception:
            return None
class Review(models.Model):
    """A user's proposal to categorise a keyword occurrence (Context)."""
    PENDING = "pending"
    APPROVED = "approved"
    # (stored value, display label) pairs; value and label coincide here.
    STATUS_TYPES = (
        ("pending", PENDING),
        ("approved", APPROVED),
    )
    context = models.ForeignKey(Context, related_name="reviews")
    keyword_proposed = models.ForeignKey(Keyword, related_name="keyword_proposed")
    keyword_given = models.ForeignKey(Keyword, related_name="keyword_given")
    category = models.ForeignKey(Category)
    user = models.ForeignKey(User)
    status = models.CharField(max_length=20, choices=STATUS_TYPES, default=PENDING)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    class Meta:
        # A user may hold at most one review per proposal/category/status.
        unique_together = ('context', 'keyword_proposed', 'category', 'user', 'status')
    def __unicode__(self):
        return self.status
    # def save(self):
    # #get keyword given from database
    # if self.keyword_proposed:
    # exists:
    # if Keyword.objects.filter(name=self.keyword_proposed).exists()
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
# The original script repeated an identical five-line stanza for each of the
# 71 particles; the per-particle data is tabulated here instead and replayed
# by one loop, preserving the exact order and arguments of the original calls.
# Each entry: ((x, y, z) center, (r, g, b) color, radius).
_GRAY = (0.7, 0.7, 0.7)
_ORANGE = (1, 0.7, 0)
_PARTICLES = [
    ((-1417.69, 7999.35, 7862.76), _GRAY, 890.203),
    ((-255.789, 8001.16, 6498.04), _GRAY, 792.956),
    ((1043.94, 6568.7, 6558.27), _GRAY, 856.786),
    ((-1105.23, 6466.69, 7375.34), _GRAY, 963.679),
    ((-941.162, 5006.39, 7850.1), _GRAY, 761.442),
    ((1250.29, 4004.55, 7630.89), _GRAY, 961.183),
    ((2884.02, 3624.42, 8062.42), _GRAY, 753.151),
    ((2110.46, 3500.1, 8396.99), _ORANGE, 1098.07),
    ((4715.72, 3921.63, 8075.45), _GRAY, 1010.42),
    ((5603.13, 4104.1, 9479.44), _ORANGE, 821.043),
    ((7067.6, 4794.48, 8494.14), _GRAY, 873.876),
    ((6688.59, 5676.66, 7930.28), _GRAY, 625.532),
    ((6795.76, 7228.32, 7499.21), _GRAY, 880.474),
    ((5634.72, 7607.97, 8385.17), _GRAY, 659.161),
    ((5975.56, 9776.47, 8813.62), _GRAY, 831.745),
    ((5649.95, 12276.5, 6970.73), _GRAY, 803.065),
    ((5172.84, 11330.3, 5316.72), _GRAY, 610.262),
    ((6551.64, 10831.2, 5332.18), _GRAY, 741.265),
    ((7004.08, 9405.61, 6160.73), _GRAY, 748.625),
    ((8213.17, 8881.32, 6942.09), _GRAY, 677.181),
    ((7202.72, 6696.88, 6542.3), _GRAY, 616.015),
    ((7962.53, 8540.03, 6089.45), _GRAY, 653.154),
    ((8096.11, 8006.02, 4986.79), _GRAY, 595.33),
    ((8011.78, 9155.23, 4371.13), _GRAY, 627.901),
    ((6949.51, 10034.4, 4743.02), _GRAY, 663.941),
    ((6646.02, 11608.3, 4814.11), _GRAY, 663.899),
    ((7058.86, 10192.8, 5233.91), _GRAY, 644.694),
    ((6239.48, 8380.45, 6155.62), _GRAY, 896.802),
    ((7425.37, 7808.2, 5585.9), _GRAY, 576.38),
    ((7734.98, 6616.85, 5947.09), _GRAY, 635.092),
    ((7707.83, 6311.34, 5104.46), _GRAY, 651.505),
    ((7397.39, 5899.65, 6742.1), _GRAY, 718.042),
    ((8786.72, 6445.96, 5776.58), _GRAY, 726.714),
    ((8002.09, 7254.54, 4818.09), _GRAY, 673.585),
    ((8091.62, 8572.28, 5129.98), _GRAY, 598.418),
    ((8481.41, 9896.28, 5385.73), _GRAY, 693.382),
    ((7830, 7510.32, 5729.58), _GRAY, 804.038),
    ((8838.78, 8494.92, 4532.17), _GRAY, 816.178),
    ((7846.44, 8018.73, 4088.37), _GRAY, 776.628),
    ((9360.31, 7795.61, 4522.41), _GRAY, 750.656),
    ((8797.64, 6169.54, 4562.66), _GRAY, 709.625),
    ((9629.03, 4740.16, 5396.91), _GRAY, 927.681),
    ((11563.6, 4238.41, 3551.8), _GRAY, 1088.21),
    ((10427, 3109.14, 4639.27), _GRAY, 736.147),
    ((9909.56, 4653.57, 4218.64), _GRAY, 861.101),
    ((8122.37, 4792.28, 4962.55), _GRAY, 924.213),
    ((7840.32, 4749.12, 3040.13), _GRAY, 881.828),
    ((9854.48, 4998.02, 2711.31), _GRAY, 927.681),
    ((8132.93, 4317.58, 2877.28), _GRAY, 831.576),
    ((6638.64, 3828.94, 3974.42), _GRAY, 859.494),
    ((7608.62, 2749.54, 3802.01), _GRAY, 704.845),
    ((6754.89, 3424.53, 5098.69), _GRAY, 804.461),
    ((5639.85, 4421.52, 6128.29), _GRAY, 934.111),
    ((5360.29, 3177.05, 7008.75), _GRAY, 988.339),
    ((5545.02, 2580.77, 6606.94), _ORANGE, 803.7),
    ((5877.52, 3639.15, 4788.19), _GRAY, 812.118),
    ((3879.74, 3905.18, 3928.14), _GRAY, 1177.93),
    ((2837.81, 4577.98, 1724.25), _GRAY, 1038.21),
    ((2662.59, 4759.64, 1181.39), _ORANGE, 758.016),
    ((1989.69, 4429.33, 1533.68), _GRAY, 824.046),
    ((2989.63, 4262.46, 1724.71), _GRAY, 793.379),
    ((2622.24, 3482.56, 1346.33), _GRAY, 1011.56),
    ((3423.06, 4206.64, 2896.65), _GRAY, 1097.01),
    ((2220.22, 3217.21, 1844.37), _GRAY, 851.626),
    ((1013.07, 2718.64, 254.321), _GRAY, 869.434),
    ((2635.84, 1944.75, 412.557), _GRAY, 818.463),
    ((2741.68, 2678.39, -1078.41), _GRAY, 759.539),
    ((3156.76, 3007.48, 1318.91), _GRAY, 1088.59),
    ((1418.87, 3169.47, 203.221), _GRAY, 822.312),
    ((2004.23, 2953.28, -1245.3), _GRAY, 749.81),
    ((3098.33, 3965.17, -962.568), _GRAY, 764.488),
]
for _index, (_pos, _color, _radius) in enumerate(_PARTICLES):
    _name = "particle_%d geometry" % _index
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_pos, _color, _radius)
# Register any surface sets with Chimera (none are created above; this
# mirrors the original trailing loop).
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| batxes/4Cin | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models47488.py | Python | gpl-3.0 | 17,589 |
import pytoolkit as tk
module = tk.applications.darknet53
def test_model():
    """Each feature-map getter must return the expected spatial resolution."""
    model = module.create(input_shape=(256, 256, 3), weights=None)
    # (getter, expected (height, width)) for every downsampling level.
    expectations = [
        (module.get_1_over_1, (256, 256)),
        (module.get_1_over_2, (128, 128)),
        (module.get_1_over_4, (64, 64)),
        (module.get_1_over_8, (32, 32)),
        (module.get_1_over_16, (16, 16)),
        (module.get_1_over_32, (8, 8)),
    ]
    for getter, expected_hw in expectations:
        assert tuple(getter(model).shape[1:3]) == expected_hw
def test_save_load(tmpdir):
    """Round-trip a freshly created model through save and load."""
    path = str(tmpdir / "model.h5")
    model = module.create(input_shape=(256, 256, 3), weights=None)
    tk.models.save(model, path)
    tk.models.load(path)
| ak110/pytoolkit | pytoolkit/applications/darknet53_test.py | Python | mit | 752 |
from django.template.base import Context, TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import SilentAttrClass, SilentGetItemClass, SomeClass, setup
# Template-name -> template-source fixtures shared by the first few tests
# below (registered per-test via the @setup decorator).
basic_templates = {
    'basic-syntax01': 'something cool',
    'basic-syntax02': '{{ headline }}',
    'basic-syntax03': '{{ first }} --- {{ second }}',
}
class BasicSyntaxTests(SimpleTestCase):
    """Tests of basic template variable syntax: plain-text pass-through,
    variable substitution, attribute/key/index lookups, string and number
    literals, and the TemplateSyntaxErrors raised for malformed tags."""
    @setup(basic_templates)
    def test_basic_syntax01(self):
        """
        Plain text should go through the template parser untouched.
        """
        output = self.engine.render_to_string('basic-syntax01')
        self.assertEqual(output, "something cool")
    @setup(basic_templates)
    def test_basic_syntax02(self):
        """
        Variables should be replaced with their value in the current
        context
        """
        output = self.engine.render_to_string('basic-syntax02', {'headline': 'Success'})
        self.assertEqual(output, 'Success')
    @setup(basic_templates)
    def test_basic_syntax03(self):
        """
        More than one replacement variable is allowed in a template
        """
        output = self.engine.render_to_string('basic-syntax03', {"first": 1, "second": 2})
        self.assertEqual(output, '1 --- 2')
    @setup({'basic-syntax04': 'as{{ missing }}df'})
    def test_basic_syntax04(self):
        """
        Fail silently when a variable is not found in the current context
        """
        output = self.engine.render_to_string('basic-syntax04')
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'asINVALIDdf')
        else:
            self.assertEqual(output, 'asdf')
    @setup({'basic-syntax06': '{{ multi word variable }}'})
    def test_basic_syntax06(self):
        """
        A variable may not contain more than one word
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax06')
    @setup({'basic-syntax07': '{{ }}'})
    def test_basic_syntax07(self):
        """
        Raise TemplateSyntaxError for empty variable tags.
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax07')
    @setup({'basic-syntax08': '{{ }}'})
    def test_basic_syntax08(self):
        """
        Raise TemplateSyntaxError for empty variable tags.
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax08')
    @setup({'basic-syntax09': '{{ var.method }}'})
    def test_basic_syntax09(self):
        """
        Attribute syntax allows a template to call an object's attribute
        """
        output = self.engine.render_to_string('basic-syntax09', {'var': SomeClass()})
        self.assertEqual(output, 'SomeClass.method')
    @setup({'basic-syntax10': '{{ var.otherclass.method }}'})
    def test_basic_syntax10(self):
        """
        Multiple levels of attribute access are allowed.
        """
        output = self.engine.render_to_string('basic-syntax10', {'var': SomeClass()})
        self.assertEqual(output, 'OtherClass.method')
    @setup({'basic-syntax11': '{{ var.blech }}'})
    def test_basic_syntax11(self):
        """
        Fail silently when a variable's attribute isn't found.
        """
        output = self.engine.render_to_string('basic-syntax11', {'var': SomeClass()})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
    @setup({'basic-syntax12': '{{ var.__dict__ }}'})
    def test_basic_syntax12(self):
        """
        Raise TemplateSyntaxError when trying to access a variable
        beginning with an underscore.
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax12')
    # Raise TemplateSyntaxError when trying to access a variable
    # containing an illegal character.
    @setup({'basic-syntax13': "{{ va>r }}"})
    def test_basic_syntax13(self):
        """'>' is not a legal character in a variable name."""
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax13')
    @setup({'basic-syntax14': "{{ (var.r) }}"})
    def test_basic_syntax14(self):
        """Parentheses are not allowed around a variable expression."""
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax14')
    @setup({'basic-syntax15': "{{ sp%am }}"})
    def test_basic_syntax15(self):
        """'%' is not a legal character in a variable name."""
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax15')
    @setup({'basic-syntax16': "{{ eggs! }}"})
    def test_basic_syntax16(self):
        """'!' is not a legal character in a variable name."""
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax16')
    @setup({'basic-syntax17': "{{ moo? }}"})
    def test_basic_syntax17(self):
        """'?' is not a legal character in a variable name."""
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax17')
    @setup({'basic-syntax18': "{{ foo.bar }}"})
    def test_basic_syntax18(self):
        """
        Attribute syntax allows a template to call a dictionary key's
        value.
        """
        output = self.engine.render_to_string('basic-syntax18', {"foo": {"bar": "baz"}})
        self.assertEqual(output, "baz")
    @setup({'basic-syntax19': "{{ foo.spam }}"})
    def test_basic_syntax19(self):
        """
        Fail silently when a variable's dictionary key isn't found.
        """
        output = self.engine.render_to_string('basic-syntax19', {"foo": {"bar": "baz"}})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
    @setup({'basic-syntax20': "{{ var.method2 }}"})
    def test_basic_syntax20(self):
        """
        Fail silently when accessing a non-simple method
        """
        output = self.engine.render_to_string('basic-syntax20', {'var': SomeClass()})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
    @setup({'basic-syntax20b': "{{ var.method5 }}"})
    def test_basic_syntax20b(self):
        """
        Don't silence a TypeError if it was raised inside a callable.
        """
        template = self.engine.get_template('basic-syntax20b')
        with self.assertRaises(TypeError):
            template.render(Context({'var': SomeClass()}))
    # Don't get confused when parsing something that is almost, but not
    # quite, a template tag.
    @setup({'basic-syntax21': "a {{ moo %} b"})
    def test_basic_syntax21(self):
        output = self.engine.render_to_string('basic-syntax21')
        self.assertEqual(output, "a {{ moo %} b")
    @setup({'basic-syntax22': "{{ moo #}"})
    def test_basic_syntax22(self):
        output = self.engine.render_to_string('basic-syntax22')
        self.assertEqual(output, "{{ moo #}")
    @setup({'basic-syntax23': "{{ moo #} {{ cow }}"})
    def test_basic_syntax23(self):
        """
        Treat "moo #} {{ cow" as the variable. Not ideal, but costly to work
        around, so this triggers an error.
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('basic-syntax23')
    @setup({'basic-syntax24': "{{ moo\n }}"})
    def test_basic_syntax24(self):
        """
        Embedded newlines make it not-a-tag.
        """
        output = self.engine.render_to_string('basic-syntax24')
        self.assertEqual(output, "{{ moo\n }}")
    # Literal strings are permitted inside variables, mostly for i18n
    # purposes.
    @setup({'basic-syntax25': '{{ "fred" }}'})
    def test_basic_syntax25(self):
        output = self.engine.render_to_string('basic-syntax25')
        self.assertEqual(output, "fred")
    @setup({'basic-syntax26': r'{{ "\"fred\"" }}'})
    def test_basic_syntax26(self):
        output = self.engine.render_to_string('basic-syntax26')
        self.assertEqual(output, "\"fred\"")
    @setup({'basic-syntax27': r'{{ _("\"fred\"") }}'})
    def test_basic_syntax27(self):
        output = self.engine.render_to_string('basic-syntax27')
        self.assertEqual(output, "\"fred\"")
    # #12554 -- Make sure a silent_variable_failure Exception is
    # suppressed on dictionary and attribute lookup.
    @setup({'basic-syntax28': "{{ a.b }}"})
    def test_basic_syntax28(self):
        output = self.engine.render_to_string('basic-syntax28', {'a': SilentGetItemClass()})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
    @setup({'basic-syntax29': "{{ a.b }}"})
    def test_basic_syntax29(self):
        output = self.engine.render_to_string('basic-syntax29', {'a': SilentAttrClass()})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
    # Something that starts like a number but has an extra lookup works
    # as a lookup.
    @setup({'basic-syntax30': "{{ 1.2.3 }}"})
    def test_basic_syntax30(self):
        output = self.engine.render_to_string(
            'basic-syntax30',
            {"1": {"2": {"3": "d"}}}
        )
        self.assertEqual(output, 'd')
    @setup({'basic-syntax31': "{{ 1.2.3 }}"})
    def test_basic_syntax31(self):
        output = self.engine.render_to_string(
            'basic-syntax31',
            {"1": {"2": ("a", "b", "c", "d")}},
        )
        self.assertEqual(output, 'd')
    @setup({'basic-syntax32': "{{ 1.2.3 }}"})
    def test_basic_syntax32(self):
        output = self.engine.render_to_string(
            'basic-syntax32',
            {"1": (("x", "x", "x", "x"), ("y", "y", "y", "y"), ("a", "b", "c", "d"))},
        )
        self.assertEqual(output, 'd')
    @setup({'basic-syntax33': "{{ 1.2.3 }}"})
    def test_basic_syntax33(self):
        output = self.engine.render_to_string(
            'basic-syntax33',
            {"1": ("xxxx", "yyyy", "abcd")},
        )
        self.assertEqual(output, 'd')
    @setup({'basic-syntax34': "{{ 1.2.3 }}"})
    def test_basic_syntax34(self):
        output = self.engine.render_to_string(
            'basic-syntax34',
            {"1": ({"x": "x"}, {"y": "y"}, {"z": "z", "3": "d"})}
        )
        self.assertEqual(output, 'd')
    # Numbers are numbers even if their digits are in the context.
    @setup({'basic-syntax35': "{{ 1 }}"})
    def test_basic_syntax35(self):
        output = self.engine.render_to_string('basic-syntax35', {"1": "abc"})
        self.assertEqual(output, '1')
    @setup({'basic-syntax36': "{{ 1.2 }}"})
    def test_basic_syntax36(self):
        output = self.engine.render_to_string('basic-syntax36', {"1": "abc"})
        self.assertEqual(output, '1.2')
    @setup({'basic-syntax37': '{{ callable }}'})
    def test_basic_syntax37(self):
        """
        Call methods in the top level of the context.
        """
        output = self.engine.render_to_string('basic-syntax37', {"callable": lambda: "foo bar"})
        self.assertEqual(output, 'foo bar')
    @setup({'basic-syntax38': '{{ var.callable }}'})
    def test_basic_syntax38(self):
        """
        Call methods returned from dictionary lookups.
        """
        output = self.engine.render_to_string('basic-syntax38', {"var": {"callable": lambda: "foo bar"}})
        self.assertEqual(output, 'foo bar')
    @setup({'template': '{% block content %}'})
    def test_unclosed_block(self):
        msg = "Unclosed tag 'block'. Looking for one of: endblock."
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')
    @setup({'template': '{% if a %}'})
    def test_unclosed_block2(self):
        msg = "Unclosed tag 'if'. Looking for one of: elif, else, endif."
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')
    @setup({'tpl-str': '%s', 'tpl-percent': '%%', 'tpl-weird-percent': '% %s'})
    def test_ignores_strings_that_look_like_format_interpolation(self):
        output = self.engine.render_to_string('tpl-str')
        self.assertEqual(output, '%s')
        output = self.engine.render_to_string('tpl-percent')
        self.assertEqual(output, '%%')
        output = self.engine.render_to_string('tpl-weird-percent')
        self.assertEqual(output, '% %s')
| DONIKAN/django | tests/template_tests/syntax_tests/test_basic.py | Python | bsd-3-clause | 12,382 |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.4 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.0"
__license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>"]
_debug = 0  # internal debug flag; its consumers are later in the module — presumably enables verbose output
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
    import rfc822
except ImportError:
    # Python 3: the rfc822 module was removed; email's private _parseaddr
    # module provides the parsing helpers under the same name.
    from email import _parseaddr as rfc822
try:
    # Python 3.1 introduces bytes.maketrans and simultaneously
    # deprecates string.maketrans; use bytes.maketrans if possible
    _maketrans = bytes.maketrans
except (NameError, AttributeError):
    # Python 2 (no bytes.maketrans): fall back to string.maketrans.
    import string
    _maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
except:
base64 = binascii = None
def _s2bytes(s):
# Convert a UTF-8 str to bytes if the interpreter is Python 3
try:
return bytes(s, 'utf8')
except (NameError, TypeError):
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same (TypeError)
return s
def _l2bytes(l):
# Convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is not str:
# In Python 2.6 and above, this call won't raise an exception
# but it will return bytes([65]) as '[65]' instead of 'A'
return bytes(l)
raise NameError
except NameError:
return ''.join(map(chr, l))
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
# NOTE(review): 'mms' and 'svn' appear twice; presumably harmless since the
# tuple looks to be used only for membership tests -- confirm before deduping.
ACCEPTABLE_URI_SCHEMES = (
    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'mailto',
    'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp',
    'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais',
    # Additional common-but-unofficial schemes
    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
    'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, types, cgi, urllib, urllib2, datetime
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversable htmlentitydefs mappings for Python 2.2
try:
from htmlentitydefs import name2codepoint, codepoint2name
except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/
# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the
# older 2.x series. If it doesn't, and you can figure out why, I'll accept a
# patch and modify the compatibility statement accordingly.
try:
import BeautifulSoup
except:
BeautifulSoup = None
# ---------- don't touch these ----------
# Internal signalling exceptions: raised and caught within feedparser itself.
class ThingsNobodyCaresAboutButMe(Exception): pass
# Signals that the declared character encoding was overridden during parsing.
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
# Signals that no usable character encoding could be determined.
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
# Signals that the served Content-Type was not an XML media type.
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
# Signals that an element used a namespace prefix that was never declared.
class UndeclaredNamespace(Exception): pass
# Patch sgmllib's lexing regexes so tag names, declarations and character
# references are recognized the way real-world feed markup uses them.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
# Only install the endbracket replacement on sgmllib versions whose stock
# regex mis-handles a '<' inside attribute context (probed with ' <').
if sgmllib.endbracket.search(' <').start(0):
    class EndBracketRegEx:
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self,string,index=0):
            match = self.endbracket.match(string,index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
                return EndBracketMatch(match)
            return None
    class EndBracketMatch:
        # Minimal match wrapper: start(n) maps to the wrapped match's end(n),
        # which is the position sgmllib's caller actually needs here.
        def __init__(self, match):
            self.match = match
        def start(self, n):
            return self.match.end(n)
    sgmllib.endbracket = EndBracketRegEx()
# Internal version token -> human-readable feed format name; the empty-string
# key is the fallback when the format cannot be identified.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        # Minimal dict() stand-in: build a mapping from (key, value) pairs.
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary that aliases legacy feed key names to their modern names.

    Lookups work under both old ('channel', 'items', 'modified', ...) and
    new ('feed', 'entries', 'updated', ...) names, and entries are also
    readable/writable as attributes (d.title is d['title']).
    """
    # legacy key -> canonical key; a list value means "first one present wins"
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['summary', 'subtitle'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # a few virtual keys are synthesized from other stored entries
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'enclosures':
            # enclosures are the rel='enclosure' links, minus the 'rel' key
            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
            return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
        if key == 'license':
            for link in UserDict.__getitem__(self, 'links'):
                if link['rel']=='license' and link.has_key('href'):
                    return link['href']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.__contains__(self, k):
                    return UserDict.__getitem__(self, k)
        # a key stored under its own name takes precedence over its alias
        if UserDict.__contains__(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # always store under the canonical (mapped) key name
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        # route through has_key/self[key] so aliases and attributes work
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # attributes count as keys, mirroring __getattr__ below
        try:
            return hasattr(self, key) or UserDict.__contains__(self, key)
        except AttributeError:
            return False
    # This alias prevents the 2to3 tool from changing the semantics of the
    # __contains__ function below and exhausting the maximum recursion depth
    __has_key = has_key
    def __getattr__(self, key):
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            # only non-underscore names fall through to item lookup
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # private names and UserDict's 'data' slot stay real attributes;
        # everything else becomes a dictionary item
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.__has_key(key)
def zopeCompatibilityHack():
    """Replace the FeedParserDict class with a plain-dict factory.

    The 'global' declaration makes the nested def rebind the module-level
    name, so after calling this, FeedParserDict(aDict) returns an ordinary
    dict seeded from aDict.
    """
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        result = {}
        if aDict:
            result.update(aDict)
        return result
# Lazily-built EBCDIC -> ASCII translation table (see _ebcdic_to_ascii).
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate an EBCDIC-encoded byte string to its ASCII equivalent.

    The 256-entry table is built on first use and cached in the module-level
    _ebcdic_to_ascii_map.
    """
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        _ebcdic_to_ascii_map = _maketrans( \
            _l2bytes(range(256)), _l2bytes(emap))
    return s.translate(_ebcdic_to_ascii_map)
# Windows-1252 "extension" code points (often mislabeled as iso-8859-1 in
# feeds) mapped to the Unicode characters they actually represent.
_cp1252 = {
  unichr(128): unichr(8364), # euro sign
  unichr(130): unichr(8218), # single low-9 quotation mark
  unichr(131): unichr( 402), # latin small letter f with hook
  unichr(132): unichr(8222), # double low-9 quotation mark
  unichr(133): unichr(8230), # horizontal ellipsis
  unichr(134): unichr(8224), # dagger
  unichr(135): unichr(8225), # double dagger
  unichr(136): unichr( 710), # modifier letter circumflex accent
  unichr(137): unichr(8240), # per mille sign
  unichr(138): unichr( 352), # latin capital letter s with caron
  unichr(139): unichr(8249), # single left-pointing angle quotation mark
  unichr(140): unichr( 338), # latin capital ligature oe
  unichr(142): unichr( 381), # latin capital letter z with caron
  unichr(145): unichr(8216), # left single quotation mark
  unichr(146): unichr(8217), # right single quotation mark
  unichr(147): unichr(8220), # left double quotation mark
  unichr(148): unichr(8221), # right double quotation mark
  unichr(149): unichr(8226), # bullet
  unichr(150): unichr(8211), # en dash
  unichr(151): unichr(8212), # em dash
  unichr(152): unichr( 732), # small tilde
  unichr(153): unichr(8482), # trade mark sign
  unichr(154): unichr( 353), # latin small letter s with caron
  unichr(155): unichr(8250), # single right-pointing angle quotation mark
  unichr(156): unichr( 339), # latin small ligature oe
  unichr(158): unichr( 382), # latin small letter z with caron
  unichr(159): unichr( 376)} # latin capital letter y with diaeresis
# Strips any extra slashes that immediately follow a URI's '<scheme>://'
# (the sub below keeps groups 1 and 3, dropping the run of '/').
# NOTE(review): the class '[A-Za-z0-9+-.]' contains the range '+'-'.' which
# also matches ','; presumably literal '+', '-', '.' were intended --
# behavior kept as-is.
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join uri to base, retrying with percent-quoted parts on failure."""
    uri = _urifixer.sub(r'\1\3', uri)
    try:
        return urlparse.urljoin(base, uri)
    except:
        # fall back: quote each URL component and try the join once more
        uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
        return urlparse.urljoin(base, uri)
class _FeedParserMixin:
    # Known namespace URI -> canonical prefix used for handler dispatch;
    # the empty string marks URIs treated as the default feed namespace.
    namespaces = {'': '',
                  'http://backend.userland.com/rss': '',
                  'http://blogs.law.harvard.edu/tech/rss': '',
                  'http://purl.org/rss/1.0/': '',
                  'http://my.netscape.com/rdf/simple/0.9/': '',
                  'http://example.com/newformat#': '',
                  'http://example.com/necho': '',
                  'http://purl.org/echo/': '',
                  'uri/of/echo/namespace#': '',
                  'http://purl.org/pie/': '',
                  'http://purl.org/atom/ns#': '',
                  'http://www.w3.org/2005/Atom': '',
                  'http://purl.org/rss/1.0/modules/rss091#': '',
                  'http://webns.net/mvcb/': 'admin',
                  'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
                  'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
                  'http://media.tangent.org/rss/1.0/': 'audio',
                  'http://backend.userland.com/blogChannelModule': 'blogChannel',
                  'http://web.resource.org/cc/': 'cc',
                  'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
                  'http://purl.org/rss/1.0/modules/company': 'co',
                  'http://purl.org/rss/1.0/modules/content/': 'content',
                  'http://my.theinfo.org/changed/1.0/rss/': 'cp',
                  'http://purl.org/dc/elements/1.1/': 'dc',
                  'http://purl.org/dc/terms/': 'dcterms',
                  'http://purl.org/rss/1.0/modules/email/': 'email',
                  'http://purl.org/rss/1.0/modules/event/': 'ev',
                  'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
                  'http://freshmeat.net/rss/fm/': 'fm',
                  'http://xmlns.com/foaf/0.1/': 'foaf',
                  'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
                  'http://postneo.com/icbm/': 'icbm',
                  'http://purl.org/rss/1.0/modules/image/': 'image',
                  'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
                  'http://purl.org/rss/1.0/modules/link/': 'l',
                  'http://search.yahoo.com/mrss': 'media',
                  #Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
                  'http://search.yahoo.com/mrss/': 'media',
                  'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
                  'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
                  'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
                  'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
                  'http://purl.org/rss/1.0/modules/reference/': 'ref',
                  'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
                  'http://purl.org/rss/1.0/modules/search/': 'search',
                  'http://purl.org/rss/1.0/modules/slash/': 'slash',
                  'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
                  'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
                  'http://hacks.benhammersley.com/rss/streaming/': 'str',
                  'http://purl.org/rss/1.0/modules/subscription/': 'sub',
                  'http://purl.org/rss/1.0/modules/syndication/': 'sy',
                  'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
                  'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
                  'http://purl.org/rss/1.0/modules/threading/': 'thr',
                  'http://purl.org/rss/1.0/modules/textinput/': 'ti',
                  'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
                  'http://wellformedweb.org/commentAPI/': 'wfw',
                  'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
                  'http://www.w3.org/1999/xhtml': 'xhtml',
                  'http://www.w3.org/1999/xlink': 'xlink',
                  'http://www.w3.org/XML/1998/namespace': 'xml'
                  }
    # Lowercased-URI -> prefix lookup, populated lazily in __init__ and
    # shared by all instances (class attribute).
    _matchnamespaces = {}
    # Elements whose text content is a URI that may need resolving.
    can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']
    # Elements whose embedded markup may contain relative URIs.
    can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # Elements whose embedded markup is run through the HTML sanitizer.
    can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
    # MIME types treated as HTML-ish for URI resolution / sanitizing.
    html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Initialize parser state.

        baseuri  -- starting xml:base for resolving relative URIs
        baselang -- starting xml:lang, also recorded as the feed language
        encoding -- character encoding assumed for the input
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            # build the shared lowercased-URI -> prefix map exactly once
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        self.svgOK = 0
        self.hasTitle = 0
        if baselang:
            self.feeddata['language'] = baselang.replace('_','-')
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# the sgml parser doesn't handle entities in attributes, but
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
attrs = [(k, v.replace('&', '&')) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if type(baseuri) != type(u''):
try:
baseuri = unicode(baseuri, self.encoding)
except:
baseuri = unicode(baseuri, 'iso-8859-1')
# ensure that self.baseuri is always an absolute URI that
# uses a whitelisted URI scheme (e.g. not `javscript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg': self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
# No attributes so merge it into the encosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
    def unknown_endtag(self, tag):
        """Dispatch a closing tag to its _end_* handler and unwind scoping."""
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        if suffix == 'svg' and self.svgOK: self.svgOK -= 1
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            # inside svg markup, skip handlers so the default pop() path
            # treats the element as plain buffered content
            if self.svgOK: raise AttributeError()
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        # the XML special characters stay as numeric references so they
        # survive later re-parsing; everything else decodes to UTF-8 text
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack: return
        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
        # the five XML-predefined entities are kept in escaped form
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        elif ref in self.entities.keys():
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                # DTD-defined entity expands to a character reference;
                # feed it back through for further processing
                return self.handle_entityref(text)
        else:
            # unknown names stay escaped; known HTML entities decode to UTF-8
            try: name2codepoint[ref]
            except KeyError: text = '&%s;' % ref
            else: text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # comments carry no feed data, so they are discarded
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # processing instructions are ignored
        pass
    def handle_decl(self, text):
        # called for SGML declarations; ignored (see parse_declaration)
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish
                k = len(self.rawdata)
                return k
            # pass the CDATA payload through escaped, as literal text
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # No closing '>' yet: k is -1 here, telling the caller the
                # declaration is incomplete and more data is needed.
                return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a namespace declaration; some URIs also pin the feed version."""
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            # known namespace: dispatch its elements under the canonical prefix
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close the innermost open element and route its accumulated text.

        Joins the buffered pieces, then applies (in order) base64 decoding,
        relative-URI resolution, entity decoding, microformat parsing, HTML
        sanitizing and encoding fixups as appropriate, stores the result in
        the feed/entry structures, and returns the processed string.
        """
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            # <div>foo</div><div>bar</div>
            while pieces and len(pieces)>1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces)>1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0: break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    # loop ran to completion: the outer <div> wraps everything
                    pieces = pieces[1:-1]
            # Ensure each piece is a str for Python 3
            for (i, v) in enumerate(pieces):
                if not isinstance(v, basestring):
                    pieces[i] = v.decode('utf-8')
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = _base64decode(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
            except TypeError:
                # In Python 3, base64 takes and outputs bytes, not str
                # This may not be the most correct way to accomplish this
                output = _base64decode(output.encode('utf-8')).decode('utf-8')
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
            # nominally-plain text that looks like HTML gets retyped
            if self.lookslikehtml(output):
                self.contentparams['type']='text/html'
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish and RESOLVE_RELATIVE_URIS:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
        # parse microformats
        # (must do this before sanitizing because some microformats
        # rely on elements that we sanitize)
        if is_htmlish and element in ['content', 'description', 'summary']:
            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
            if mfresults:
                for tag in mfresults.get('tags', []):
                    self._addTag(tag['term'], tag['scheme'], tag['label'])
                for enclosure in mfresults.get('enclosures', []):
                    self._start_enclosure(enclosure)
                for xfn in mfresults.get('xfn', []):
                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
                vcard = mfresults.get('vcard')
                if vcard:
                    self._getContext()['vcard'] = vcard
        # sanitize embedded markup
        if is_htmlish and SANITIZE_HTML:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and type(output) == type(u''):
            try:
                output = unicode(output.encode('iso-8859-1'), 'utf-8')
            except:
                pass
        # map win-1252 extensions to the proper code points
        if type(output) == type(u''):
            output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # a title was already recorded for this scope; don't overwrite it
        if element == 'title' and self.hasTitle:
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                if not self.inimage:
                    # query variables in urls in link elements are improperly
                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                    self.entries[-1][element] = output
                    if output:
                        self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                # fix query variables; see above for the explanation
                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
                context[element] = output
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang: self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
    # a number of elements in a number of RSS variants are nominally plain
    # text, but this is routinely ignored. This is an attempt to detect
    # the most common cases. As false positives often result in silent
    # data loss, this function errs on the conservative side.
    def lookslikehtml(self, s):
        """Return 1 if s appears to contain HTML markup, else None."""
        if self.version.startswith('atom'): return
        if self.contentparams.get('type','text/html') != 'text/plain': return
        # must have a close tag or a entity reference to qualify
        if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)): return
        # all tags must be in a restricted subset of valid HTML tags
        if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
            re.findall(r'</?(\w+)',s)): return
        # all entities must have been defined as valid HTML entities
        from htmlentitydefs import entitydefs
        if filter(lambda e: e not in entitydefs.keys(),
            re.findall(r'&(\w+);',s)): return
        return 1
    def _mapToStandardPrefix(self, name):
        """Rewrite 'prefix:suffix' so prefix is the canonical namespace prefix."""
        colonpos = name.find(':')
        if colonpos <> -1:
            prefix = name[:colonpos]
            suffix = name[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
    def _start_rss(self, attrsD):
        # Map the RSS version attribute to our internal version tag.
        versionmap = {'0.91': 'rss091u',
                      '0.92': 'rss092',
                      '0.93': 'rss093',
                      '0.94': 'rss094'}
        #If we're here then this is an RSS feed.
        #If we don't have a version or have a version that starts with something
        #other than RSS then there's been a mistake. Correct it.
        if not self.version or not self.version.startswith('rss'):
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # Netscape's short-lived "hot titles" RSS dialect.
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF carries modification date and link as attributes; synthesize
        # the equivalent child elements so the regular handlers run.
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
    def _start_feed(self, attrsD):
        # Atom feed root element; detect the Atom draft version.
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'
    def _end_channel(self):
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        context = self._getContext()
        # an image inside an entry is entry data, not feed-level metadata
        if not self.inentry:
            context.setdefault('image', FeedParserDict())
        self.inimage = 1
        self.hasTitle = 0
        self.push('image', 0)
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.hasTitle = 0
        self.push('textinput', 0)
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        self.inauthor = 1
        self.push('author', 1)
        # Append a new FeedParserDict when expecting an author
        context = self._getContext()
        context.setdefault('authors', [])
        context['authors'].append(FeedParserDict())
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # reconcile the plain string and the structured *_detail dict
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # iTunes <owner> is mapped onto the 'publisher' slot.
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor holds a bare name rather than a sub-structure.
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # route the name to whichever person-like construct we're inside
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
    def _start_url(self, attrsD):
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        value = self.pop('href')
        # a URL inside an author/contributor block belongs to that person
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
    def _getContext(self):
        # Choose the dict new values should be written into, based on where
        # we are in the document: source, image, textinput, entry, or feed.
        if self.insource:
            context = self.sourcedata
        elif self.inimage and self.feeddata.has_key('image'):
            context = self.feeddata['image']
        elif self.intextinput:
            context = self.feeddata['textinput']
        elif self.inentry:
            context = self.entries[-1]
        else:
            context = self.feeddata
        return context
    def _save_author(self, key, value, prefix='author'):
        # Record one field of a person construct in both the structured
        # *_detail dict and the last entry of the authors list.
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value
    def _save_contributor(self, key, value):
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # subtitle/tagline default to plain text and expect character data
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # rights/copyright statement for the feed or entry
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # Begin a new entry; RDF feeds may carry the id in rdf:about.
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        self.hasTitle = 0
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # language applies to subsequent character data handling too
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        # keep the parsed struct_time in sync with the raw string
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    _start_lastbuilddate = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value, overwrite=True)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    _end_lastbuilddate = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
    def _start_cc_license(self, attrsD):
        # Creative Commons license given as an rdf:resource attribute;
        # record it as a rel="license" link.
        context = self._getContext()
        value = self._getAttribute(attrsD, 'rdf:resource')
        attrsD = FeedParserDict()
        attrsD['rel']='license'
        if value: attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    _start_creativeCommons_license = _start_creativecommons_license
    def _end_creativecommons_license(self):
        # license given as element content; normalize to a rel="license" link
        value = self.pop('license')
        context = self._getContext()
        attrsD = FeedParserDict()
        attrsD['rel']='license'
        if value: attrsD['href']=value
        context.setdefault('links', []).append(attrsD)
        del context['license']
    _end_creativeCommons_license = _end_creativecommons_license
    def _addXFN(self, relationships, href, name):
        # Record an XFN (XHTML Friends Network) relationship, deduplicated.
        context = self._getContext()
        xfn = context.setdefault('xfn', [])
        value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
        if value not in xfn:
            xfn.append(value)
    def _addTag(self, term, scheme, label):
        # Record a category/tag triple, skipping empties and duplicates.
        context = self._getContext()
        tags = context.setdefault('tags', [])
        if (not term) and (not scheme) and (not label): return
        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
        if value not in tags:
            tags.append(value)
    def _start_category(self, attrsD):
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        # Atom uses term/scheme/label; RSS 2.0 uses a 'domain' attribute.
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _start_media_category(self, attrsD):
        attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
        self._start_category(attrsD)
    def _end_itunes_keywords(self):
        # iTunes keywords are whitespace-separated; one tag per keyword.
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        # element content fills in a term-less tag created at start-tag time
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    _end_media_category = _end_category
    def _start_cloud(self, attrsD):
        # RSS <cloud> is attribute-only; store it verbatim.
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        attrsD.setdefault('rel', 'alternate')
        if attrsD['rel'] == 'self':
            attrsD.setdefault('type', 'application/atom+xml')
        else:
            attrsD.setdefault('type', 'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        if not (self.inentry and self.inimage):
            context['links'].append(FeedParserDict(attrsD))
        if attrsD.has_key('href'):
            expectingText = 0
            # the rel="alternate" HTML link becomes the canonical 'link' value
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        # RSS guid doubles as the permalink unless isPermaLink="false"
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        # inside inline SVG a <title> is just markup, not feed metadata
        if self.svgOK: return self.unknown_starttag('title', attrsD.items())
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        if self.svgOK: return
        value = self.popContent('title')
        if not value: return
        context = self._getContext()
        self.hasTitle = 1
    _end_dc_title = _end_title
    def _end_media_title(self):
        # media:title must not mark the feed/entry title as already seen
        hasTitle = self.hasTitle
        self._end_title()
        self.hasTitle = hasTitle
    def _start_description(self, attrsD):
        context = self._getContext()
        # a second summary-ish element is treated as full content instead
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    def _start_abstract(self, attrsD):
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
        self._summaryKey = None
    _end_abstract = _end_description
    _end_dc_description = _end_description
    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # generator given as an rdf:resource attribute; inject the value as
        # if it had been element content so pop() records it normally
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        context = self._getContext()
        # a second summary is demoted to content (same rule as description)
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        # enclosures are normalized into rel="enclosure" links
        attrsD = self._itsAnHrefDamnIt(attrsD)
        context = self._getContext()
        attrsD['rel']='enclosure'
        context.setdefault('links', []).append(FeedParserDict(attrsD))
    def _start_source(self, attrsD):
        if 'url' in attrsD:
            # This means that we're processing a source element from an RSS 2.0 feed
            self.sourcedata['href'] = attrsD[u'url']
        self.push('source', 1)
        self.insource = 1
        self.hasTitle = 0
    def _end_source(self):
        self.insource = 0
        value = self.pop('source')
        if value:
            self.sourcedata['title'] = value
        # deep-copy so clearing sourcedata doesn't wipe the saved source
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # text-ish content doubles as the summary if none was given
        copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToSummary:
            self._save('summary', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        if attrsD.get('href'):
            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' to False, and any other value to None
        # False and None both evaluate as False, so the difference can be ignored
        # by applications that only need to know if the content is explicit.
        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
    def _start_media_content(self, attrsD):
        context = self._getContext()
        context.setdefault('media_content', [])
        context['media_content'].append(attrsD)
    def _start_media_thumbnail(self, attrsD):
        context = self._getContext()
        context.setdefault('media_thumbnail', [])
        self.push('url', 1) # new
        context['media_thumbnail'].append(attrsD)
    def _end_media_thumbnail(self):
        # element content may supply the URL when the attribute didn't
        url = self.pop('url')
        context = self._getContext()
        if url is not None and len(url.strip()) != 0:
            if not context['media_thumbnail'][-1].has_key('url'):
                context['media_thumbnail'][-1]['url'] = url
    def _start_media_player(self, attrsD):
        self.push('media_player', 0)
        self._getContext()['media_player'] = FeedParserDict(attrsD)
    def _end_media_player(self):
        value = self.pop('media_player')
        context = self._getContext()
        context['media_player']['content'] = value
    def _start_newlocation(self, attrsD):
        self.push('newlocation', 1)
    def _end_newlocation(self):
        url = self.pop('newlocation')
        context = self._getContext()
        # don't set newlocation if the context isn't right
        if context is not self.feeddata:
            return
        context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        # SAX-based parser used when a real XML parser is available; falls
        # back to _LooseFeedParser if the document turns out to be ill-formed.
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None
            self.decls = {}
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
            if uri == 'http://www.w3.org/1999/xlink':
                # remember xlink declarations so they can be re-emitted
                self.decls['xmlns:'+prefix] = uri
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                    raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            localname = str(localname).lower()
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD, self.decls = self.decls, {}
            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrsD['xmlns']=namespace
            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrsD['xmlns']=namespace
            if prefix:
                localname = prefix.lower() + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # recoverable parse error: mark the result as "bozo"
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            # unrecoverable: record, then re-raise so the caller can fall
            # back to the loose (SGML-based) parser
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML parser that regenerates the HTML it parses.

    The reconstructed document accumulates in self.pieces; subclasses
    override the handler methods to filter or rewrite markup.  Several
    string literals below re-encode markup characters as HTML entities;
    they had been corrupted into no-op replacements (e.g.
    replace('>', '>')) and are restored here.
    """
    special = re.compile('''[<>'"]''')
    # an ampersand not already starting a character/entity reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    # HTML void elements: may not have an end tag
    elements_no_end_tag = [
      'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
      'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
      'source', 'track', 'wbr'
    ]
    def __init__(self, encoding, _type):
        self.encoding = encoding
        self._type = _type
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        # expand XML-style <tag/> into either a void element or an
        # explicit open/close pair, depending on the tag
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    def parse_starttag(self,i):
        j=sgmllib.SGMLParser.parse_starttag(self, i)
        if self._type == 'application/xhtml+xml':
            # in XHTML mode a self-closing tag also emits its end tag
            if j>2 and self.rawdata[j-2:j]=='/>':
                self.unknown_endtag(self.lasttag)
        return j
    def feed(self, data):
        # neutralize markup declarations that are not DOCTYPE/comment/CDATA
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        # pre-decode numeric quote entities (restored: had become no-ops)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        try:
            bytes
            if bytes is str:
                raise NameError
            self.encoding = self.encoding + '_INVALID_PYTHON_3'
        except NameError:
            if self.encoding and type(data) == type(u''):
                data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)
    def normalize_attrs(self, attrs):
        if not attrs: return attrs
        # utility method to be called by descendants
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        strattrs=''
        if attrs:
            for key, value in attrs:
                # re-encode markup characters the SGML parser decoded
                # (restored: these replacements had become no-ops)
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if type(value) != type(u''):
                    try:
                        value = unicode(value, self.encoding)
                    except:
                        value = unicode(value, 'iso-8859-1')
                try:
                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
                    uattrs.append((unicode(key, self.encoding), value))
                except TypeError:
                    uattrs.append((key, value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs=strattrs.encode(self.encoding)
                except:
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        if ref.startswith('x'):
            value = unichr(int(ref[1:],16))
        else:
            value = unichr(int(ref))
        if value in _cp1252.keys():
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%(ref)s;' % locals())
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if name2codepoint.has_key(ref):
            self.pieces.append('&%(ref)s;' % locals())
        else:
            # unknown entity: escape the ampersand (restored literal)
            self.pieces.append('&amp;%(ref)s' % locals())
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text)
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1
    def convert_charref(self, name):
        return '&#%s;' % name
    def convert_entityref(self, name):
        return '&%s;' % name
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Fallback feed parser built on the forgiving SGML parser.

    Used when the strict XML parser rejects the document.  The entity
    tables in decodeEntities had been corrupted into no-op replacements
    (both arguments decoded to the same character); the canonical
    numeric-entity -> named-entity mappings are restored below.
    """
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities=entities
    def decodeEntities(self, element, data):
        # normalize numeric character references for markup-significant
        # characters to their named equivalents
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        # for non-XML content types, fully decode to literal characters
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
    def strattrs(self, attrs):
        # serialize (name, value) pairs, entity-escaping embedded quotes
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if type(data) == type(u''):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if type(s) in (type(''), type(u'')):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple: return []
elif iPropertyType == self.STRING: return ''
elif iPropertyType == self.DATE: return None
elif iPropertyType == self.URI: return ''
elif iPropertyType == self.NODE: return None
else: return None
arValues = []
for elmResult in arResults:
sValue = None
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a': sValue = elmResult.get('href')
elif sNodeName == 'img': sValue = elmResult.get('src')
elif sNodeName == 'object': sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue: continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
    def findVCards(self, elmRoot, bAgentParsing=0):
        """Serialize hCard microformat data under elmRoot as vCard 3.0 text.

        When bAgentParsing is true, elmRoot itself is treated as one vcard
        node (used when recursing into AGENT sub-cards); otherwise every
        class="vcard" node under elmRoot is processed.  Returns all emitted
        vCards concatenated into a single stripped string.
        """
        sVCards = ''
        if not bAgentParsing:
            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
        else:
            arCards = [elmRoot]
        for elmCard in arCards:
            arLines = []
            # Emit "PROP:value" for a single-valued string property; returns
            # the (possibly empty) value so callers can reuse it (e.g. FN).
            def processSingleString(sProperty):
                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
                if sValue:
                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
                return sValue or u''
            # Emit a URI-valued property; data: URIs are inlined as base64
            # (ENCODING=b) with a TYPE parameter derived from the MIME type.
            def processSingleURI(sProperty):
                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
                if sValue:
                    sContentType = ''
                    sEncoding = ''
                    sValueKey = ''
                    if sValue.startswith('data:'):
                        sEncoding = ';ENCODING=b'
                        sContentType = sValue.split(';')[0].split('/').pop()
                        sValue = sValue.split(',', 1).pop()
                    else:
                        elmValue = self.getPropertyValue(elmCard, sProperty)
                        if elmValue:
                            if sProperty != 'url':
                                sValueKey = ';VALUE=uri'
                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
                    sContentType = sContentType.upper()
                    # OCTET-STREAM is the uninformative default; omit it.
                    if sContentType == 'OCTET-STREAM':
                        sContentType = ''
                    if sContentType:
                        sContentType = ';TYPE=' + sContentType.upper()
                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
            # Emit multi-valued properties carrying TYPE parameters (label,
            # tel, email); arForceType entries are merged into detected types.
            def processTypeValue(sProperty, arDefaultType, arForceType=None):
                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
                for elmResult in arResults:
                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
                    if arForceType:
                        arType = self.unique(arForceType + arType)
                    if not arType:
                        arType = arDefaultType
                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
                    if sValue:
                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
            # AGENT
            # must do this before all other properties because it is destructive
            # (removes nested class="vcard" nodes so they don't interfere with
            # this vcard's other properties)
            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
            for elmAgent in arAgent:
                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
                    # Nested vCard: recurse, then escape newlines/semicolons
                    # so it fits in a single AGENT property value.
                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
                    sAgentValue = sAgentValue.replace('\n', '\\n')
                    sAgentValue = sAgentValue.replace(';', '\\;')
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
                    # Completely remove the agent element from the parse tree
                    elmAgent.extract()
                else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
                    if sAgentValue:
                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
            # FN (full name)
            sFN = processSingleString('fn')
            # N (name)
            elmName = self.getPropertyValue(elmCard, 'n')
            if elmName:
                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
                                         sGivenName + ';' +
                                         ','.join(arAdditionalNames) + ';' +
                                         ','.join(arHonorificPrefixes) + ';' +
                                         ','.join(arHonorificSuffixes)))
            elif sFN:
                # implied "N" optimization
                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
                arNames = self.normalize(sFN).split()
                if len(arNames) == 2:
                    # Heuristic: "Doe," / single-letter / initial-with-dot in
                    # the second token suggests family-name-first ordering.
                    bFamilyNameFirst = (arNames[0].endswith(',') or
                                        len(arNames[1]) == 1 or
                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
                    if bFamilyNameFirst:
                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
                    else:
                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
            # SORT-STRING
            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
            if sSortString:
                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
            # NICKNAME
            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
            if arNickname:
                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
            # PHOTO
            processSingleURI('photo')
            # BDAY
            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
            if dtBday:
                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
            # ADR (address)
            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
            for elmAdr in arAdr:
                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
                if not arType:
                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
                                         sPostOfficeBox + ';' +
                                         sExtendedAddress + ';' +
                                         sStreetAddress + ';' +
                                         sLocality + ';' +
                                         sRegion + ';' +
                                         sPostalCode + ';' +
                                         sCountryName))
            # LABEL
            processTypeValue('label', ['intl','postal','parcel','work'])
            # TEL (phone number)
            processTypeValue('tel', ['voice'])
            # EMAIL
            processTypeValue('email', ['internet'], ['internet'])
            # MAILER
            processSingleString('mailer')
            # TZ (timezone)
            processSingleString('tz')
            # GEO (geographical information)
            elmGeo = self.getPropertyValue(elmCard, 'geo')
            if elmGeo:
                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
            # TITLE
            processSingleString('title')
            # ROLE
            processSingleString('role')
            # LOGO
            processSingleURI('logo')
            # ORG (organization)
            elmOrg = self.getPropertyValue(elmCard, 'org')
            if elmOrg:
                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
                if not sOrganizationName:
                    # implied "organization-name" optimization
                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
                    if sOrganizationName:
                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
                else:
                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
            # CATEGORY
            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
            if arCategory:
                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
            # NOTE
            processSingleString('note')
            # REV
            processSingleString('rev')
            # SOUND
            processSingleURI('sound')
            # UID
            processSingleString('uid')
            # URL
            processSingleURI('url')
            # CLASS
            processSingleString('class')
            # KEY
            processSingleURI('key')
            if arLines:
                # Wrap the accumulated properties in a vCard 3.0 envelope.
                arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
                sVCards += u'\n'.join(arLines) + u'\n'
        return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if not attrsD.has_key('href'): return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1: return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href: continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
tag = segments.pop()
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', '').split()
xfn_rels = []
for rel in rels:
if rel in self.known_xfn_relationships:
xfn_rels.append(rel)
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
    """Extract microformat data (tags, enclosures, XFN, hCard) from htmlSource.

    Returns a dict with keys 'tags', 'enclosures', 'xfn' and 'vcard', or
    None when BeautifulSoup is unavailable or the source cannot be parsed.
    """
    if not BeautifulSoup:
        return
    if _debug:
        sys.stderr.write('entering _parseMicroformats\n')
    try:
        parser = _MicroformatsParser(htmlSource, baseURI, encoding)
    except UnicodeEncodeError:
        # sgmllib throws this exception when performing lookups of tags
        # with non-ASCII characters in them.
        return
    parser.vcard = parser.findVCards(parser.document)
    parser.findTags()
    parser.findEnclosures()
    parser.findXFN()
    return {"tags": parser.tags, "enclosures": parser.enclosures, "xfn": parser.xfn, "vcard": parser.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML processor that rewrites known relative-URI attributes so they
    are absolute, resolved against a base URI and scheme-checked."""

    # (tag, attribute) pairs whose values may contain relative URIs.
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        # Strip surrounding whitespace, join against the base, then vet the
        # resulting scheme against the acceptable-schemes whitelist.
        return _makeSafeAbsoluteURI(_urljoin(self.baseuri, uri.strip()))

    def unknown_starttag(self, tag, attrs):
        if _debug:
            sys.stderr.write('tag: [%s] with attributes: [%s]\n' % (tag, str(attrs)))
        normalized = self.normalize_attrs(attrs)
        resolved = []
        for key, value in normalized:
            if (tag, key) in self.relative_uris:
                # Fall back to the raw value when resolution yields nothing.
                value = self.resolveURI(value) or value
            resolved.append((key, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, resolved)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Rewrite relative URIs in htmlSource to absolute ones against baseURI."""
    if _debug:
        sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding, _type)
    resolver.feed(htmlSource)
    return resolver.output()
def _makeSafeAbsoluteURI(base, rel=None):
    """Join *base* and *rel* into an absolute URI.

    Returns u'' when the resulting URI's scheme is not listed in
    ACCEPTABLE_URI_SCHEMES (unless that whitelist is empty, in which case
    any scheme is accepted).
    """
    def scheme_is_acceptable(uri):
        return uri.strip().split(':', 1)[0] in ACCEPTABLE_URI_SCHEMES
    # bail if ACCEPTABLE_URI_SCHEMES is empty
    if not ACCEPTABLE_URI_SCHEMES:
        return _urljoin(base, rel or u'')
    if not base:
        return rel or u''
    if not rel:
        # Nothing to join; vet the base URI itself.
        if scheme_is_acceptable(base):
            return base
        return u''
    joined = _urljoin(base, rel)
    if scheme_is_acceptable(joined):
        return joined
    return u''
class _HTMLSanitizer(_BaseHTMLProcessor):
    """HTML processor that whitelists elements, attributes and CSS.

    Tags, attributes and style properties not present in the acceptable_*
    lists are dropped.  Embedded MathML and SVG content is allowed only
    while inside an appropriately-namespaced root element, tracked by the
    self.mathmlOK / self.svgOK depth counters.
    """
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
        'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
        'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
        'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
        'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
        'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
        'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
        'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
        'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
        'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
        'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
        'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
        'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
        'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
        'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
        'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
        'xml:lang']
    # Content of these elements is suppressed until their end tag is seen.
    unacceptable_elements_with_end_tag = ['script', 'applet', 'style']
    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']
    # survey of common keywords found in feeds
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']
    # Matches color/length/percentage literals allowed in shorthand values.
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
        '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
    mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
        'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
        'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
        'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none', 'semantics']
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
        'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
        'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
        'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
        'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
        'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']
    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
        'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
        'svg', 'switch', 'text', 'title', 'tspan', 'use']
    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
        'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
        'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
        'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
        'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
        'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
        'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
        'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
        'min', 'name', 'offset', 'opacity', 'orient', 'origin',
        'overline-position', 'overline-thickness', 'panose-1', 'path',
        'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
        'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
        'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
        'stop-color', 'stop-opacity', 'strikethrough-position',
        'strikethrough-thickness', 'stroke', 'stroke-dasharray',
        'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
        'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
        'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
        'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
        'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
        'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
        'y2', 'zoomAndPan']
    # Lazily-built lowercase->camelCase maps for SVG (see unknown_starttag).
    svg_attr_map = None
    svg_elem_map = None
    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']
    def reset(self):
        # Reset sanitizer state alongside the base processor's state.
        _BaseHTMLProcessor.reset(self)
        self.unacceptablestack = 0   # depth inside <script>/<applet>/<style>
        self.mathmlOK = 0            # depth inside a namespaced <math> root
        self.svgOK = 0               # depth inside a namespaced <svg> root
    def unknown_starttag(self, tag, attrs):
        acceptable_attributes = self.acceptable_attributes
        # keymap translates lowercased attribute names back to their
        # canonical (camelCase) SVG spelling; empty outside SVG content.
        keymap = {}
        if not tag in self.acceptable_elements or self.svgOK:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            # add implicit namespaces to html5 inline svg/mathml
            if self._type.endswith('html'):
                if not dict(attrs).get('xmlns'):
                    if tag=='svg':
                        attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
                    if tag=='math':
                        attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
            # not otherwise acceptable, perhaps it is MathML or SVG?
            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
                self.mathmlOK += 1
            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
                self.svgOK += 1
            # chose acceptable attributes based on tag class, else bail
            if self.mathmlOK and tag in self.mathml_elements:
                acceptable_attributes = self.mathml_attributes
            elif self.svgOK and tag in self.svg_elements:
                # for most vocabularies, lowercasing is a good idea. Many
                # svg elements, however, are camel case
                if not self.svg_attr_map:
                    lower=[attr.lower() for attr in self.svg_attributes]
                    mix=[a for a in self.svg_attributes if a not in lower]
                    self.svg_attributes = lower
                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])
                    lower=[attr.lower() for attr in self.svg_elements]
                    mix=[a for a in self.svg_elements if a not in lower]
                    self.svg_elements = lower
                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
                acceptable_attributes = self.svg_attributes
                tag = self.svg_elem_map.get(tag,tag)
                keymap = self.svg_attr_map
            elif not tag in self.acceptable_elements:
                return
        # declare xlink namespace, if needed
        if self.mathmlOK or self.svgOK:
            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
        # Keep only whitelisted attributes; 'style' values get their own pass.
        clean_attrs = []
        for key, value in self.normalize_attrs(attrs):
            if key in acceptable_attributes:
                key=keymap.get(key,key)
                clean_attrs.append((key,value))
            elif key=='style':
                clean_value = self.sanitize_style(value)
                if clean_value: clean_attrs.append((key,clean_value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            # Only the namespaced root tags decrement the OK counters.
            if self.mathmlOK and tag in self.mathml_elements:
                if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1
            elif self.svgOK and tag in self.svg_elements:
                tag = self.svg_elem_map.get(tag,tag)
                if tag == 'svg' and self.svgOK: self.svgOK -= 1
            else:
                return
        _BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # Processing instructions are dropped entirely.
        pass
    def handle_decl(self, text):
        # Declarations (e.g. DOCTYPE) are dropped entirely.
        pass
    def handle_data(self, text):
        # Suppress text while inside <script>/<applet>/<style>.
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
    def sanitize_style(self, style):
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        # This replaced a regexp that used re.match and was prone to pathological back-tracking.
        if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.acceptable_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
                # Shorthand properties: accept only if every keyword/value
                # token is individually acceptable.
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not self.valid_css_values.match(keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
def _sanitizeHTML(htmlSource, encoding, _type):
    """Sanitize htmlSource with _HTMLSanitizer, optionally running the
    result through a Tidy implementation when TIDY_MARKUP is enabled.

    Returns the cleaned markup with normalized newlines.
    """
    p = _HTMLSanitizer(encoding, _type)
    # Escape CDATA section openers so raw "<![CDATA[" markers cannot pass
    # through the sanitizer's output.  (The previous code replaced the
    # string with itself — a no-op caused by an unescaped '&lt;'.)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # Best-effort: skip Tidy interfaces that fail to import.
                pass
        if _tidy:
            # Tidy works on byte strings; remember whether we must decode back.
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # Strip the document scaffolding Tidy adds, keeping only the
            # contents of <body>...</body>.
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler that attaches the HTTP status code to the returned
    stream (as .status) and retries failed basic auth with digest auth."""
    def http_error_default(self, req, fp, code, msg, headers):
        # Treat any 3xx other than 304 as a redirect; all other codes are
        # returned to the caller (with .status set) instead of raising.
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # Follow the redirect only when a Location header is present;
        # otherwise wrap the raw response so the caller can inspect it.
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        # Same pattern as http_error_302, delegating to the 301 hook.
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    # 300, 303 and 307 share the 302 handling above.
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 is not None
            user, passw = _base64decode(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # Anything unexpected (missing Authorization header, odd
            # WWW-Authenticate format, old Python) falls back to the
            # default error handling above.
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
    """URL, filename, or string --> stream
    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.
    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.
    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.
    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.
    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.
    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    if request_headers is supplied it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
    """
    # Already a file-like object: hand it straight back.
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    # '-' conventionally means "read from stdin".
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
        # Deal with the feed URI scheme
        if url_file_stream_or_string.startswith('feed:http'):
            url_file_stream_or_string = url_file_stream_or_string[5:]
        elif url_file_stream_or_string.startswith('feed:'):
            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            # NOTE(review): relies on Python 2-only urllib.splittype/
            # splithost/splituser helpers — revisit on any py3 migration.
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # Strip the credentials out of the URL and carry them as
                    # a pre-encoded Basic auth token instead.
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.standard_b64encode(user_passwd).strip()
        # iri support
        try:
            # IDNA-encode the hostname so international domain names work;
            # best-effort only, failures fall through to the raw URL.
            if isinstance(url_file_stream_or_string,unicode):
                url_file_stream_or_string = url_file_stream_or_string.encode('idna').decode('utf-8')
            else:
                url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna').decode('utf-8')
        except:
            pass
        # try to open with urllib2 (to use optional headers)
        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
        # apply() is the Python 2 spelling of urllib2.build_opener(*handlers).
        opener = apply(urllib2.build_opener, tuple(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string, 'rb')
    except:
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
    """Build a urllib2.Request carrying conditional-GET, auth, referrer and
    caller-supplied headers for fetching a feed."""
    request = urllib2.Request(url)
    request.add_header('User-Agent', agent)
    if etag:
        request.add_header('If-None-Match', etag)
    # Normalize 'modified' into a 9-tuple GMT time value.
    if type(modified) == type(''):
        modified = _parse_date(modified)
    elif isinstance(modified, datetime.datetime):
        modified = modified.utctimetuple()
    if modified:
        # format into an RFC 1123-compliant timestamp. We can't use
        # time.strftime() since the %a and %b directives can be affected
        # by the current locale, but RFC 2616 states that dates must be
        # in English.
        short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        stamp = '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
            short_weekdays[modified[6]], modified[2], months[modified[1] - 1],
            modified[0], modified[3], modified[4], modified[5])
        request.add_header('If-Modified-Since', stamp)
    if referrer:
        request.add_header('Referer', referrer)
    # Advertise whichever decompression modules were importable.
    if gzip and zlib:
        encodings = 'gzip, deflate'
    elif gzip:
        encodings = 'gzip'
    elif zlib:
        encodings = 'deflate'
    else:
        encodings = ''
    request.add_header('Accept-encoding', encodings)
    if auth:
        request.add_header('Authorization', 'Basic %s' % auth)
    if ACCEPT_HEADER:
        request.add_header('Accept', ACCEPT_HEADER)
    # use this for whatever -- cookies, special headers, etc
    # [('Cookie','Something'),('x-special-header','Another Value')]
    for header_name, header_value in request_headers.items():
        request.add_header(header_name, header_value)
    request.add_header('A-IM', 'feed') # RFC 3229 support
    return request
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# Expand each template into a full regex: the date placeholders become named
# groups, followed by an optional time-of-day / fraction / timezone suffix.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# In Python 2 the list-comprehension loop variable leaks into module scope;
# delete it (guarded with NameError since Python 3 does not leak it).
try:
    del tmpl
except NameError:
    pass
# Pre-compile every pattern and keep just its bound .match method.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105.

    Returns a 9-tuple (time.struct_time) or None when no template matches.
    Missing date fields default to "today" as reported by time.gmtime().
    '''
    # Try each pre-compiled template matcher in registration order; the
    # template list is ordered greedily, so the first hit wins.
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    # The last template is the empty string, which "matches" with span (0, 0);
    # treat that as a non-match.
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    # fractional seconds are parsed but truncated here
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift hours/minutes toward UTC and let
    # mktime() normalize any overflow.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    # NOTE(review): mktime() interprets the tuple in local time, so the result
    # depends on the host timezone -- longstanding feedparser behavior.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Hangul tokens that appear in Korean blog date strings; the comments give the
# original euc-kr byte values for reference.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

# "YYYY<year> MM<month> DD<day> hh:mm:ss" as emitted by OnBlog.
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
        (_korean_year, _korean_month, _korean_day))
# "YYYY-MM-DD <am/pm> h:m:s" as emitted by Nate (hour/min/sec may be 1 digit).
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
        (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    # Reassemble the six captured fields as a W3DTF timestamp in KST.
    fields = dict(zip(('year', 'month', 'day', 'hour', 'minute', 'second'),
                      m.groups()))
    fields['zonediff'] = '+09:00'
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    # 12-hour clock: bump the hour for afternoon timestamps, then zero-pad.
    hour = int(m.group(5))
    if m.group(4) == _korean_pm:
        hour += 12
    hour = str(hour)
    if len(hour) < 2:
        hour = '0' + hour
    fields = {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
              'hour': hour, 'minute': m.group(6), 'second': m.group(7),
              'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# "YYYY-MM-DD hh:mm:ss[.fff]" -- the default MS SQL datetime text format.
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    # Rebuild as W3DTF and delegate.  NOTE(review): the zone is hard-coded to
    # +09:00 (KST) just like the Korean handlers above -- presumably this
    # handler targets Korean feeds; confirm before relying on it elsewhere.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
        {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
         'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
         'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# Maps abbreviated Greek month names to their English equivalents; the
# comments give the original iso-8859-7 byte values.  Several spelling
# variants map to the same month.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

# Abbreviated Greek weekday names -> English equivalents.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# "<wday>, DD <month> YYYY hh:mm:ss <zone>" with Greek weekday/month tokens.
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m:
        return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except:
        # Unknown weekday/month token: not a Greek date after all.
        return
    # Translate to an RFC 822 date and delegate to that handler.
    day, year, hour, minute, second, zonediff = m.group(2, 4, 5, 6, 7, 8)
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
        {'wday': wday, 'day': day, 'month': month, 'year': year,
         'hour': hour, 'minute': minute, 'second': second,
         'zonediff': zonediff}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
# Maps Hungarian month names to two-digit month numbers; comments give the
# original iso-8859-2 byte values of the accented characters.
# NOTE(review): 'febru\u00e1ri' ("februári") and 'm\u00e1ujus' ("máujus") look
# like typos for 'febru\u00e1r'/'m\u00e1jus' -- kept as-is to preserve
# whatever inputs this was tuned against.
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

# "YYYY-<monthname>-D[D]Th[h]:mm+zz:zz" -- day and hour may be one digit.
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        # Month names map to two-digit numbers; an unknown name raises
        # KeyError and aborts parsing via the bare except below.
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except:
        # bare except kept deliberately: any failure means "not a
        # Hungarian date" and the next registered handler gets a turn
        return
    # Rebuild as W3DTF (no seconds in this format) and delegate.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
        {'year': m.group(1), 'month': month, 'day': day,\
         'hour': hour, 'minute': m.group(5),\
         'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    '''Parse a W3CDTF (subset-of-ISO-8601) date string into a 9-tuple in GMT.

    Returns None when the string does not match in its entirety, or when the
    year is implausible (< 1000 after century expansion).
    '''
    def __extract_date(m):
        # Returns (year, month, day); (0, 0, 0) signals "invalid".
        year = int(m.group('year'))
        if year < 100:
            # two-digit years are interpreted in the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # Ordinal (day-of-year) dates: start from a rough month/day
            # guess and iterate, letting mktime/gmtime tell us the real
            # day-of-year until the guess converges.
            # NOTE(review): relies on Python 2 integer division
            # ('julian / 30'); under Python 3 this would produce floats.
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # Returns (hours, minutes, seconds); all-zero when absent.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # offset is negated for '+' zones: it is ADDED to the local value
        # below to reach UTC.
        if tzd[0] == '+':
            return -offset
        return offset
    # Date part: YYYY[-MM[-DD]] or YYYY-OOO (ordinal); the separator group
    # (?P=dsep) forces the two separators to agree (both '-' or both absent).
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
                 '|(?P<julian>\d\d\d)))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    # Time part: hh:mm[:ss[.frac]] plus a mandatory timezone designator.
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # The whole string must be consumed -- a partial match is a rejection.
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime() interprets the tuple as local time; compensate with
    # time.timezone and the parsed zone offset to land back in UTC.
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    # Drop a leading day-of-week token ("Fri,", "Fri.", or a bare "friday").
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # Four tokens: day month year time[+zone]; split a fused "+zone"
        # off the time token, or append an empty zone placeholder.
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    # Account for the Etc/GMT timezone by stripping 'Etc/'
    elif len(data) == 5 and data[4].lower().startswith('etc/'):
        data[4] = data[4][4:]
        dateString = " ".join(data)
    # Date-only strings: assume midnight GMT.
    if len(data) < 5:
        dateString += ' 00:00:00 GMT'
    # Delegate the actual parsing to the (Python 2) rfc822 module.
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
# Fri, 2006/09/15 08:19:53 EDT -- compiled once at module level instead of on
# every call.
_perforce_date_re = re.compile( \
    r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
_perforce_months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _parse_date_perforce(aDateString):
    """Parse a date in yyyy/mm/dd hh:mm:ss TTT format.

    Returns a 9-tuple in GMT, or None when the string does not match.
    (The original dereferenced the search() result unconditionally and
    raised AttributeError on non-matching input; every other registered
    handler returns None instead, so do the same here.)
    """
    m = _perforce_date_re.search(aDateString)
    if m is None:
        return
    dow, year, month, day, hour, minute, second, tz = m.groups()
    # Rewrite as an RFC 822 date ("Fri, 15 Sep 2006 08:19:53 EDT") and let
    # the rfc822 module do the real work.
    dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, _perforce_months[int(month) - 1], year, hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    # Handlers are tried most-recently-registered first (registerDateHandler
    # prepends); the first handler that returns a valid 9-tuple wins.
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple: continue
            if len(date9tuple) != 9:
                if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            # Every field must be int-coercible; in Python 2 map() is eager,
            # so a bad field raises right here and we fall to the except.
            map(int, date9tuple)
            return date9tuple
        except Exception, e:
            # A failing handler is not fatal -- log (when debugging) and try
            # the next one.
            if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
            pass
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document
    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    Returns a 5-tuple: (true_encoding, http_encoding, xml_encoding,
    sniffed_xml_encoding, acceptable_content_type).

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible.  Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''

    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)
        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')

    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    # header dicts may carry either capitalization depending on the source
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type', http_headers.get('Content-type')))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Each branch transcodes xml_data to utf-8 so the declaration regex
    # below can run over ASCII-compatible bytes.
    try:
        if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]):
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]):
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]):
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]):
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]):
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data)
    except:
        # undecodable garbage: behave as if there were no XML declaration
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # A sniffed multi-byte encoding beats a generic multi-byte label in
        # the declaration (the sniff knows the endianness).
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    # Apply the RFC 3023 precedence rules described in the docstring.
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not (http_headers.has_key('content-type') or http_headers.has_key('Content-type'))):
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    # some feeds claim to be gb2312 but are actually gb18030.
    # apparently MSIE and Firefox both do the following switch:
    if true_encoding.lower() == 'gb2312':
        true_encoding = 'gb18030'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Returns utf-8 encoded bytes with the XML declaration rewritten (or
    prepended) to say encoding='utf-8'.  A BOM, when present, overrides
    the caller-supplied encoding.  Raises (e.g. UnicodeDecodeError) when
    the data cannot be decoded -- callers rely on that to try the next
    candidate encoding.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present)
    if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]):
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    # decode with the (possibly BOM-corrected) encoding; may raise
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    # Replace any existing XML declaration with a canonical utf-8 one, or
    # prepend one when the document had none.
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
    '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document, minus the DOCTYPE

    (Actually returns a 3-tuple: the third element is a dict of "safe"
    inline entity definitions that were preserved, mapping entity name to
    replacement text.)
    '''
    # Everything before the first element start ("<w...") is the prolog;
    # only that head region is searched for ENTITY/DOCTYPE declarations.
    start = re.search(_s2bytes('<\w'), data)
    start = start and start.start() or -1
    head,data = data[:start+1], data[start+1:]

    entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
    entity_results=entity_pattern.findall(head)
    head = entity_pattern.sub(_s2bytes(''), head)
    doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
    doctype_results = doctype_pattern.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    # The Netscape RSS 0.91 DOCTYPE identifies that dialect.
    if doctype.lower().count(_s2bytes('netscape')):
        version = 'rss091n'
    else:
        version = None

    # only allow in 'safe' inline entity definitions (simple name/value
    # pairs with no markup in the value); everything else is dropped to
    # avoid entity-expansion tricks
    replacement=_s2bytes('')
    if len(doctype_results)==1 and entity_results:
        safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
        safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
        if safe_entities:
            replacement=_s2bytes('<!DOCTYPE feed [\n  <!ENTITY') + _s2bytes('>\n  <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
    data = doctype_pattern.sub(replacement, head) + data

    # note: safe_pattern is only referenced here when replacement is
    # non-empty, in which case it is guaranteed to be defined above
    return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)])
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[], request_headers={}, response_headers={}):
    '''Parse a feed from a URL, file, stream, or string.

    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.

    Returns a FeedParserDict with (at least) 'feed', 'entries', 'bozo',
    'encoding', 'version' and 'namespaces' keys; 'headers', 'href',
    'status', 'etag' and 'modified' are filled in when the source was
    fetched over HTTP.
    '''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    if not isinstance(handlers, list):
        handlers = [handlers]
    # Fetch/read the raw bytes; any failure is recorded as "bozo" rather
    # than raised, and parsing is skipped below (data is None).
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None

    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)

    # if feed is gzip-compressed, decompress it
    if f and data and 'headers' in result:
        if gzip and result['headers'].get('content-encoding') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and result['headers'].get('content-encoding') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''

    # save HTTP headers
    if 'headers' in result:
        if 'etag' in result['headers'] or 'ETag' in result['headers']:
            etag = result['headers'].get('etag', result['headers'].get('ETag'))
            if etag:
                result['etag'] = etag
        if 'last-modified' in result['headers'] or 'Last-Modified' in result['headers']:
            modified = result['headers'].get('last-modified', result['headers'].get('Last-Modified'))
            if modified:
                result['modified'] = _parse_date(modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type') or http_headers.has_key('Content-type'):
            bozo_message = '%s is not an XML media type' % http_headers.get('content-type', http_headers.get('Content-type'))
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)

    if data is not None:
        result['version'], data, entities = _stripDoctype(data)

    # ensure that baseuri is an absolute uri using an acceptable URI scheme
    contentloc = http_headers.get('content-location', http_headers.get('Content-Location', ''))
    href = result.get('href', '')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href

    baselang = http_headers.get('content-language', http_headers.get('Content-Language', None))

    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data.  This is a feature, not a bug!'
        return result

    # if there was a problem downloading, we're done
    if data is None:
        return result

    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried iso-8859-2 yet, try that.
    if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
        try:
            proposed_encoding = 'iso-8859-2'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'document declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding

    if not _XML_AVAILABLE:
        use_strict_parser = 0
    # First pass: strict SAX parsing; any parse error demotes us to the
    # loose (sgmllib-based) parser below and flags the feed as bozo.
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
class Serializer:
    '''Base class for command-line output serializers.

    Holds the FeedParserDict produced by parse(); subclasses implement
    write() to render it to a stream.
    '''
    def __init__(self, results):
        self.results = results
class TextSerializer(Serializer):
    '''Writes the results as flat "dotted.path=value" lines (Python 2 only:
    relies on dict.has_key and types.ListType).'''
    def write(self, stream=sys.stdout):
        self._writer(stream, self.results, '')

    def _writer(self, stream, node, prefix):
        # Recursively walk dicts and lists, emitting each leaf value with a
        # dotted-path prefix, e.g. "feed.title=...".
        if not node: return
        if hasattr(node, 'keys'):
            keys = node.keys()
            keys.sort()
            for k in keys:
                # Skip redundant keys and any key that has a richer
                # '_detail'/'_parsed' sibling (which will be emitted instead).
                if k in ('description', 'link'): continue
                if node.has_key(k + '_detail'): continue
                if node.has_key(k + '_parsed'): continue
                self._writer(stream, node[k], prefix + k + '.')
        elif type(node) == types.ListType:
            index = 0
            for n in node:
                self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].')
                index += 1
        else:
            try:
                # Escape backslashes and fold newlines so every value stays
                # on one output line.
                s = str(node).encode('utf-8')
                s = s.replace('\\', '\\\\')
                s = s.replace('\r', '')
                s = s.replace('\n', r'\n')
                stream.write(prefix[:-1])
                stream.write('=')
                stream.write(s)
                stream.write('\n')
            except:
                # best-effort output: unprintable/unencodable values are
                # silently skipped
                pass
class PprintSerializer(Serializer):
    '''Writes the results with pprint, preceded by the feed URL when known.'''
    def write(self, stream=sys.stdout):
        if self.results.has_key('href'):
            stream.write(self.results['href'] + '\n\n')
        from pprint import pprint
        pprint(self.results, stream)
        stream.write('\n')
if __name__ == '__main__':
    # Command-line driver: parse each URL/file argument and dump the result
    # through the serializer selected with --format.
    try:
        from optparse import OptionParser
    except:
        OptionParser = None

    if OptionParser:
        optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
        optionParser.set_defaults(format="pprint")
        optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
        optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
        optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
        optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
        optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
        optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
        (options, urls) = optionParser.parse_args()
        if options.verbose:
            _debug = 1
        if not urls:
            optionParser.print_help()
            sys.exit(0)
    else:
        # optparse unavailable: fall back to positional args with defaults
        if not sys.argv[1:]:
            print __doc__
            sys.exit(0)
        class _Options:
            etag = modified = agent = referrer = None
            format = 'pprint'
        options = _Options()
        urls = sys.argv[1:]

    zopeCompatibilityHack()

    # 'text' -> TextSerializer, 'pprint' -> PprintSerializer; unknown names
    # fall back to the (write-less) Serializer base class.
    serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
    for url in urls:
        results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
        serializer(results).write(sys.stdout)
| Arno-Nymous/pyload | module/lib/feedparser.py | Python | gpl-3.0 | 167,083 |
'''
Basic implementation of an emulator of Chip8 in python
to research low level purpose
More info about tech spec: http://en.wikipedia.org/wiki/CHIP-8
or http://es.wikipedia.org/wiki/CHIP-8
'''
__author__ = "Martin Dimondo"
__license__ = "Revised BSD"
__email__ = "martin.dimondo@gmail.com"
from datetime import datetime
import logging
import time
import display
import keypad
import memory
import util
import pygame
import operations
CLOCK_RATE = 60 # Run at 60hz
CPU_REGISTERS = 16
logging.basicConfig(level=logging.INFO)
class CPUEmulator(object):
    """Emulated Chip8 CPU: registers, timers, and the fetch/execute loop.

    Peripherals (screen, memory, keypad) are owned by the CPU instance;
    opcodes are dispatched through the shared operation table.
    """

    executor = operations.Executor(operations.op_map)

    def __init__(self):
        # Execution begins at the conventional Chip8 program origin.
        self.pc = memory.MEMORY_START  # Program Counter
        self.delay_timer = 0
        self.sound_timer = 0
        self.interrupted = False
        # Peripherals.
        self.screen = display.Chip8Screen()
        self.memory = memory.Chip8Memory()
        self.keypad = keypad.Chip8Keypad()
        self._init_mem()
        self._init_registers()

    def _init_registers(self):
        """Reset the index register, the V registers, the stack and opcode."""
        logging.info("Initializing cpu registers")
        self.index_reg = 0
        self.v = [0x0] * CPU_REGISTERS
        self.stack = []
        self.op = None

    def _init_mem(self):
        """Load the built-in font sprites into low memory."""
        logging.info("Initializing memory")
        self.memory.write_array(memory.FONT_ADDRESS, display.FONT_SPRITES)

    def load_program(self, prog):
        """Copy a program image into memory at the standard load address."""
        logging.info("Loading program into memory")
        self.memory.write_array(memory.MEMORY_START, prog)

    def main_loop(self):
        """Run the emulator paced at CLOCK_RATE until ESC is pressed.

        Raises InterruptedException when the keypad reports ESC.
        """
        logging.info("Running main loop emulator...")
        pacer = pygame.time.Clock()
        while True:
            pacer.tick(CLOCK_RATE)
            self.screen.update()
            self.keypad.check_keys()
            self.cycle()
            self._tick_timers()
            if self.keypad.is_esc_pressed():
                raise InterruptedException('CPU execution cycle was interrupted')

    def _tick_timers(self):
        """Count the delay and sound timers down one step each frame."""
        if self.delay_timer > 0:
            self.delay_timer -= 1
            logging.debug('Delay timer: %s' % self.delay_timer)
        if self.sound_timer > 0:
            if self.sound_timer == 1:
                logging.debug('BEEP - Sound timer: %s' % self.sound_timer)
                print('\a') # Nonzero value produce a beep in a Chip8
                # better ... shoot to the computer speaker
            self.sound_timer -= 1

    def cycle(self):
        """Fetch the next opcode and dispatch it to the operation table."""
        self.op = self._fetch()
        logging.debug('Current OpCode: 0x%02x' % self.op)
        self.executor.execute(self, self.op)

    def _fetch(self):
        """Read the two-byte big-endian opcode at the program counter."""
        high = self.memory.get_byte(self.pc) << 8 # shift to left 8 bits
        low = self.memory.get_byte(self.pc + 1)
        return high | low
class InterruptedException(Exception):
    """Raised to stop the emulator's main loop (ESC key pressed)."""
| martindimondo/PyC8 | chip8/cpu.py | Python | bsd-3-clause | 2,879 |
#!/usr/bin/env python
"""Core library to build and test the random forest."""
from __future__ import division
import numpy as np
from multiprocessing import Manager, cpu_count
from parallel import Worker, Task, retrieve
from base import Node, Leaf
__author__ = "Andrea Casini"
__copyright__ = "Copyright 2014"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "acasini@dsi.unive.it"
def _build_tree(x, y, f, random_state, root=Node(), depth=0):
""" Build a decision tree for a given data.
"""
n_samples = len(y)
# 1st base case : empty arrays.
if n_samples == 0:
return
counts = np.bincount(y)
n_classes = np.count_nonzero(counts)
# 2nd base case : all node's labels are equal.
if n_classes <= 1:
return Leaf(y[0])
# 3rd base case : maximum depth or minimum sample size reached.
if depth >= f.max_depth != -1 or n_samples <= f.min_samples_split != -1:
return Leaf(counts.argmax())
# Train this node ...
root.train(x, y, f.n_rounds, random_state)
# ... and use it to split x
z = root.split(x)
assert z is not None
# Recursive calls.
root.left = _build_tree(x[~z], y[~z], f, random_state, Node(), depth + 1)
root.right = _build_tree(x[z], y[z], f, random_state, Node(), depth + 1)
return root
def _tree_predict(x, root, indices=None, output=None):
""" Compute labels predictions of a single tree.
"""
if indices is None and output is None:
indices = np.arange(x.shape[0])
output = np.zeros(x.shape[0])
if len(indices) == 0 or root is None:
return
if root.is_a_leaf:
output[indices] = root.label
return
z = root.split(np.take(x, indices, axis=0))
_tree_predict(x, root.left, indices[~z], output)
_tree_predict(x, root.right, indices[z], output)
return output
#______________________________________________________________________________
class RandomForest():
    """ A random forest classifier.

    A random forest is a meta estimator that fits a number of classical
    decision trees on various sub-samples of the dataset and uses averaging
    to improve the predictive accuracy and control over-fitting.

    Parameters
    ----------
    n_trees : integer, optional (default = 20)
        The number of trees in the forest.

    max_depth : integer, optional (default = -1)
        The maximum depth of the tree. If -1, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Note: this parameter is tree-specific.

    n_rounds : integer, optional (default = 100)
        The number of splits to perform in order to find the best one.

    min_samples_split : integer, optional (default = 1)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    n_jobs : integer, optional (default = -1)
        The number of jobs to run in parallel. If -1, then the number of jobs
        is set to the number of cores.

    forest : list, optional (default = None)
        Pre-built trees; normally left as None and populated by `fit`.
    """

    def __init__(self,
                 n_trees=20,
                 max_depth=-1,
                 n_rounds=100,
                 min_samples_split=1,
                 n_jobs=-1,
                 forest=None):
        self.n_trees = n_trees
        self.max_depth = max_depth
        self.n_rounds = n_rounds
        self.min_samples_split = min_samples_split
        self.n_jobs = n_jobs
        self.forest = forest

    def fit(self, x, y):
        """ Build a random forest of trees from the training set (x, y).

        Parameters
        ----------
        x : array-like of shape = [n_samples, n_features]
            The training input samples.

        y : array-like of shape = [n_samples]
            The target values (integers that correspond to classes).

        Returns
        -------
        self : object
            Returns self.
        """
        # Never start more worker processes than there are trees to build.
        if self.n_jobs == -1:
            n_workers = min(cpu_count(), self.n_trees)
        else:
            n_workers = min(self.n_jobs, self.n_trees)
        # Establish communication queues.
        tasks = Manager().JoinableQueue()
        results = Manager().Queue()
        # Start workers.
        workers = [Worker(tasks, results) for _ in xrange(n_workers)]
        for w in workers:
            w.start()
        # Populate task's queue: one tree-building task per tree.
        for i in xrange(self.n_trees):
            # Create a new random state for each tree, seeded with the tree
            # index so repeated fits are reproducible.
            random_state = np.random.RandomState(i)
            tasks.put(Task(_build_tree, (x, y, self,random_state), i))
        # Add a poison pill for each worker.
        for i in xrange(n_workers):
            tasks.put(None)
        # Wait for all of the tasks to finish.
        tasks.join()
        # Retrieve results i.e. the trees from the queue.
        self.forest = retrieve(results, self.n_trees)
        return self

    def predict(self, x):
        """ Predict class for test set x.

        The predicted class of an input sample is computed as the majority
        prediction of the trees in the forest.

        Parameters
        ----------
        x : array-like of shape = [n_samples, n_features]
            The test input samples.

        Returns
        -------
        y_pred : array of shape = [n_samples]
            The predicted classes.

        probs : array of shape = [n_classes, n_samples]
            Fraction of trees voting for each class, per sample.
            NOTE(review): this is the full vote-fraction matrix, not just
            the probability of the predicted class -- confirm what callers
            expect.
        """
        # Same worker-pool setup as in `fit`.
        if self.n_jobs == -1:
            n_workers = min(cpu_count(), self.n_trees)
        else:
            n_workers = min(self.n_jobs, self.n_trees)
        # Establish communication queues.
        tasks = Manager().JoinableQueue()
        results = Manager().Queue()
        # Start workers.
        workers = [Worker(tasks, results) for _ in xrange(n_workers)]
        for w in workers:
            w.start()
        # Populate task's queue: one prediction task per fitted tree.
        for i in xrange(self.n_trees):
            tasks.put(Task(_tree_predict, (x, self.forest[i]), i))
        # Add a poison pill for each worker.
        for i in xrange(n_workers):
            tasks.put(None)
        # Wait for all of the tasks to finish.
        tasks.join()
        # Retrieve results i.e. the votes of the trees from the queue i.e
        # an array of shape [n_trees, n_samples].
        votes = np.array(retrieve(results, self.n_trees), int)
        # Count up the votes of the trees.
        n_classes = len(np.unique(votes))
        counts = np.apply_along_axis(
            lambda z: np.bincount(z, minlength=n_classes), 0, votes)
        # Classify each sample according to the majority of the votes.
        y_pred = np.argmax(counts, axis=0)
        return y_pred, counts / self.n_trees
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.mininode import wait_until
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_equal,
assert_raises_jsonrpc,
connect_nodes_bi,
start_node,
stop_node,
)
class DisconnectBanTest(BitcoinTestFramework):
    """Functional test for node banning (setban/listbanned/clearbanned)
    and the disconnectnode RPC."""

    def __init__(self):
        super().__init__()
        # Two nodes, connected to each other; node1 does the banning.
        self.num_nodes = 2
        self.setup_clean_chain = False

    def run_test(self):
        self.log.info("Test setban and listbanned RPCs")

        self.log.info("setban: successfully ban single IP address")
        assert_equal(len(self.nodes[1].getpeerinfo()), 2)  # node1 should have 2 connections to node0 at this point
        self.nodes[1].setban("127.0.0.1", "add")
        assert wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
        assert_equal(len(self.nodes[1].getpeerinfo()), 0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("clearbanned: successfully clear ban list")
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].setban("127.0.0.0/24", "add")

        self.log.info("setban: fail to ban an already banned subnet")
        assert_equal(len(self.nodes[1].listbanned()), 1)
        assert_raises_jsonrpc(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")

        self.log.info("setban: fail to ban an invalid subnet")
        assert_raises_jsonrpc(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
        assert_equal(len(self.nodes[1].listbanned()), 1)  # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24

        self.log.info("setban remove: fail to unban a non-banned subnet")
        assert_raises_jsonrpc(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 1)

        self.log.info("setban remove: successfully unban subnet")
        self.nodes[1].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[1].listbanned()), 0)
        self.nodes[1].clearbanned()
        assert_equal(len(self.nodes[1].listbanned()), 0)

        self.log.info("setban: test persistence across node restart")
        self.nodes[1].setban("127.0.0.0/32", "add")
        self.nodes[1].setban("127.0.0.0/24", "add")
        # Set the mocktime so we can control when bans expire
        old_time = int(time.time())
        self.nodes[1].setmocktime(old_time)
        self.nodes[1].setban("192.168.0.1", "add", 1)  # ban for 1 seconds
        self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[1].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
        # Move time forward by 3 seconds so the third ban has expired
        self.nodes[1].setmocktime(old_time + 3)
        assert_equal(len(self.nodes[1].listbanned()), 3)
        # The expired ban must not survive the restart; the other three do.
        stop_node(self.nodes[1], 1)
        self.nodes[1] = start_node(1, self.options.tmpdir)
        listAfterShutdown = self.nodes[1].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        # Clear ban lists
        self.nodes[1].clearbanned()
        connect_nodes_bi(self.nodes, 0, 1)

        self.log.info("Test disconnectnode RPCs")

        self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        # Fix: use the peer's numeric id for the nodeid argument (the
        # original copy-pasted the address here).
        node1 = self.nodes[0].getpeerinfo()[0]['id']
        assert_raises_jsonrpc(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)

        self.log.info("disconnectnode: fail to disconnect when calling with junk address")
        assert_raises_jsonrpc(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")

        self.log.info("disconnectnode: successfully disconnect node by address")
        address1 = self.nodes[0].getpeerinfo()[0]['addr']
        self.nodes[0].disconnectnode(address=address1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

        self.log.info("disconnectnode: successfully reconnect node")
        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]

        self.log.info("disconnectnode: successfully disconnect node by node id")
        id1 = self.nodes[0].getpeerinfo()[0]['id']
        self.nodes[0].disconnectnode(nodeid=id1)
        assert wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
        assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
    # Run the functional test when this file is executed directly.
    test = DisconnectBanTest()
    test.main()
| earonesty/bitcoin | test/functional/disconnect_ban.py | Python | mit | 5,635 |
from django.core.urlresolvers import resolve, reverse
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from pretix.base.signals import logentry_display
from pretix.control.signals import nav_event
@receiver(nav_event, dispatch_uid="sendmail_nav")
def control_nav_import(sender, request=None, **kwargs):
    """Contribute the sendmail entries to the event control navigation.

    Returns an empty list for users who may not change orders; otherwise a
    single top-level entry with "send" and "history" children, marked
    active based on the resolved view.
    """
    url = resolve(request.path_info)
    if not request.eventperm.can_change_orders:
        return []
    reverse_kwargs = {
        'event': request.event.slug,
        'organizer': request.event.organizer.slug,
    }
    send_url = reverse('plugins:sendmail:send', kwargs=reverse_kwargs)
    history_url = reverse('plugins:sendmail:history', kwargs=reverse_kwargs)
    in_plugin = url.namespace == 'plugins:sendmail'
    return [
        {
            'label': _('Send out emails'),
            'url': send_url,
            'active': in_plugin and url.url_name == 'send',
            'icon': 'envelope',
            'children': [
                {
                    'label': _('Send email'),
                    'url': send_url,
                    'active': in_plugin and url.url_name == 'send',
                },
                {
                    'label': _('Email history'),
                    'url': history_url,
                    'active': in_plugin and url.url_name == 'history',
                },
            ]
        },
    ]
@receiver(signal=logentry_display)
def pretixcontrol_logentry_display(sender, logentry, **kwargs):
    """Translate sendmail log entry action types into display strings.

    Returns None (implicitly letting other receivers handle it) for
    action types this plugin does not know.
    """
    plains = {
        'pretix.plugins.sendmail.sent': _('Email was sent'),
        'pretix.plugins.sendmail.order.email.sent': _('The order received a mass email.')
    }
    return plains.get(logentry.action_type)
| Flamacue/pretix | src/pretix/plugins/sendmail/signals.py | Python | apache-2.0 | 2,026 |
"""
(C) Copyright 2009 Igor V. Custodio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from posplat.ISO8583.ISO1583_BIT62 import ISO8583_BIT62
from posplat.ISO8583.ISO8583 import ISO8583
from posplat.ISO8583.ISOErrors import *
import traceback
import os
# Clear the console ('cls' on Windows, 'clear' elsewhere).
os.system(['clear', 'cls'][os.name == 'nt'])
# Testing some functionalities
p2 = ISO8583()
p2.setMTI('0800')
# Each bit is set to its own number so the round-trip below is easy to
# verify by eye.
p2.setBit(2, 2)
p2.setBit(4, 4)
p2.setBit(12, 12)
p2.setBit(17, 17)
p2.setBit(99, 99)
print ('The MTI is = %s' % p2.getMTI())
print ('The Bitmap is = %s' % p2.getBitmap())
# Showing bits...
p2.showIsoBits()
# Save the ASCII ISO value without size
iso = p2.getRawIso()
print ('\n\n\n------------------------------------------\n')
print ('This is the ISO <%s> that will be interpreted' % iso)
# New ISO: re-parse the raw ASCII produced above.
i = ISO8583()
# Set the ASCII
i.setIsoContent(iso)
# Showing that everything is ok
print ('The MTI is = %s' % i.getMTI())
print ('The Bitmap is = %s' % i.getBitmap())
print ('Show bits inside the package')
i.showIsoBits()
# Using == to compare ISOs (the parsed copy should equal the original)
print ('Compare ISOs ...')
if i == p2:
    print ('They are equivalent!')
else:
    print ('The are differente')
# More example...
print ('\n\n\n------------------------------------------\n')
i3 = ISO8583()
i3.setMTI('0800')
i3.setBit(3, '300000')
i3.setBit(24, '045')
i3.setBit(41, '11111111')
i3.setBit(42, '222222222222222')
# Bit 62 carries a nested message built with ISO8583_BIT62 and embedded
# below as this message's raw payload.
p = ISO8583_BIT62()
p.setBit(1, "123")
p.setBit(2, "456")
p.setBit(3, 4897)
p.setBit(6, 'F')
p.setBit(7, '2F')
p.setBit(11, 'A1000234')
p.setBit(88, 'A123')
r = p.getRawIso()
i3.setBit(62, r)
i3.showIsoBits()
print ('This is the pack %s' % i3.getRawIso())
| davidvon/pipa-pay-server | admin/posplat/test/example2.py | Python | apache-2.0 | 2,173 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import logging
from django.utils.translation import ugettext as _
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML, Button
import floppyforms.__future__ as forms
from .models import Recipe
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RecipeForm(forms.ModelForm):
    """Create/edit form for ``Recipe``.

    The list-valued ``ingredients`` model attribute is edited through a
    plain textarea, one ingredient per line; ``clean_ingredients`` turns
    the text back into a list of stripped lines and ``save`` copies it
    onto the instance.
    """

    class Meta(object):
        model = Recipe
        fields = (
            'name',
            'amount',
            'unit',
        ) # yapf: disable

    ingredients = forms.CharField(widget=forms.Textarea())

    def __init__(self, *args, **kwargs):
        # `instance` is optional for a ModelForm: "add" forms are built
        # without one.  The previous `kwargs['instance']` raised KeyError
        # in that case; .get() keeps the None-guard below meaningful.
        instance = kwargs.get('instance')
        initial = kwargs.pop('initial', {})
        initial['ingredients'] = '\n'.join(instance.ingredients if instance else [])
        super(RecipeForm, self).__init__(*args, initial=initial, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'name',
            'amount',
            'unit',
            'ingredients',
            HTML('''
<div class="row">
<div class="col-md-8" id="meal-counter"></div>
<div class="col-md-4" id="ingredient-finder">
<div class="panel ingredient-finder-panel" id='ingredient-finder-results'></div>
</div>
</div>'''),
            FormActions(Submit('submit', _('Agregar'),
                               css_class='btn-primary pull-right',
                               data_loading_text=_('Agregando...')),
                        Button('meal-counter-button', _('Calcular'),
                               css_id='meal-counter-button',
                               css_class='pull-right'), )
        ) # yapf: disable

    def save(self, *args, **kwargs):
        """Copy the cleaned ingredient list onto the instance, then save."""
        self.instance.ingredients = self.cleaned_data['ingredients']
        return super(RecipeForm, self).save(*args, **kwargs)

    def clean_ingredients(self):
        """Split the textarea value into a list of stripped lines."""
        data = self.cleaned_data['ingredients']
        if data:
            return [l.strip() for l in data.split('\n')]
        return data
| pignacio/vld_django | vld_django/recipes/forms.py | Python | gpl-3.0 | 2,236 |
import _ast
import collections
from cStringIO import StringIO
import re
import sys
import os
from icbd.util import ast_utils, cfa
from icbd.type_analyzer import builtins, type_checker, type_system
from . import closure_analyzer, usage_checker, phi_analyzer
from .type_conversion import InferredTypeOutput
from .code_emitter import CodeEmitter
from .compiler_types import (
BINOP_MAP,
COMPARE_MAP,
COMPARE_REFLECTIONS,
UserAttributeError,
UserTypeError,
AttributeAccessType,
CantInstantiateException,
format_float,
eval_template,
eval_ctemplate,
Variable,
UnboxedFunctionMT,
UnboxedTupleMT,
TupleMT,
ClassMT,
Type,
TypeClass,
FileClass,
ObjectClass,
File,
ListMT,
SetMT,
DequeMT,
Bool,
Float,
Str,
Int,
None_,
TypeFunc,
ClosureMT,
StrConstant,
_SpecialFuncMT,
PolymorphicFunctionMT,
UnboxedInstanceMethod,
CallableMT,
ModuleMT,
UserModuleMT,
DictMT,
SliceMT,
Slice,
BUILTINS,
BUILTIN_MODULES,
STDLIB_TYPES,
)
class CompileWalker(object):
    def __init__(self, parent_module, node_id, func_type, cg, em, sym_table, type_info, live_at_end, vars_to_raise, closure_results, my_closure_results, globals_, is_module):
        """Set up a walker that emits code for one function or module body.

        parent_module: name/key of the module this code lives in (used to
            look up the module object in ``cg.modules``).
        node_id: id of the CFG node being compiled.
        func_type: type of the function being compiled.
        cg: shared code-generation state (``modules``, ``blockname``).
        em: emitter the walker writes output lines to.
        sym_table: mapping of local names to Variable objects.
        type_info: inferred-type lookup for expressions.
        live_at_end, vars_to_raise: liveness bookkeeping -- presumably
            consumed by statement-level code outside this chunk; confirm.
        closure_results: whole-program closure analysis results.
        my_closure_results: ClosureResults for this specific scope.
        globals_: names that are module-global in this scope.
        is_module: True when compiling a module body, not a function.
        """
        assert isinstance(my_closure_results, closure_analyzer.ClosureResults), my_closure_results
        self._parent_module = parent_module
        self._nid = node_id
        self._func_type = func_type
        self.cg = cg
        self.em = em
        self._st = sym_table  # symbol table: name -> Variable
        self._type_info = type_info
        self._live_at_end = live_at_end
        self._vars_to_raise = vars_to_raise
        self._closure_results = closure_results
        self._cr = my_closure_results  # closure analysis for this scope
        self._globals = globals_
        self._is_module = is_module
    def _find_and_apply_binop(self, _v1, _v2, *ops):
        """Apply the first operator in `ops` that the operand types support.

        `ops` is a sequence of (method_name, is_reversed) pairs, e.g.
        ('__add__', False), ('__radd__', True); for reversed entries the
        operands are swapped before the attribute lookup.  The bare
        "N, M" comments below track the vref counts held on (v1, v2) at
        each point -- getattr/call consume refs, so failed candidates must
        re-increment before continuing.  Raises if no candidate applies.
        """
        _v1.incvref(self.em)
        _v2.incvref(self.em)
        # number of vrefs: (v1) 2, (v2) 2
        for op_name, is_reversed in ops:
            # 2, 2
            if is_reversed:
                v1, v2 = _v2, _v1
            else:
                v1, v2 = _v1, _v2
            # 2, 2
            try:
                f = v1.getattr(self.em, op_name, clsonly=True)
                # 1, 2
            except UserAttributeError:
                # 1, 2
                v1.incvref(self.em)
                continue
            # 1, 2
            if f.t.can_call([v2.t]):
                r = f.call(self.em, [v2])
                # 1, 1
            else:
                # 1, 2
                f.decvref(self.em)
                v1.incvref(self.em)
                # 2, 2
                continue
            # 1, 1
            v1.decvref(self.em)
            v2.decvref(self.em)
            # 0, 0
            return r
        # 2, 2
        # v1.decvref(self.em)
        # v1.decvref(self.em)
        # v2.decvref(self.em)
        # v2.decvref(self.em)
        raise Exception("Couldn't apply any of %s on %s, %s" % (ops, _v1.t, _v2.t))
    def _get(self, node):
        """Evaluate expression `node` and return the resulting Variable.

        Wraps ``_evaluate`` with emitted begin/end trace comments, then
        converts the result to the type the inference pass recorded for
        this node.  Generated nodes (marked ``not_real``) and values whose
        types have no stable inferred counterpart (functions, modules,
        iterators, classes, ...) are returned unconverted.
        """
        self.em.pl("; %s:" % getattr(node, "lineno", "??") + " " + ast_utils.format_node(node))
        self.em.indent(2)
        r = self._evaluate(node)
        self.em.pl("; end" + " " + ast_utils.format_node(node))
        self.em.indent(-2)
        # Skip generated nodes since they're not in the type inference (TODO are there cases that that could break?)
        # and skip several classes of things that can't always be converted
        if not hasattr(node, "not_real") and not isinstance(r.t, (UnboxedFunctionMT, _SpecialFuncMT, PolymorphicFunctionMT, UnboxedInstanceMethod, CallableMT, ModuleMT, ListMT.ListIteratorMT, DictMT.DictIteratorMT, ClassMT)):
            expected_type = self._type_info.get_expr_type(self.em, node)
            if not (expected_type is r.t or expected_type is r.t.get_instantiated()):
                assert r.t.can_convert_to(expected_type), (expected_type, r.t)
                r = r.convert_to(self.em, expected_type)
        return r
def _evaluate(self, node):
assert isinstance(node, _ast.AST), node
if isinstance(node, _ast.Str):
return Variable(StrConstant, (node.s,), 1, False)
elif isinstance(node, _ast.Num):
if isinstance(node.n, (int, long)):
assert -2 ** 62 < node.n < 2 ** 62
return Variable(Int, node.n, 1, True)
assert isinstance(node.n, float)
return Variable(Float, format_float(node.n), 1, True)
elif isinstance(node, _ast.Name):
n = node.id
assert (n in self._cr.from_local) + (n in self._cr.from_closure) + (n in self._cr.from_global) + (n in self._cr.from_builtins) == 1, (n, self._cr.__dict__)
should_be_in_st = (n in self._cr.from_local) or (not self._is_module and n in self._cr.used_in_nested)
assert (n in self._st) == should_be_in_st, (n, n in self._st, should_be_in_st, self._cr.__dict__)
if n in self._cr.from_global or n in self._cr.globals_:
m = self.cg.modules[self._parent_module]
m.incvref(None)
return m.getattr(self.em, node.id)
elif n in self._cr.from_local:
v = self._st[node.id]
v.incvref(self.em)
return v
elif n in self._cr.from_closure:
assert node.id not in self._st, "%s is in both the closure and the local scope??" % (node.id,)
assert node.id not in self._globals, "%s is in both the closure and globals??" % (node.id,)
closure = self._st["__parent_closure__"]
# TODO might still need to hit the global closure in this case
v = closure.t.get(self.em, closure.v, node.id)
return v
elif n in self._cr.from_builtins:
if n == "__name__":
m = self.cg.modules[self._parent_module]
return Variable(StrConstant, (m.t.module_name,), 1, False)
v = BUILTINS[node.id].dup({})
v.incvref(self.em)
return v
else:
raise Exception("wtf, couldn't find %s" % node.id)
elif isinstance(node, _ast.BinOp):
v1 = self._get(node.left)
v2 = self._get(node.right)
op_name = BINOP_MAP[type(node.op)]
rop_name = "__r" + op_name[2:]
return self._find_and_apply_binop(v1, v2, (op_name, False), (rop_name, True))
elif isinstance(node, _ast.Compare):
if len(node.comparators) > 1:
sub_compare = _ast.Compare(node.comparators[0], node.ops[1:], node.comparators[1:], not_real=True)
twoarg_compare = _ast.Compare(node.left, node.ops[:1], node.comparators[:1], not_real=True)
new_node = _ast.BoolOp(_ast.And(), [twoarg_compare, sub_compare], not_real=True, lineno=node.lineno, col_offset=node.col_offset)
return self._evaluate(new_node)
assert len(node.comparators) == 1
# TODO use getattr
v1 = self._get(node.left)
v2 = self._get(node.comparators[0])
if isinstance(node.ops[0], (_ast.Is, _ast.IsNot)):
if v1.t.can_convert_to(v2.t):
v1 = v1.convert_to(self.em, v2.t)
if v2.t.can_convert_to(v1.t):
v2 = v2.convert_to(self.em, v1.t)
assert v1.t is v2.t, (v1.t, v2.t)
r = '%' + self.em.mkname()
self.em.pl("%s = icmp eq %s %s, %s" % (r, v1.t.llvm_type(), v1.v, v2.v))
if isinstance(node.ops[0], _ast.IsNot):
r2 = '%' + self.em.mkname()
self.em.pl("%s = xor i1 %s, 1" % (r2, r))
r = r2
v1.decvref(self.em)
v2.decvref(self.em)
return Variable(Bool, r, 1, False)
op_type = type(node.ops[0])
op_name = COMPARE_MAP[op_type]
if op_type is _ast.In or op_type is _ast.NotIn:
v1, v2 = v2, v1
ops = [(op_name, False)]
if op_type in COMPARE_REFLECTIONS:
reverse_name = COMPARE_MAP[COMPARE_REFLECTIONS[op_type]]
ops.append((reverse_name, True))
return self._find_and_apply_binop(v1, v2, *ops)
elif isinstance(node, _ast.Call):
f = self._get(node.func)
assert not node.keywords
assert not node.starargs
assert not node.kwargs
args = [self._get(e) for e in node.args]
expected_type = None
if isinstance(node.func, _ast.Name) and node.func.id in ("deque", "list", "set", "dict"):
expected_type = self._type_info.get_expr_type(self.em, node)
r = f.call(self.em, args, expected_type=expected_type)
return r
elif isinstance(node, _ast.Attribute):
v = self._get(node.value)
# print v.t, self._type_info.get_expr_type(self.em, node.value)
r = v.getattr(self.em, node.attr)
return r
elif isinstance(node, _ast.Subscript):
v = self._get(node.value)
s = self._get(node.slice)
# v has one ref, and will have two taken off (getattr and then call)
# v.incvref(self.em)
f = v.getattr(self.em, "__getitem__", clsonly=True)
r = f.call(self.em, [s])
return r
elif isinstance(node, _ast.Index):
return self._get(node.value)
elif isinstance(node, _ast.List):
t = self._type_info.get_expr_type(self.em, node)
assert isinstance(t, ListMT), t
name = "%" + self.em.mkname()
self.em.pl("%s = call %s %s()" % (name, t.llvm_type(), t.get_ctor_name()))
r = Variable(t, name, 1, True)
r.incvref(self.em) # for the getattr
f = r.getattr(self.em, "append")
for e in node.elts:
arg = self._get(e)
f.incvref(self.em) # for the next call
f.call(self.em, [arg])
f.decvref(self.em) # because we incvref'd it
return r
elif isinstance(node, _ast.Dict):
t = self._type_info.get_expr_type(self.em, node)
assert isinstance(t, DictMT), t
name = '%' + self.em.mkname()
self.em.pl("%s = call %s %s()" % (name, t.llvm_type(), t.get_ctor_name()))
r = Variable(t, name, 1, True)
r.incvref(self.em)
f = r.getattr(self.em, "__setitem__", clsonly=True) # I guess it shouldn't matter if clsonly is set or not
for i in xrange(len(node.keys)):
k = self._get(node.keys[i])
v = self._get(node.values[i])
f.incvref(self.em) # for the next call
f.call(self.em, [k, v])
f.decvref(self.em) # because we incvref'd it
return r
elif isinstance(node, _ast.Tuple):
elts = [self._get(e) for e in node.elts]
t = UnboxedTupleMT([e.t for e in elts])
return Variable(t, tuple(elts), 1, False)
# t = TupleMT.get_tuple(self.em, [e.t.get_instantiated() for e in elts])
# t_check = self._type_info.get_expr_type(self.em, node)
# assert t is t_check
# return t.alloc(elts)
elif isinstance(node, _ast.Slice):
lower = node.lower and self._get(node.lower)
upper = node.upper and self._get(node.upper)
step = node.step and self._get(node.step)
return SliceMT.create(self.em, lower, upper, step)
elif isinstance(node, _ast.UnaryOp):
v = self._get(node.operand)
if isinstance(node.op, _ast.Not):
f = v.getattr(self.em, "__nonzero__", clsonly=True)
r = f.call(self.em, [])
assert r.t is Bool, "not sure what the behavior is in this case"
n = "%" + self.em.mkname()
self.em.pl("%s = xor i1 %s, 1" % (n, r.v))
return Variable(Bool, n, 1, True)
elif isinstance(node.op, _ast.USub):
f = v.getattr(self.em, "__neg__", clsonly=True)
r = f.call(self.em, [])
return r
else:
raise Exception(node.op)
elif isinstance(node, _ast.Lambda):
return self._handle_function(node)
elif isinstance(node, _ast.BoolOp):
assert len(node.values) >= 2
if isinstance(node.op, _ast.Or):
lhs = self._get(node.values[0])
if hasattr(node, 'not_real'):
rtn_type = lhs.t.get_instantiated()
else:
rtn_type = self._type_info.get_expr_type(self.em, node)
_lhs = lhs.convert_to(self.em, rtn_type)
lhs = _lhs.split(self.em)
_lhs.decvref(self.em)
lhs.incvref(self.em) # for the getattr
lhs_nonzero = lhs.getattr(self.em, "__nonzero__", clsonly=True)
lhs_bool = lhs_nonzero.call(self.em, [])
assert lhs_bool.t is Bool, lhs.t
next = self.em.mkname(prefix="label")
inputs = [(self.cg.blockname, lhs.v)]
for elt in node.values[1:]:
iffalse = self.em.mkname(prefix="label")
self.em.pl("br i1 %s, label %%%s, label %%%s" % (lhs_bool.v, next, iffalse))
self.em.indent(-4)
self.em.pl("%s:" % iffalse)
self.em.indent(4)
self.cg.blockname = iffalse
d = lhs.t.decref_llvm(self.em, lhs.v)
if d:
self.em.pl(d + "; fell from conditional")
lhs = self._get(elt)
_lhs = lhs.convert_to(self.em, rtn_type)
lhs = _lhs.split(self.em)
_lhs.decvref(self.em)
if elt is not node.values[-1]:
lhs.incvref(self.em)
lhs_nonzero = lhs.getattr(self.em, "__nonzero__", clsonly=True)
lhs_bool = lhs_nonzero.call(self.em, [])
assert lhs_bool.t is Bool, lhs.t
else:
del lhs_bool, lhs_nonzero
inputs.append((self.cg.blockname, lhs.v))
self.em.pl("br label %%%s" % next)
rtn = '%' + self.em.mkname()
self.em.indent(-4)
self.em.pl("%s:" % next)
self.em.indent(4)
self.em.pl("%s = phi %s %s" % (rtn, rtn_type.llvm_type(), ", ".join(["[%s, %%%s]" % (vn, bn) for bn, vn in inputs])))
self.cg.blockname = next
return Variable(rtn_type, rtn, 1, True)
elif isinstance(node.op, _ast.And):
lhs = self._get(node.values[0])
if hasattr(node, 'not_real'):
rtn_type = lhs.t.get_instantiated()
else:
rtn_type = self._type_info.get_expr_type(self.em, node)
_lhs = lhs.convert_to(self.em, rtn_type)
lhs = _lhs.split(self.em)
_lhs.decvref(self.em)
lhs.incvref(self.em) # for the getattr
lhs_nonzero = lhs.getattr(self.em, "__nonzero__", clsonly=True)
lhs_bool = lhs_nonzero.call(self.em, [])
assert lhs_bool.t is Bool, lhs.t
next = self.em.mkname(prefix="label")
# TODO this is wrong
rtn_type = lhs.t.get_instantiated()
inputs = [(self.cg.blockname, lhs.v)]
for elt in node.values[1:]:
iftrue = self.em.mkname(prefix="label")
self.em.pl("br i1 %s, label %%%s, label %%%s" % (lhs_bool.v, iftrue, next))
self.em.indent(-4)
self.em.pl("%s:" % iftrue)
self.em.indent(4)
self.cg.blockname = iftrue
d = lhs.t.decref_llvm(self.em, lhs.v)
if d:
self.em.pl(d + "; fell from conditional")
lhs = self._get(elt)
_lhs = lhs.convert_to(self.em, rtn_type)
lhs = _lhs.split(self.em)
_lhs.decvref(self.em)
if elt is not node.values[-1]:
lhs.incvref(self.em)
lhs_nonzero = lhs.getattr(self.em, "__nonzero__", clsonly=True)
lhs_bool = lhs_nonzero.call(self.em, [])
assert lhs_bool.t is Bool, lhs.t
else:
del lhs_bool, lhs_nonzero
inputs.append((self.cg.blockname, lhs.v))
self.em.pl("br label %%%s" % next)
rtn = '%' + self.em.mkname()
self.em.indent(-4)
self.em.pl("%s:" % next)
self.em.indent(4)
self.em.pl("%s = phi %s %s" % (rtn, rtn_type.llvm_type(), ", ".join(["[%s, %%%s]" % (vn, bn) for bn, vn in inputs])))
self.cg.blockname = next
return Variable(rtn_type, rtn, 1, True)
else:
raise Exception(node.op)
elif isinstance(node, (_ast.ListComp, _ast.GeneratorExp)):
list_type = self._type_info.get_expr_type(self.em, node)
assert isinstance(list_type, ListMT)
assert len(node.generators) == 1
[g] = node.generators
assert not g.ifs
set_names = [n.id for n in ast_utils.find_names(g.target) if isinstance(n.ctx, _ast.Store)]
inner_set_names = [n.id for n in ast_utils.find_names(node.elt) if isinstance(n.ctx, _ast.Store)]
set_names += inner_set_names
if isinstance(g.iter, _ast.Call) and isinstance(g.iter.func, _ast.Name) and g.iter.func.id in ("range", "xrange"):
assert len(g.iter.args) == 1
assert not g.iter.starargs
assert not g.iter.kwargs
assert not g.iter.keywords
end = self._get(g.iter.args[0])
assert end.t is Int
is_xrange = True
else:
gen = self._get(g.iter)
iter_func = gen.getattr(self.em, "__iter__", clsonly=True)
iter = iter_func.call(self.em, [])
is_xrange = False
rtn = '%' + self.em.mkname()
iter_name = '%' + self.em.mkname()
next_iter = '%' + self.em.mkname()
start_label = self.cg.blockname
check_label = self.em.mkname(prefix="label")
loop_label = self.em.mkname(prefix="label")
done_label = self.em.mkname(prefix="label")
loop_label_placeholder = self.em.get_placeholder()
self.em.pl("%s = call %s %s()" % (rtn, list_type.llvm_type(), list_type.get_ctor_name()))
clear_names = []
changed_names = {}
for n in set_names:
if n in self._globals:
continue
if self._is_module and self.cg.modules[self._parent_module].t.has(n):
continue
if n not in self._st:
clear_names.append(n)
continue
v = self._st[n]
v2 = v.convert_to(self.em, v.t.get_instantiated())
assert v2.nrefs == 1
if not v2.marked:
self.em.pl(v2.t.incref_llvm(self.em, v2.v) + " ; marking")
old_name = v2.v
new_name = "%" + self.em.mkname()
changed_names[n] = (old_name, new_name)
v2.v = new_name
self._st[n] = v2
self.em.pl("br label %%%s" % check_label)
self.em.indent(-4)
self.em.pl("%s:" % check_label)
self.cg.blockname = check_label
phi_placeholder = self.em.get_placeholder()
self.em.pl(phi_placeholder)
self.em.indent(4)
if is_xrange:
done_name = '%' + self.em.mkname()
self.em.pl("%s = phi i64 [0, %%%s], [%s, %%%s]" % (iter_name, start_label, next_iter, loop_label_placeholder))
self.em.pl("%s = icmp sge i64 %s, %s" % (done_name, iter_name, end.v))
self.em.pl("br i1 %s, label %%%s, label %%%s" % (done_name, done_label, loop_label))
else:
iter.incvref(self.em)
has_next_func = iter.getattr(self.em, "hasnext", clsonly=True)
has_next = has_next_func.call(self.em, [])
assert isinstance(has_next, Variable)
assert has_next.t is Bool
self.em.pl("br i1 %s, label %%%s, label %%%s" % (has_next.v, loop_label, done_label))
self.em.indent(-4)
self.em.pl("%s:" % loop_label)
self.cg.blockname = loop_label
self.em.indent(4)
# TODO do I need to use the cache here? is this right?
saved_syms = dict([(sym, v.dup({})) for sym, v in self._st.iteritems()])
if is_xrange:
self._set(g.target, Variable(Int, iter_name, 1, True))
else:
iter.incvref(self.em)
next_func = iter.getattr(self.em, "next", clsonly=True)
next = next_func.call(self.em, [])
self._set(g.target, next)
elt = self._get(node.elt)
for k in clear_names:
if k not in self._st:
assert k in inner_set_names
continue
v = self._st.pop(k)
v.decvref(self.em, "clearing")
self.em.register_replacement(loop_label_placeholder, self.cg.blockname)
elt = elt.convert_to(self.em, list_type.elt_type)
assert elt.t is list_type.elt_type, (elt.t, list_type.elt_type)
rtn_l = Variable(list_type, rtn, 1, False)
func = rtn_l.getattr(self.em, "append")
func.call(self.em, [elt])
if is_xrange:
self.em.pl("%s = add i64 %s, 1" % (next_iter, iter_name))
assert set(self._st) == set(saved_syms), "This should have been enforced above"
phi_code = ''
for k, v in self._st.items():
v2 = saved_syms[k]
if not v.equiv(v2):
if v.t.can_convert_to(v2.t):
v = v.convert_to(self.em, v2.t)
v = v.split(self.em)
assert v.nrefs == 1
assert v.marked
assert v.t == v2.t, (v.t, v2.t)
old_name, new_name = changed_names.pop(k)
assert v2.v == new_name
phi_code += ' %s = phi %s [%s, %%%s], [%s, %%%s]' % (new_name, v.t.llvm_type(), old_name, start_label, v.v, self.cg.blockname)
self._set(k, Variable(v.t, new_name, 1, True))
# They should have all been updated:
assert not changed_names
self.em.register_replacement(phi_placeholder, phi_code)
self.em.pl("br label %%%s" % (check_label,))
self.em.indent(-4)
self.em.pl("%s:" % done_label)
self.cg.blockname = done_label
self.em.indent(4)
if not is_xrange:
iter.decvref(self.em, "end of listcomp")
return Variable(list_type, rtn, 1, True)
elif isinstance(node, _ast.IfExp):
test = self._get(node.test)
test2 = test.getattr(self.em, "__nonzero__", clsonly=True).call(self.em, [])
assert test2.t is Bool
true_label = self.em.mkname(prefix="label")
false_label = self.em.mkname(prefix="label")
end_label = self.em.mkname(prefix="label")
self.em.pl("br i1 %s, label %%%s, label %%%s" % (test2.v, true_label, false_label))
resulting_type = self._type_info.get_expr_type(self.em, node)
self.em.indent(-4)
self.em.pl("%s:" % true_label)
self.cg.blockname = true_label
self.em.indent(4)
v1 = self._get(node.body).convert_to(self.em, resulting_type)
if v1.nrefs != 1 or not v1.marked:
_v1 = v1.split(self.em)
v1.decvref(self.em)
v1 = _v1
true_end = self.cg.blockname
self.em.pl("br label %%%s" % (end_label,))
self.em.indent(-4)
self.em.pl("%s:" % false_label)
self.cg.blockname = false_label
self.em.indent(4)
v2 = self._get(node.orelse).convert_to(self.em, resulting_type)
if v2.nrefs != 1 or not v2.marked:
_v2 = v2.split(self.em)
v2.decvref(self.em)
v2 = _v2
false_end = self.cg.blockname
self.em.pl("br label %%%s" % (end_label,))
assert v1.t is resulting_type
assert v2.t is resulting_type
# Need to obey the phi discipline:
assert v1.nrefs == v2.nrefs
assert v1.marked == v2.marked
t = v1.t
self.em.indent(-4)
self.em.pl("%s:" % end_label)
self.cg.blockname = end_label
self.em.indent(4)
r = '%' + self.em.mkname()
self.em.pl("%s = phi %s [%s, %%%s], [%s, %%%s]" % (r, t.llvm_type(), v1.v, true_end, v2.v, false_end))
return Variable(t, r, 1, v1.marked)
elif isinstance(node, cfa.HasNext):
it = self._get(node.iter)
f = it.getattr(self.em, "hasnext", clsonly=True)
return f.call(self.em, [])
else:
raise Exception(node)
raise Exception("didn't return for %s" % node)
def _set(self, t, val):
    """Store *val* into assignment target *t* (an AST target node or a raw name).

    Refcount contract: *val* arrives with one vref that this _set should consume
    (can't actually check it because it might have added other refs,
    ex by adding it to the symbol table).
    """
    if isinstance(t, _ast.Name):
        # Plain name: delegate to the string case below.
        self._set(t.id, val)
    elif isinstance(t, _ast.Subscript):
        # obj[slice] = val  ->  obj.__setitem__(slice, val)
        v = self._get(t.value)
        s = self._get(t.slice)
        f = v.getattr(self.em, "__setitem__", clsonly=True)
        f.call(self.em, [s, val])
    elif isinstance(t, _ast.Attribute):
        # obj.attr = val
        v = self._get(t.value)
        v.setattr(self.em, t.attr, val)
    elif isinstance(t, str):
        self._set_name(t, val)
    elif isinstance(t, (_ast.Tuple, _ast.List)):
        # Unpacking assignment: (a, b) = val  /  [a, b] = val
        if isinstance(val.t, UnboxedTupleMT):
            # Elements are available at compile time; each recursive _set
            # consumes the vref added here.
            assert len(t.elts) == len(val.v)
            for i in xrange(len(val.v)):
                e = val.v[i]
                e.incvref(self.em)
                self._set(t.elts[i], e)
            val.decvref(self.em)
        elif isinstance(val.t, (TupleMT, ListMT)):
            if isinstance(val.t, TupleMT):
                # Tuple arity is statically known, so check it here.
                assert len(t.elts) == len(val.t.elt_types)
            else:
                # List length is only known at runtime; emit a length check.
                val.incvref(self.em)
                r = val.getattr(self.em, "__len__", clsonly=True).call(self.em, [])
                self.em.pl("call void @check_unpacking_length(i64 %d, i64 %s)" % (len(t.elts), r.v))
            for i in xrange(len(t.elts)):
                val.incvref(self.em)
                r = val.getattr(self.em, "__getitem__", clsonly=True).call(self.em, [Variable(Int, i, 1, True)])
                self._set(t.elts[i], r)
            val.decvref(self.em)
        else:
            raise Exception(val.t)
    else:
        raise Exception(t)
def _set_name(self, name, v):
    """Bind *name* to Variable *v* in the appropriate storage (global module,
    closure object, or local symbol table).

    Consumes one vref of *v*, either by storing it or by explicitly
    decref'ing it at the end.
    """
    assert isinstance(name, str)
    assert isinstance(v, Variable)
    # A name lives in a closure if a nested scope reads it, or if it is a
    # function/class/module (those are stored as closure constants).
    in_closure = (name in self._cr.used_in_nested) or (name in self._cr.functions) or (name in self._cr.classes) or (name in self._cr.modules)
    # Exactly one storage class must apply to this name.
    assert (name in self._cr.globals_) + (name in self._cr.local_only) + in_closure == 1, (name, self._cr.globals_, self._cr.local_only, in_closure)
    if name in self._globals:
        # A `global name` declaration is in effect: write through to the module.
        m = self.cg.modules[self._parent_module]
        m.incvref(self.em)
        m.setattr(self.em, name, v)
        return
    # Scopes read their closure by putting the items in the local sym table,
    # except for the global closure since all its items are mutable, except
    # for the static ones
    if in_closure:
        if self._is_module:
            scope = self.cg.modules[self._parent_module]
            # The closure_analyzer will determine which variables are safe to assume
            # as being immutable by other scopes; currently just "__foriter_" variables
            put_in_st = name in self._cr.local_only
        else:
            scope = self._st["__closure__"]
            # closure variables can't be modified by inner scopes
            put_in_st = True
        assert scope
        v.incvref(self.em)
        scope.t.set(self.em, scope.v, name, v)
        # We could put it into the symbol table if it's a constant,
        # but that optimization wouldn't do anything since getting
        # a constant from the scope is a no-op
    else:
        put_in_st = True
    if put_in_st:
        if name in self._st:
            # Release the previous binding before overwriting it.
            self._st[name].decvref(self.em)
        self._st[name] = v
    else:
        # Stored in the scope object only; drop our remaining local ref.
        v.decvref(self.em)
def _close_block(self):
    """Finish codegen for a basic block: drop dead symbols and normalize live ones.

    Live variables listed in self._vars_to_raise will feed a phi node in a
    successor block, so they are converted to the phi's type and brought to
    the owned state (nrefs == 1, marked) that the phi contract requires.
    """
    done = []
    for n, v in self._st.iteritems():
        if n not in self._live_at_end:
            # self.em.pl("; %s not live" % (n))
            v.decvref(self.em)
            done.append(n)
        else:
            # self.em.pl("; %s live" % (n))
            if n in self._vars_to_raise:
                # Have to assume that all variables are owned at the end of a block; could get around this with a more complicated analysis phase (or maybe just remove them in a post-processing phase)
                # similarly for raising
                v2 = v.convert_to(self.em, self._vars_to_raise[n])
                if v2.nrefs > 1 or not v2.marked:
                    # Split to obtain a uniquely-owned, marked copy.
                    v3 = v2.split(self.em)
                    v2.decvref(self.em)
                    v2 = v3
                self._st[n] = v2
    for n in done:
        # Deferred removal so the dict isn't mutated while iterating.
        self._st.pop(n)
def pre_global(self, node):
    """A `global` declaration emits no code; _set_name consults self._globals."""
    return ()
def pre_pass(self, node):
    """A `pass` statement generates no code."""
    return ()
def pre_expr(self, node):
    """An expression statement: evaluate the value, then discard the result."""
    result = self._get(node.value)
    result.decvref(self.em)
    return ()
def pre_branch(self, node):
    """Emit the conditional-branch terminator for the current basic block."""
    v = self._get(node.test)
    # Coerce the test to a Bool via __nonzero__.
    v2 = v.getattr(self.em, "__nonzero__", clsonly=True).call(self.em, [])
    assert node.true_block
    assert node.false_block
    self._close_block()
    if str(v2.v) == "0" or node.true_block == node.false_block:
        # Statically-false (or degenerate) condition: unconditional jump.
        assert 0, "untested"
        self.em.pl("br label %%block%d" % (node.false_block,))
    elif str(v2.v) == "1":
        # Statically-true condition.
        assert 0, "untested"
        self.em.pl("br label %%block%d" % (node.true_block,))
    else:
        self.em.pl("br i1 %s, label %%block%d, label %%block%d" % (v2.v, node.true_block, node.false_block))
    return ()
def pre_jump(self, node):
    """Emit an unconditional jump to the target basic block."""
    target = node.block_id
    self._close_block()
    self.em.pl("br label %%block%d" % (target,))
    return ()
def pre_import(self, node):
    """Handle `import mod` (no aliases or dotted names supported)."""
    for alias in node.names:
        assert not alias.asname
        assert '.' not in alias.name
        module_var = self.cg.import_module(self.em, alias.name)
        self._set(alias.name, module_var)
    return ()
def pre_importfrom(self, node):
    """Handle `from mod import a [as b], ...`."""
    module_var = self.cg.import_module(self.em, node.module)
    for alias in node.names:
        bound_name = alias.asname or alias.name
        assert '.' not in alias.name
        assert '.' not in bound_name
        # getattr consumes a ref on the module, so add one per name.
        module_var.incvref(self.em)
        attr = module_var.getattr(self.em, alias.name)
        self._set(bound_name, attr)
    return ()
def pre_augassign(self, node):
    """Handle `target op= value` by trying the in-place, plain, then reflected op."""
    target_val = self._get(node.target)
    rhs = self._get(node.value)
    base_name = BINOP_MAP[type(node.op)]
    # Strip the leading "__" to derive the in-place/reflected dunder names.
    inplace_name = "__i" + base_name[2:]
    reflected_name = "__r" + base_name[2:]
    result = self._find_and_apply_binop(target_val, rhs, (inplace_name, False), (base_name, False), (reflected_name, True))
    self._set(node.target, result)
    return ()
def pre_assign(self, node):
    """Handle `a = b = value`: evaluate the value once, then store to each target."""
    value = self._get(node.value)
    for target in node.targets:
        # _set consumes one vref, so add one per target.
        value.incvref(self.em)
        self._set(target, value)
    # Drop the vref that _get returned.
    value.decvref(self.em)
    return ()
def pre_print(self, node):
    """Generate code for a Python 2 `print` statement.

    Each value is converted to a string via __str__ and written to
    sys.stdout.  A separator is requested between items — and after the
    last one when the statement ends with a trailing comma (node.nl is
    False) — and a newline is appended unless node.nl is False.
    """
    for i, elt in enumerate(node.values):
        v = self._get(elt)
        assert isinstance(v, Variable), elt
        v = v.getattr(self.em, "__str__", clsonly=True).call(self.em, [])
        assert v.t is Str
        self.em.pl("call void @file_write(%%file* @sys_stdout, %%string* %s)" % (v.v,))
        if i < len(node.values) - 1 or not node.nl:
            # The runtime helper decides whether a space is actually needed.
            self.em.pl("call void @print_space_if_necessary(%%string* %s)" % (v.v,))
        v.decvref(self.em)
    if node.nl:
        # Constant string: no %-formatting needed (the original applied
        # `% ()` to an argument-less format string for no effect).
        self.em.pl("call void @file_write(%file* @sys_stdout, %string* @str_newline)")
    return ()
def _handle_function(self, node):
    """Compile a function or lambda definition into an unboxed-function Variable.

    Evaluates default-argument expressions now, queues the body for later
    compilation, and returns a Variable wrapping (llvm name, defaults,
    closure) — or None if type inference says the function is dead.
    """
    assert isinstance(node, (_ast.FunctionDef, _ast.Lambda))
    assert not node.args.vararg
    assert not node.args.kwarg
    if self._type_info.is_dead_function(node):
        print "Not compiling %s since it's dead" % (node.name,)
        return None
    try:
        f_type = self._type_info.get_expr_type(self.em, node)
    except Exception:
        print "Couldn't get the type of", node.name
        raise
    assert isinstance(f_type, CallableMT)
    defaults = []
    for i, d in enumerate(node.args.defaults):
        # Defaults fill the trailing argument positions.
        arg_idx = len(f_type.arg_types) - len(node.args.defaults) + i
        d = self._get(d)
        # d = d.convert_to(self.em, f_type.arg_types[arg_idx])
        defaults.append(d)
    # our ref to the defaults will get consumed when creating the unboxedfunctionmt variable
    name = "@" + self.em.mkname(prefix="_%s_" % (getattr(node, "name", "lambda"),))
    # Pass the closure created in this function if there was one,
    # otherwise directly pass the parent closure.
    closure = self._st.get("__closure__") or self._st.get("__parent_closure__")
    closure_type = closure.t if closure else None
    takes_closure = self._closure_results[self._parent_module][node].takes_closure
    unboxed = UnboxedFunctionMT(name, closure_type if takes_closure else None, f_type.arg_types, f_type.rtn_type, ndefaults=len(defaults))
    unboxed.initialize(self.em, "write")
    self.cg.queue_function(node, self._parent_module, unboxed, name, closure_type)
    return Variable(unboxed, (name, defaults, closure if takes_closure else None), 1, False)
def pre_functiondef(self, node):
    """`def name(...)`: compile the function and bind it, unless it is dead."""
    func_var = self._handle_function(node)
    if func_var is not None:
        self._set(node.name, func_var)
    return ()
def pre_classdef(self, node):
    """Compile a class definition (exactly one base; only methods and `pass`
    are supported in the body)."""
    assert len(node.bases) == 1
    base = self._get(node.bases[0])
    assert isinstance(base.t, ClassMT), base.t
    cls_type = self._type_info.get_expr_type(self.em, node)
    assert isinstance(cls_type, ClassMT)
    assert cls_type.base == base.t
    for fd in node.body:
        if isinstance(fd, _ast.Pass):
            continue
        assert isinstance(fd, _ast.FunctionDef), "Dont support %s yet" % (fd,)
        func = self._handle_function(fd)
        if func is not None:
            # Dead methods come back as None and simply aren't registered.
            cls_type.set_clsattr_value(fd.name, func, em=self.em)
    for name, val in cls_type._clsattr_types.iteritems():
        # Every attribute type inference saw must have received a value.
        assert val is not None, "should have %s for %s, but don't?" % (name, node.name)
    self._set(node.name, Variable(cls_type, (), 1, False))
    return ()
def pre_return(self, node):
    """Emit a `ret` terminator, converting the value to the declared return type.

    Ref contract: a non-void return value is handed to the caller with
    nrefs == 1 and marked.
    """
    rtn_type = self._func_type.rtn_type
    # A bare `return` returns None.
    v = Variable(None_, "null", 1, False) if node.value is None else self._get(node.value)
    v = v.convert_to(self.em, rtn_type)
    if v.marked:
        r = v
    else:
        # Split to obtain an owned, marked copy satisfying the contract.
        r = v.split(self.em)
        v.decvref(self.em)
    self._close_block()
    if rtn_type is None_:
        self.em.pl("ret void")
    else:
        # This is the ref contract:
        assert r.nrefs == 1
        assert r.marked
        self.em.pl("ret %s %s" % (r.t.llvm_type(), r.v))
    return ()
def pre_assert(self, node):
    """Emit a runtime assertion on the truthiness of the test expression."""
    test = self._get(node.test)
    cond = test.getattr(self.em, "__nonzero__", clsonly=True).call(self.em, [])
    assert cond.t is Bool
    self.em.pl("call void @assert_(i1 %s)" % cond.v)
    return ()
class CodeGenerator(object):
def __init__(self, typepath, pythonpath):
    """Create a code generator; *typepath*/*pythonpath* are module search paths.

    Most state is created lazily in compile(); it starts as None here so a
    use-before-compile fails loudly.
    """
    self._compile_queue = None  # deque of (node, parent_module, f_type, name, parent_closure_type)
    self._typepath = typepath
    self._pythonpath = pythonpath
    self.modules = None # maps ast node -> usermodulemt object
    self._loaded_modules = None # maps fn -> usermodulemt object
    self._module_filenames = None # maps ast module -> fn
    self._closure_results = None
    self.type_info = None
def queue_function(self, node, parent_module, f_type, name, parent_closure_type):
    """Defer compilation of a function body until the main compile loop."""
    work_item = (node, parent_module, f_type, name, parent_closure_type)
    self._compile_queue.append(work_item)
def import_module(self, em, name=None, fn=None):
    """Import a module by *name* (searched on the pythonpath) or by filename *fn*.

    Builtin modules are returned directly; user modules are parsed and loaded
    on first use, then cached in self._loaded_modules.  Returns a fresh dup
    of the module Variable.
    """
    assert not (name and fn)
    assert name or fn
    if name and name in BUILTIN_MODULES:
        return BUILTIN_MODULES[name].dup({})
    if name:
        assert '.' not in name
        fns = [os.path.join(dirname, name + ".py") for dirname in self._pythonpath]
    else:
        # Filename given: derive the module name from the basename.
        fns = [fn]
        assert fn.endswith(".py")
        name = os.path.basename(fn)[:-3]
        assert name
    # NOTE: this loop deliberately rebinds the `fn` parameter.
    for fn in fns:
        if os.path.exists(fn):
            if fn not in self._loaded_modules:
                source = open(fn).read()
                node = ast_utils.parse(source, fn)
                self._load_module(em, node, name, fn)
            rtn = self._loaded_modules[fn]
            rtn.t.load(em)
            return rtn.dup({})
    raise Exception("don't know how to import '%s'" % name)
def _load_module(self, em, node, name, fn):
    """Register a parsed module: run closure analysis, build its module type,
    and queue its top-level code for compilation."""
    self._closure_results[node] = closure_analyzer.analyze_closures(node)
    ts_module = self.type_info.get_module(fn)
    module = UserModuleMT.make(em, name, node, fn, self._closure_results[node][node], ts_module, self.type_info)
    self.modules[node] = self._loaded_modules[fn] = module
    self._module_filenames[node] = fn
    # The module top level compiles as a zero-arg, void "<name>_global" function.
    self.queue_function(node, node, UnboxedFunctionMT(self, None, [], None_), "@%s_global" % name, None)
    self.type_info.map_module(ts_module, module.t)
    module.t.load_modules(em, self, self._closure_results[node][node], ts_module, self.type_info)
def compile(self, main_module, fn, llvm_f, c_f, deps_f):
    """Compile *main_module* (parsed from *fn*) and everything it imports.

    Writes LLVM IR to *llvm_f*, generated C support code to *c_f*, and an
    optional make-style dependency file to *deps_f*.  May only be called
    once per CodeGenerator instance (enforced by clobbering self.compile).
    """
    assert isinstance(main_module, _ast.Module)
    self.compile = None # Hacky way of saying+enforcing that this function should only be called once
    # type_info = MockTypeOutput()
    self.type_info = InferredTypeOutput(fn, self._typepath + self._pythonpath)
    llvm_body = StringIO()
    llvm_head = StringIO()
    llvm_tail = StringIO()
    c_head = StringIO()
    c_tail = StringIO()
    root_emitter = CodeEmitter((llvm_head, llvm_tail, c_head, c_tail))
    llvm_head.write(eval_template("prologue", root_emitter, {}))
    c_head.write(eval_ctemplate("cheader", root_emitter, {}))
    # Need to have basic lists that the runtime deals with
    ListMT.make_list(Str).initialize(root_emitter, "write")
    ListMT.make_list(Int).initialize(root_emitter, "write")
    TupleMT.make_tuple((Str, Str)).initialize(root_emitter, "write")
    TupleMT.make_tuple((Int, Int)).initialize(root_emitter, "write")
    # Register runtime-provided builtins before any user code is compiled.
    BUILTINS["divmod"] = Variable(UnboxedFunctionMT(None, None, [Int, Int], TupleMT.make_tuple([Int, Int])), ("@divmod", [], None), 1, False)
    FileClass.set_clsattr_value("readlines", Variable(UnboxedFunctionMT(None, None, [File], ListMT.make_list(Str)), ("@file_readlines", [], None), 1, False), _init=True)
    assert "range" not in BUILTINS
    # TODO xrange shouldnt do this
    BUILTINS["xrange"] = BUILTINS["range"] = Variable(UnboxedFunctionMT(None, None, [Int], ListMT.make_list(Int)), ("@range", [], None), 1, False)
    assert "argv" not in BUILTIN_MODULES['sys'].t._attrs
    BUILTIN_MODULES['sys'].t._attrs['argv'] = Variable(ListMT.make_list(Str), "@sys_argv", 1, False)
    self._compile_queue = collections.deque()
    self._loaded_modules = {}
    self._module_filenames = {}
    self.modules = {}
    self._closure_results = {}
    TypeClass.initialize(root_emitter, "write")
    for t in STDLIB_TYPES:
        t.initialize(root_emitter, "write")
    # Loading the main module seeds the compile queue (via queue_function).
    self._load_module(root_emitter, main_module, "__main__", fn)
    llvm_tail.write(eval_template("epilogue", root_emitter, {
        }))
    def is_inlined_constant(sym, cr):
        # Functions/classes/modules are compile-time constants and never
        # need a runtime symbol-table slot.
        return sym in cr.functions or sym in cr.classes or sym in cr.modules
    while self._compile_queue:
        emitter = CodeEmitter(root_emitter)
        node, parent_module, f_type, f_name, parent_closure_type = self._compile_queue.popleft()
        # print "\ncompiling", getattr(node, "name", "<module>"), parent_closure_type
        cr = self._closure_results[parent_module][node]
        live_inputs, live_outputs, liveness_warnings = usage_checker.get_liveness(node, fn=self.modules[parent_module].t.fn)
        # for (l, c), s in liveness_warnings:
        #     print "WARNING at %s:%s: %s" % (fn, l, s)
        # defined_inputs, defined_outputs, defined_errs = defined_checker.get_definedness(node)
        # assert not defined_errs, defined_errs
        required_phis = phi_analyzer.determine_phi_instructions(node)
        globals_ = []
        if isinstance(node, _ast.FunctionDef):
            globals_ = ast_utils.find_global_vars(node)
        body = node.body
        if isinstance(node, _ast.Lambda):
            # A lambda body is an expression; wrap it as `return <expr>`.
            body = [_ast.Return(node.body, lineno=node.lineno, col_offset=node.col_offset, not_real=True)]
        cfg = cfa.cfa(node, body)
        # cfg.show()
        # Need to enforce that for every transition, we can determine at some point what transition was made.
        # ie on the way out of a block we either need to know where we're going, so we can prepare,
        # or we need to know once we got into a block where we came from, so we can clean up.
        for nid in cfg.blocks:
            if nid != cfg.end and len(cfg.connects_to[nid]) > 1:
                for next_id in cfg.connects_to[nid]:
                    assert len(cfg.connects_from[next_id]) == 1, (nid, next_id)
            if nid != cfg.start and len(cfg.connects_from.get(nid, [])) > 1:
                for prev_id in cfg.connects_from[nid]:
                    assert len(cfg.connects_to[prev_id]) == 1, (prev_id, nid)
        start_sym_table = {}
        defined_from_args = set()
        if isinstance(node, (_ast.FunctionDef, _ast.Lambda)):
            assert not node.args.vararg
            assert not node.args.kwarg
            for (i, a) in enumerate(node.args.args):
                # This is the function arg contract: mark is not passed
                raw_arg_name = "__arg%d" % (i,)
                start_sym_table[raw_arg_name] = Variable(f_type.arg_types[i], "%" + raw_arg_name, 1, False)
                cr.from_local.add(raw_arg_name)
                for n in ast_utils.find_names(a):
                    defined_from_args.add(n.id)
        """
        Doing a bunch of closure-results checking:
        """
        # NOTE(review): `nonlocal` as an identifier is Python 2-only (it is a
        # keyword in Python 3).
        nonlocal = set()
        for var in live_outputs[(node, cfg.start)]:
            if var in defined_from_args:
                continue
            else:
                assert var in cr.from_closure or var in cr.from_global or var in cr.from_builtins or var in cr.globals_, var
                nonlocal.add(var)
        # Filter out range/xrange since the closure analyzer will report them but the compiler won't see them
        # also whitelist None since it's hard to find all occurrences of it (because it can implicitly be referenced)
        # also whitelist True and False, since the cfa pass can remove them
        whitelist = ["range", "xrange", "None", "True", "False"]
        if isinstance(node, _ast.Module):
            assert not nonlocal.difference(whitelist).difference(cr.from_builtins)
        else:
            pass
            # I don't think this is correct because the usage might not make it to being live...
            # assert nonlocal.difference(whitelist) == set(cr.from_closure).union(cr.from_global).union(cr.from_builtins).difference(whitelist), (nonlocal, cr.from_closure, cr.from_global, cr.from_builtins)
        for n in nonlocal:
            # Non-local names resolve through scope objects, not the per-block
            # symbol tables, so drop them from the liveness maps.
            for nid in cfg.blocks:
                live_inputs[(node, nid)].pop(n, None)
                live_outputs[(node, nid)].pop(n, None)
        for name in cr.from_closure:
            assert parent_closure_type
            if cr.takes_closure:
                assert parent_closure_type.has(name, include_parents=True), name
            else:
                assert parent_closure_type.has_constant(name, include_parents=True), name
        for name in cr.from_builtins:
            assert name in BUILTINS or name == "__name__", name
        if not isinstance(node, _ast.Module):
            for name in list(cr.from_global) + list(cr.globals_):
                assert self.modules[parent_module].t.has(name), name
        arg_str = ", ".join("%s %s" % (f_type.arg_types[i].llvm_type(), "%%__arg%d" % (i,)) for i in xrange(len(f_type.arg_types)))
        if isinstance(node, (_ast.FunctionDef, _ast.Lambda)):
            params = ""
            if cr.takes_closure:
                # The closure is passed as a hidden first argument.
                arg_str = "%s %%__parent_closure__, " % (parent_closure_type.llvm_type(),) + arg_str
            if arg_str.endswith(", "):
                arg_str = arg_str[:-2]
            if parent_closure_type:
                start_sym_table['__parent_closure__'] = Variable(parent_closure_type, "%__parent_closure__", 1, False)
                for nid in cfg.blocks:
                    # Need to set this as live at the end of the block, so that we don't gc it
                    if nid != cfg.end and nid not in cfg.connects_from.get(cfg.end, []):
                        live_inputs[(node, nid)]['__parent_closure__'] = usage_checker.LIVE
                    # At some point we needed this; not sure if we still do:
                    if nid != cfg.start and nid != cfg.end:
                        live_outputs[(node, nid)]['__parent_closure__'] = usage_checker.LIVE
        else:
            params = ""
            arg_str = ""
            assert parent_closure_type is None
        rtn_type = f_type.rtn_type.llvm_type() if f_type.rtn_type is not None_ else "void"
        emitter.pl("define %s %s %s(%s)" % (params, rtn_type, f_name, arg_str))
        emitter.pl("{")
        # This is necessary if the start block is not the first one iterated over
        # which is possible becaues cfg.blocks is a dict and doesnt define the iteration order
        emitter.pl("func_start:")
        emitter.pl(" br label %%block%d" % (cfg.start,))
        emitter.pl()
        sym_tables = {}
        end_blocks = {} # maps node_id to the name of the final generated block in that original block
        # Have to go through them in roughly program order;
        # the main requirement is that for any block evaluation,
        # one of that block's predecessors has already been evaluated
        for node_id in sorted(cfg.blocks.keys()):
            # print >>sys.stderr, "compiling block", node_id
            body = cfg.blocks[node_id]
            if node_id == cfg.end:
                assert not body
                if f_type.rtn_type is None_:
                    emitter.pl("block%d:" % node_id)
                    emitter.indent(4)
                    emitter.pl("ret void")
                    emitter.indent(-4)
                continue
            else:
                emitter.pl("block%d:" % node_id)
                emitter.indent(4)
            if node_id == cfg.start:
                initial_sym_table = start_sym_table
                if isinstance(node, (_ast.FunctionDef, _ast.Lambda)):
                    # Prepend synthetic `argname = __argN` assignments so the
                    # body sees arguments under their source names.
                    body = [_ast.Assign(targets=[node.args.args[i]], value=_ast.Name(ctx=_ast.Load(), id="__arg%d" % (i,), not_real=True, lineno=node.lineno, col_offset=node.col_offset), not_real=True, lineno=node.lineno, col_offset=node.col_offset) for i in xrange(len(node.args.args))] + body
                    for a in node.args.args:
                        for n in ast_utils.find_names(a):
                            assert isinstance(n.ctx, (_ast.Param, _ast.Store)), (n.ctx, n.id, n.lineno)
                            assert n.id not in cr.globals_
                            assert n.id not in cr.from_global
                            assert n.id not in cr.from_closure
                            assert n.id not in cr.from_builtins
                            cr.from_local.add(n.id)
                if isinstance(node, _ast.Module):
                    assert parent_closure_type is None
                elif cr.has_data():
                    # print "allocating closure for", node.name
                    parent_closure = None
                    if cr.takes_closure:
                        # TODO shouldn't always use the parent closure just because it's available;
                        # the closure analyzer should determine when lookups pass through this scope
                        # to the parent one
                        assert "__parent_closure__" in start_sym_table
                        parent_closure = Variable(parent_closure_type, "%__parent_closure__", 1, False)
                    parent_obj = bool(parent_closure and parent_closure.v)
                    closure = ClosureMT.create(emitter, node, parent_closure_type, parent_obj, cr, self.type_info)
                    initial_sym_table["__closure__"] = closure.alloc(emitter, parent_closure if parent_obj else None)
                    # this would be significantly easier if we made sure that every exit path went through the exit node
                    # ie and just had a return phi there
                    for nid in cfg.blocks:
                        if nid != cfg.end and nid not in cfg.connects_from.get(cfg.end, []):
                            live_inputs[(node, nid)]['__closure__'] = usage_checker.LIVE
                        if nid != cfg.start and nid != cfg.end:
                            live_outputs[(node, nid)]['__closure__'] = usage_checker.LIVE
            else:
                if len(cfg.connects_from[node_id]) == 1:
                    assert not required_phis[node_id], (node_id, cfg.connects_from[node_id], required_phis[node_id])
                initial_sym_table = {}
                min_prev_id = min(cfg.connects_from[node_id])
                assert min_prev_id in sym_tables
                dup_cache = {}
                # Different approaches to one-to-multi and multi-to-one links
                if len(cfg.connects_from[node_id]) == 1:
                    # On one-to-multi, the predecessor can't clean out its dead symbols,
                    # so have to add those to the successors
                    starting_syms = sym_tables[min_prev_id].keys()
                else:
                    # On multi-to-one, the successor can't know what dead variables there might be,
                    # so we rely on the predecessor to clean those up
                    starting_syms = live_outputs[(node, node_id)].keys()
                for sym in starting_syms:
                    if sym in cr.from_global or sym in cr.globals_:
                        continue
                    elif sym in required_phis[node_id]:
                        types = []
                        def get_var_location(block):
                            # Predecessors not yet compiled get a placeholder
                            # that is patched below once their end state is known.
                            if block in sym_tables:
                                types.append(sym_tables[block][sym].t)
                                return sym_tables[block][sym].v
                            return "#end_loc#%s#%d#" % (sym, block)
                        var = '%' + root_emitter.mkname(prefix="_%s_%d_" % (sym, node_id))
                        args = ', '.join('[ %s, #end_block#%d# ]' % (get_var_location(prev_id), prev_id) for prev_id in cfg.connects_from[node_id])
                        assert types # This is where we use the fact that we go through the blocks in program order
                        assert all(t == types[0] for t in types), "'%s' is live and has multiple possible types: %s (should be %s)" % (sym, types, self.type_info.get_block_starting_type(self, node, node_id, sym))
                        t = types[0]
                        emitter.pl("%s = phi %s %s" % (var, t.llvm_type(), args))
                        # This is the phi contract: mark is passed
                        initial_sym_table[sym] = Variable(t, var, 1, True)
                    else: # not a required phi
                        # Since we are only looking at live variables, and we are requiring
                        # that a live variable is defined on any possible path to this point,
                        # we can just pick any of the existing sym tables and it must have it
                        initial_sym_table[sym] = sym_tables[min_prev_id][sym].dup(dup_cache)
            # print map(ast_utils.format_node, body)
            # for n, v in initial_sym_table.iteritems():
            #     emitter.pl("; %s: %s" % (n, v.owned))
            # All variables that will have to be phi'd have to get raised, since we can cheat no longer
            vars_to_raise = {}
            for sym, live in live_inputs[(node, node_id)].iteritems():
                assert live == usage_checker.LIVE
                for next_id in cfg.connects_to[node_id]:
                    if sym in required_phis[next_id]:
                        assert len(cfg.connects_to[node_id]) == 1, "didn't break this critical edge?"
                        (next_id,) = cfg.connects_to[node_id]
                        next_type = self.type_info.get_block_starting_type(emitter, node, next_id, sym)
                        vars_to_raise[sym] = next_type
                        break
            self.blockname = "block%d" % node_id
            # This is where all the code generation actually happens:
            walker = CompileWalker(parent_module, node_id, f_type, self, emitter, initial_sym_table, self.type_info, live_inputs[(node, node_id)], vars_to_raise, self._closure_results, self._closure_results[parent_module][node], globals_, isinstance(node, _ast.Module))
            ast_utils.crawl_ast(body, walker, err_missing=True, fn=self._module_filenames[parent_module])
            sym_tables[node_id] = walker._st
            end_blocks[node_id] = self.blockname
            del self.blockname
            # for n, v in walker._st.iteritems():
            #     emitter.pl("; %s: %s" % (n, v.owned))
            for sym, live in live_inputs[(node, node_id)].iteritems():
                if sym in cr.from_global or sym in cr.from_closure or sym in cr.from_builtins or sym in cr.globals_:
                    assert sym not in walker._st
                    continue
                if sym in cr.used_in_nested:
                    assert isinstance(node, _ast.Module) or sym in walker._st, sym
                    continue
                if is_inlined_constant(sym, cr):
                    assert isinstance(node, _ast.Module) or sym in walker._st, (sym, node_id)
                    continue
                if sym in cr.from_local:
                    assert sym in walker._st
            emitter.indent(-4)
            emitter.pl()
        emitter.pl("}")
        emitter.pl()
        # Verify that the non-required-phi stuff actually works
        for node_id in cfg.blocks:
            if node_id == cfg.start or node_id == cfg.end:
                continue
            for sym, live in live_outputs[(node, node_id)].iteritems():
                if sym in cr.from_global or sym in cr.from_closure or sym in cr.from_builtins or sym in cr.globals_:
                    assert sym not in sym_tables[node_id]
                    continue
                assert live == usage_checker.LIVE
                for prev_id in cfg.connects_from[node_id]:
                    assert live_inputs[(node, prev_id)][sym] == usage_checker.LIVE
                if sym in required_phis[node_id]:
                    # Make sure everyone going into the phi node followed the contract
                    t = None
                    for prev_id in cfg.connects_from[node_id]:
                        prev_var = sym_tables[prev_id][sym]
                        assert prev_var.nrefs == 1
                        assert prev_var.marked, (node_id, prev_id, sym, prev_var.__dict__)
                        prev_t = prev_var.t
                        assert prev_t.get_instantiated() is prev_t
                        if t is None:
                            t = prev_t
                        else:
                            assert t is prev_t, (sym, node_id, prev_id, t, prev_t)
                else:
                    # Check to make sure that we really didn't need a phi node
                    var = None
                    var_node_id = None
                    for prev_id in cfg.connects_from[node_id]:
                        prev_var = sym_tables[prev_id][sym]
                        if var is None:
                            var = prev_var
                            var_node_id = prev_id
                            # print sym_tables[prev_id]['n'].__dict__
                            # print sym, prev_id, prev_var.__dict__, id(prev_var)
                        else:
                            # print sym_tables[prev_id]['n'].__dict__
                            # print sym, prev_id, prev_var.__dict__, id(prev_var)
                            assert var.equiv(prev_var), (sym, node_id, var_node_id, prev_id, var.__dict__, prev_var.__dict__)
        s = emitter.get_llvm() + '\n\n'
        # Patch the placeholders left by get_var_location now that every block
        # of this function has been compiled.
        def getloc(match):
            sym, block = match.groups()
            block = int(block)
            return str(sym_tables[block][sym].v)
        s = re.sub("#end_loc#([^#]+)#(\d+)#", getloc, s)
        def getblock(match):
            block, = match.groups()
            block = int(block)
            return "%" + end_blocks[block]
        s = re.sub("#end_block#(\d+)#", getblock, s)
        llvm_body.write(s)
    self._body = None
    # Move "%name = type ..." lines from the tail into the head so that all
    # type definitions appear before their uses.
    def move_typedef(m):
        llvm_head.write(m.group()[1:])
        return "\n"
    llvm_head.write("; typedefs:\n")
    tail = re.sub("\n%[\w\d_]+ = type [^\n]+\n", move_typedef, llvm_tail.getvalue())
    llvm_head.write('\n')
    llvm_head.write("define i64 @main(i64 %argc, i8** %argv) {\n")
    llvm_head.write(" call void @init_runtime(i64 %argc, i8** %argv)\n")
    llvm_head.write(" call void @__main___global()\n")
    # TODO: do this more cleanly
    em = CodeEmitter(root_emitter)
    em.indent(4)
    for m in self.modules.values():
        for n, t in m.t._struct.fields:
            m.incvref(em)
            m.getattr(em, n, skip_incref=True).getattr(em, "__decref__").call(em, [])
        for n, t in m.t._struct.constants.iteritems():
            t.decvref(em)
    llvm_head.write("\n ; decref'ing all module attributes:\n " + em.get_llvm() + '\n\n')
    llvm_head.write(" call void @teardown_runtime()\n")
    llvm_head.write(" ret i64 0\n")
    llvm_head.write("}\n")
    # Resolve the "#!id!#" placeholders recorded by the emitters; the C
    # output strips the leading "@" from each replacement.
    def replace(s, c=False):
        def getrepl(match):
            rid, = match.groups()
            rid = int(rid)
            r = root_emitter._replacements[rid]
            if c:
                assert r.startswith("@")
                return r[1:]
            return r
        return re.sub("#!(\d+)!#", getrepl, s)
    llvm_f.write("; head\n")
    llvm_f.write(replace(llvm_head.getvalue()))
    llvm_f.write("; body\n")
    llvm_f.write(replace(llvm_body.getvalue()))
    llvm_f.write("; tail\n")
    llvm_f.write(replace(tail))
    c_f.write(replace(c_head.getvalue(), c=True))
    c_f.write(replace(c_tail.getvalue(), c=True))
    # Drop state so a second compile() can't silently reuse it.
    self._compile_queue = None
    self._head = None
    self._tail = None
    self._out_f = None
    self._cfile = None
    if deps_f:
        def fmt(fn):
            # Escape spaces for make syntax.
            return fn.replace(" ", "\\ ")
        # TODO so hacky
        deps_f.write("%s: %s\n" % (fmt(fn.replace(".py", ".gen.ll")), " ".join(fmt(s) for s in self._loaded_modules)))
        for fn in self._loaded_modules:
            deps_f.write("%s:\n" % fmt(fn))
        deps_f.close()
    for t in ListMT._ListMT__lists.itervalues():
        # Sanity check: every list type we created was fully emitted.
        t._MT__check_initialized("write")
| kmod/icbd | icbd/compiler/codegen.py | Python | mit | 63,826 |
#!/usr/bin/env /usr/bin/python
# Copyright 2013-2014 Clayton Smith (argilo@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gnuradio import gr, atsc, blocks, analog, digital, filter, uhd
from gnuradio.filter import firdes
from grc_gnuradio import blks2 as grc_blks2
import sys, os
import osmosdr
def main(args):
    """Build and run an ATSC transmit flowgraph on a USRP.

    args: [port] or [port, output_file].  The MPEG transport stream is read
    from a local TCP connection on *port*; the modulated baseband is sent to
    the USRP and optionally also written to *output_file*.
    """
    nargs = len(args)
    if nargs == 1:
        port = int(args[0])
        outfile = None
    elif nargs == 2:
        port = int(args[0])
        outfile = args[1]
    else:
        # Fixed: removed a stray trailing semicolon here.
        sys.stderr.write("Usage: atsc-blade.py port [output_file]\n")
        sys.exit(1)

    symbol_rate = 4500000.0 / 286 * 684  # ~10.762 Msym/s
    pilot_freq = 309441
    center_freq = 441000000
    tx_gain = 83 # max 89.5

    tb = gr.top_block()

    # USRP sink, master clock at 4x the symbol rate.
    out = uhd.usrp_sink(
        device_addr="recv_frame_size=65536,num_recv_frames=128,send_frame_size=65536,num_send_frames=128,master_clock_rate=" + str(symbol_rate*4),
        stream_args=uhd.stream_args(
            cpu_format="fc32",
            otw_format="sc16",
            channels=range(1),
        ),
    )
    out.set_samp_rate(symbol_rate)
    out.set_center_freq(center_freq, 0)
    out.set_gain(tx_gain, 0)

    #src = blocks.udp_source(gr.sizeof_char*1, "127.0.0.1", port, 18800, True)
    src = grc_blks2.tcp_source(gr.sizeof_char*1, "127.0.0.1", port, True)

    # ATSC encoding chain: pad -> randomize -> Reed-Solomon encode ->
    # interleave -> trellis encode -> field sync mux.
    pad = atsc.pad()
    rand = atsc.randomizer()
    rs_enc = atsc.rs_encoder()
    inter = atsc.interleaver()
    trell = atsc.trellis_encoder()
    fsm = atsc.field_sync_mux()
    v2s = blocks.vector_to_stream(gr.sizeof_char, 1024)
    minn = blocks.keep_m_in_n(gr.sizeof_char, 832, 1024, 4)
    # Map to the 8 symbol levels with a +1.25 offset (pilot carrier).
    c2sym = digital.chunks_to_symbols_bc(([symbol + 1.25 for symbol in [-7,-5,-3,-1,1,3,5,7]]), 1)
    offset = analog.sig_source_c(symbol_rate, analog.GR_COS_WAVE, -3000000 + pilot_freq, 0.9, 0)
    mix = blocks.multiply_vcc(1)
    rrc = filter.fft_filter_ccc(1, firdes.root_raised_cosine(0.1, symbol_rate, symbol_rate/2, 0.1152, 100))

    tb.connect(src, pad, rand, rs_enc, inter, trell, fsm, v2s, minn, c2sym)
    tb.connect((c2sym, 0), (mix, 0))
    tb.connect((offset, 0), (mix, 1))
    tb.connect(mix, rrc, out)
    if outfile:
        # Tap the filtered baseband to a file as well.
        dst = blocks.file_sink(gr.sizeof_gr_complex, outfile)
        tb.connect(rrc, dst)

    tb.run()
if __name__ == '__main__':
    # Drop the program name; main() parses the remaining arguments itself.
    main(sys.argv[1:])
| argilo/sdr-examples | atsc-b200.py | Python | gpl-3.0 | 2,908 |
from __future__ import absolute_import
try:
from sklearn.model_selection import cross_val_predict as _orig_cross_val_predict
except ImportError:
from sklearn.cross_validation import cross_val_predict as _orig_cross_val_predict
import pandas as pd
from .._xy_estimator import make_estimator, make_xy
from .._utils import verify_x_type, verify_y_type
from .__init__ import _sklearn_ver
def cross_val_predict(
        estimator,
        X,
        y=None,
        groups=None,
        cv=None,
        n_jobs=1,
        verbose=0,
        fit_params=None,
        pre_dispatch='2*n_jobs',
        method='predict'):
    """
    Generate cross-validated estimates for each input data point.

    Arguments:

        estimator: estimator object implementing 'fit' and 'predict'
            The object to use to fit the data.

        X: :class:`pandas.DataFrame`
            The data to fit.

        y: The target variable to try to predict in the case of
            supervised learning.

        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set. Only supported for sklearn versions newer than
            0.17; passing a non-``None`` value on older versions raises
            :class:`ValueError`.

        cv : int, cross-validation generator or an iterable, optional
            Determines the cross-validation splitting strategy.
            Possible inputs for cv are:

            - ``None``, to use the default 3-fold cross validation,
            - integer, to specify the number of folds in a `(Stratified)KFold`,
            - An object to be used as a cross-validation generator.
            - An iterable yielding train, test splits.

            For integer/``None`` inputs, if the estimator is a classifier and
            ``y`` is either binary or multiclass, :class:`StratifiedKFold` is
            used. In all other cases, :class:`KFold` is used.

        n_jobs : integer, optional
            The number of CPUs to use to do the computation. -1 means
            'all CPUs'.

        verbose : integer, optional
            The verbosity level.

        fit_params : dict, optional
            Parameters to pass to the fit method of the estimator.

        pre_dispatch : int, or string, optional
            Controls the number of jobs that get dispatched during parallel
            execution. Reducing this number can be useful to avoid an
            explosion of memory consumption when more jobs get dispatched
            than CPUs can process. This parameter can be:

            - ``None``, in which case all the jobs are immediately
              created and spawned.
            - An int, giving the exact number of total jobs that are spawned.
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'.

        method : string, optional, default: 'predict'
            Invokes the passed method name of the passed estimator. Only
            ``'predict'`` is supported for sklearn versions up to 0.17.

    Returns:

        :class:`pandas.DataFrame` or :class:`pandas.Series` (depending on the
        shape of the predictions), indexed like ``X``.

    Example:

        >>> import pandas as pd
        >>> from ibex.sklearn.linear_model import LinearRegression
        >>> try:
        ...     from ibex.sklearn.model_selection import cross_val_predict
        ... except: # Older sklearn versions
        ...     from ibex.sklearn.cross_validation import cross_val_predict
        >>> df = pd.DataFrame({
        ...         'x': range(100),
        ...         'y': range(100),
        ...     },
        ...     index=['i%d' % i for i in range(100)])
        >>> cross_val_predict(
        ...     LinearRegression(),
        ...     df[['x']],
        ...     df['y'])
        i0 ...
        i1 ...
        i2 ...
        i3 ...
        i4 ...
        i5 ...
        ...

    """
    verify_x_type(X)
    verify_y_type(y)

    # Wrap the estimator so it carries X's index through the CV machinery.
    est = make_estimator(estimator, X.index)
    X_, y_ = make_xy(X, y)

    if _sklearn_ver > 17:
        y_hat = _orig_cross_val_predict(
            est,
            X_,
            y_,
            groups,
            cv,
            n_jobs,
            verbose,
            fit_params,
            pre_dispatch,
            method)
    else:
        # sklearn <= 0.17's signature lacks `groups` and `method`; reject
        # non-default values explicitly rather than silently ignoring them.
        if groups is not None:
            raise ValueError('groups not supported for cross_val_predict in this version of sklearn')
        if method != 'predict':
            raise ValueError('method not supported for cross_val_predict in this version of sklearn')
        y_hat = _orig_cross_val_predict(
            est,
            X_,
            y_,
            cv,
            n_jobs,
            verbose,
            fit_params,
            pre_dispatch)

    # BUG FIX: index the result by X.index rather than y.index. The docstring
    # declares y optional; y=None previously raised AttributeError here, and
    # predictions are one-per-row of X in any case.
    if len(y_hat.shape) == 1:
        return pd.Series(y_hat, index=X.index)
    else:
        return pd.DataFrame(y_hat, index=X.index)
| atavory/ibex | ibex/sklearn/_cross_val_predict.py | Python | bsd-3-clause | 5,032 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
These are the user properties.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from twisted.protocols.amp import Command
#, String, Integer, ListOf, Boolean, DateTime
#from ...types import ObjHash, ObjSig, ObjPubKey, ObjAddress, ObjBlob
# Twisted AMP message types for per-user state replication. Each piece of
# user state has an *Update command (push a new signed value) and a *Sync
# command (pull/replicate). Argument and response schemas are not yet
# defined; all responses are empty placeholders for now.


class UserUpdate(Command):
    #    arguments = [(b'name', String()),
    #                 (b'port', Integer())]
    #    response = [(b'hello', String())]
    response = []
    """
    new public key, previous one signing it
    """


class UserSync(Command):
    response = []


class UserBioUpdate(Command):  # blob, previous hash, sig
    response = []


class UserBioSync(Command):
    response = []


class UserLocationUpdate(Command):  # zkp, previous hash, sig
    response = []


class UserLocationSync(Command):
    response = []


class UserCourierUpdate(Command):  # fingerprint, ip, previous hash, sig
    response = []


class UserCourierSync(Command):
    response = []


class UserRendesvousUpdate(Command):  # fingerprint, ip, previous hash, sig
    response = []


class UserRendesvousSync(Command):
    response = []


class UserReqJoinUpdate(Command):  # org fingerprints summary, previous hash, sig
    response = []


class UserReqJoinSync(Command):
    response = []


# org fingerprints summary, commit hash, vote object, previous, sig
class UserProposalUpdate(Command):
    response = []


class UserProposalSync(Command):
    response = []


# org fingerprints summary, proof of membership, previous, sig
class UserMembershipUpdate(Command):
    response = []


class UserMembershipSync(Command):
    response = []


# the vote, my ballot, previous hash, sig
class UserCastedVotesUpdate(Command):
    response = []


class UserCastedVotesSync(Command):
    response = []


# hash of the starting commit, commit blob, previous hash, sig
class UserTreeUpdate(Command):
    response = []


class UserTreeSync(Command):
    response = []


class UserSavesUpdate(Command):  # hash to save, previous saves hash, sig
    response = []


class UserSavesSync(Command):
    response = []
class userResponders(object):
    """AMP responder implementations for the ``User*`` commands above.

    All handlers except ``Update`` are unimplemented stubs. ``redis`` and
    ``neo4j`` are class-level placeholders for storage backends (``None``
    until a connection is attached).
    """

    redis = None
    neo4j = None

    def __init__(self):
        # would pulll Redis online
        pass

    @UserUpdate.responder
    def Update(self):
        #    arguments = [(b'name', String()),
        #                 (b'port', Integer())]
        #    response = [(b'hello', String())]
        """
        new public key, previous one signing it
        """
        # checkPreviousKeySignsNewOne()
        userKeyFingerprint = b'TBD'
        userKey = b'TBD'
        # NOTE(review): self.redis is None by default, so this call fails
        # until a real client with a .write() method is attached -- confirm.
        self.redis.write(b'users:' + userKeyFingerprint, userKey)
        return True

    @UserSync.responder
    def Sync(self):
        pass

    @UserBioUpdate.responder
    def BioUpdate(self):  # blob, previous hash, sig
        pass

    @UserBioSync.responder
    def BioSync(self):
        pass

    @UserLocationUpdate.responder
    def LocationUpdate(self):  # zkp, previous hash, sig
        pass

    @UserLocationSync.responder
    def LocationSync(self):
        pass

    @UserCourierUpdate.responder
    def CourierUpdate(self):  # fingerprint, ip, previous hash, sig
        pass

    @UserCourierSync.responder
    def CourierSync(self):
        pass

    @UserRendesvousUpdate.responder
    def RendesvousUpdate(self):  # fingerprint, ip, previous hash, sig
        pass

    @UserRendesvousSync.responder
    def RendesvousSync(self):
        pass

    @UserReqJoinUpdate.responder
    def ReqJoinUpdate(self):  # org fingerprints summary, previous hash, sig
        pass

    @UserReqJoinSync.responder
    def ReqJoinSync(self):
        pass

    @UserProposalUpdate.responder
    # org fingerprints summary, commit hash, vote object, previous, sig
    def ProposalUpdate(self):
        pass

    @UserProposalSync.responder
    def ProposalSync(self):
        pass

    @UserMembershipUpdate.responder
    # org fingerprints summary, proof of membership, previous, sig
    def MembershipUpdate(self):
        pass

    @UserMembershipSync.responder
    def MembershipSync(self):
        pass

    @UserCastedVotesUpdate.responder
    def CastedVotesUpdate(self):  # the vote, my ballot, previous hash, sig
        pass

    @UserCastedVotesSync.responder
    def CastedVotesSync(self):
        pass

    @UserTreeUpdate.responder
    def TreeUpdate(self):  # hash of the starting commit, commit blob, previous hash, sig
        pass

    @UserTreeSync.responder
    def TreeSync(self):
        pass

    @UserSavesUpdate.responder
    def SavesUpdate(self):  # hash to save, previous saves hash, sig
        pass

    @UserSavesSync.responder
    def SavesSync(self):
        pass
# congredi/commands/proofs/user.py 99 22 78% 116, 128,
# 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 173, 177, 182, 186,
# 190, 194, 198, 202, 206, 210
| congredi/congredi | congredi/commands/proofs/user.py | Python | gpl-3.0 | 4,897 |
from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import *
from rest_framework_json_api.utils import format_relation_name, get_resource_type_from_instance, \
get_resource_type_from_serializer
class ResourceIdentifierObjectSerializer(BaseSerializer):
    """Serializer for JSON API resource identifier objects.

    Maps between model instances and the ``{'type': ..., 'id': ...}`` dicts
    used in JSON API relationship payloads.
    """

    default_error_messages = {
        'incorrect_model_type': _('Incorrect model type. Expected {model_type}, received {received_type}.'),
        'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
        'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),
    }

    def __init__(self, *args, **kwargs):
        # model_class is required unless an instance is supplied, because
        # to_internal_value needs it to resolve ids back to objects.
        self.model_class = kwargs.pop('model_class', None)
        if 'instance' not in kwargs and not self.model_class:
            raise RuntimeError('ResourceIdentifierObjectsSerializer must be initialized with a model class.')
        super(ResourceIdentifierObjectSerializer, self).__init__(*args, **kwargs)

    def to_representation(self, instance):
        return {
            'type': format_relation_name(get_resource_type_from_instance(instance)),
            'id': str(instance.pk)
        }

    def to_internal_value(self, data):
        if data['type'] != format_relation_name(self.model_class.__name__):
            self.fail('incorrect_model_type', model_type=self.model_class, received_type=data['type'])
        pk = data['id']
        try:
            return self.model_class.objects.get(pk=pk)
        except ObjectDoesNotExist:
            self.fail('does_not_exist', pk_value=pk)
        except (TypeError, ValueError):
            # BUG FIX: the payload key is 'id' (already bound to `pk`), not
            # 'pk'; data['pk'] raised KeyError instead of reporting the
            # intended 'incorrect_type' validation error.
            self.fail('incorrect_type', data_type=type(pk).__name__)
class SparseFieldsetsMixin(object):
    """Serializer mixin honoring the JSON API ``fields[type]`` query param.

    When the request carries a sparse-fieldset parameter for this
    serializer's resource type, every field not listed in it is removed
    (except the self-link URL field).
    """

    def __init__(self, *args, **kwargs):
        context = kwargs.get('context')
        request = context.get('request') if context else None

        if request:
            sparse_fieldset_query_param = 'fields[{}]'.format(get_resource_type_from_serializer(self))
            try:
                param_name = next(key for key in request.query_params if sparse_fieldset_query_param in key)
            except StopIteration:
                pass
            else:
                fieldset = request.query_params.get(param_name).split(',')
                # BUG FIX: iterate over a snapshot -- popping from
                # self.fields while iterating its items() raises
                # "dictionary changed size during iteration" on Python 3.
                for field_name, field in list(self.fields.items()):
                    if field_name == api_settings.URL_FIELD_NAME:  # leave self link there
                        continue
                    if field_name not in fieldset:
                        self.fields.pop(field_name)

        super(SparseFieldsetsMixin, self).__init__(*args, **kwargs)
class HyperlinkedModelSerializer(SparseFieldsetsMixin, HyperlinkedModelSerializer):
    """
    A type of `ModelSerializer` that uses hyperlinked relationships instead
    of primary key relationships. Specifically:

    * A 'url' field is included instead of the 'id' field.
    * Relationships to other instances are hyperlinks, instead of primary keys.
    * A mixin class to enable sparse fieldsets is included.

    The class body is intentionally empty: all behavior comes from
    SparseFieldsetsMixin plus DRF's HyperlinkedModelSerializer (which this
    class deliberately shadows by name).
    """
| hnakamur/django-rest-framework-json-api | rest_framework_json_api/serializers.py | Python | bsd-2-clause | 3,036 |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2012-2014 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""A simple parser for ini-style files that's better than Python's
ConfigParser/configparser.
Functions:
read
Generate a stream of `pwkit.Holder` instances from an ini-format file.
mutate
Rewrite an ini file chunk by chunk.
write
Write a stream of `pwkit.Holder` instances to an ini-format file.
mutate_stream
Lower-level version; only operates on streams, not path names.
read_stream
Lower-level version; only operates on streams, not path names.
write_stream
Lower-level version; only operates on streams, not path names.
mutate_in_place
Rewrite an ini file specififed by its path name, in place.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('''FileChunk InifileError mutate_in_place mutate_stream
mutate read_stream read write_stream write''').split ()
import io, os, re, six
from six.moves import range
from . import Holder, PKError
# Section header line: "[name]".
sectionre = re.compile (r'^\[(.*)]\s*$')
# Plain "key = value"; value may be empty or continued on indented lines.
keyre = re.compile (r'^(\S+)\s*=(.*)$') # leading space chomped later
# Quoted 'key = "value"' form whose value supports backslash escapes.
escre = re.compile (r'^(\S+)\s*=\s*"(.*)"\s*$')
class InifileError (PKError):
    """Raised when ini-format input is malformed (e.g. a key outside any
    section, or an unparsable line)."""
    pass
def read_stream (stream):
    """Parse an ini-format stream, yielding one `Holder` per section.

    Each yielded Holder has a `section` attribute (the section name) plus one
    attribute per key. Values may span multiple indented lines; quoted
    values ('key = "..."') are unescaped and cannot be continued.

    Python 3 compat note: we're assuming `stream` gives bytes not unicode."""
    section = None  # Holder for the section currently being accumulated
    key = None      # key whose (possibly multi-line) value is accumulating
    data = None     # text accumulated so far for `key`

    for fullline in stream:
        line = fullline.split ('#', 1)[0]  # strip trailing comment

        m = sectionre.match (line)
        if m is not None:
            # New section: flush any pending key, then the finished section.
            if section is not None:
                if key is not None:
                    section.set_one (key, data.strip ())
                    key = data = None
                yield section

            section = Holder ()
            section.section = m.group (1)
            continue

        if len (line.strip ()) == 0:
            # Blank line terminates a multi-line value.
            if key is not None:
                section.set_one (key, data.strip ())
                key = data = None
            continue

        m = escre.match (fullline)
        if m is not None:
            # Quoted value: matched against the *full* line so that '#' may
            # appear inside the quotes; stored verbatim after unescaping.
            if section is None:
                raise InifileError ('key seen without section!')
            if key is not None:
                section.set_one (key, data.strip ())
            key = m.group (1)
            data = m.group (2).replace (r'\"', '"').replace (r'\n', '\n').replace (r'\\', '\\')
            section.set_one (key, data)
            key = data = None
            continue

        m = keyre.match (line)
        if m is not None:
            # Plain key = value; keep a trailing space so continuation lines
            # join with a separator. An empty value becomes a lone space.
            if section is None:
                raise InifileError ('key seen without section!')
            if key is not None:
                section.set_one (key, data.strip ())
            key = m.group (1)
            data = m.group (2)
            if not len (data):
                data = ' '
            elif not data[-1].isspace ():
                data += ' '
            continue

        if line[0].isspace () and key is not None:
            # Indented continuation of the current value.
            data += line.strip () + ' '
            continue

        raise InifileError ('unparsable line: ' + line[:-1])

    # End of stream: flush the final key and section, if any.
    if section is not None:
        if key is not None:
            section.set_one (key, data.strip ())
        yield section
def read (stream_or_path):
    """Generate a stream of `Holder` instances from an ini-format source.

    `stream_or_path` may be an open stream or the path of a file to open.
    """
    if isinstance (stream_or_path, six.string_types):
        stream_or_path = io.open (stream_or_path, 'rt')
    return read_stream (stream_or_path)
# Writing
def write_stream (stream, holders, defaultsection=None):
    """Very simple writing in ini format. The simple stringification of each value
    in each Holder is printed, and no escaping is performed. (This is most
    relevant for multiline values or ones containing pound signs.) `None` values are
    skipped.

    Arguments:

    stream
      A text stream to write to.
    holders
      An iterable of objects to write. Their fields will be
      written as sections.
    defaultsection=None
      Section name to use if a holder doesn't contain a
      `section` field.

    """
    first = True

    for item in holders:
        # Separate consecutive sections with a single blank line.
        if not first:
            print ('', file=stream)
        first = False

        sectname = item.get ('section', defaultsection)
        if sectname is None:
            raise ValueError ('cannot determine section name for item <%s>' % item)
        print ('[%s]' % sectname, file=stream)

        # Emit keys sorted, skipping the special 'section' field and None values.
        for fieldname in sorted (k for k in six.iterkeys (item.__dict__) if k != 'section'):
            value = item.get (fieldname)
            if value is None:
                continue
            print ('%s = %s' % (fieldname, value), file=stream)
def write (stream_or_path, holders, **kwargs):
    """Very simple writing in ini format. The simple stringification of each value
    in each Holder is printed, and no escaping is performed. (This is most
    relevant for multiline values or ones containing pound signs.) `None` values are
    skipped.

    Arguments:

    stream_or_path
      A text stream to write to, or the path of a file to create.
    holders
      An iterable of objects to write. Their fields will be
      written as sections.
    defaultsection=None
      Section name to use if a holder doesn't contain a
      `section` field.

    When a path is given, the file is closed when writing completes or
    fails (previously it was left for the garbage collector to close,
    risking unflushed data).
    """
    if isinstance (stream_or_path, six.string_types):
        # Safe to close here: write_stream fully consumes `holders`
        # before returning.
        with io.open (stream_or_path, 'wt') as f:
            return write_stream (f, holders, **kwargs)
    else:
        return write_stream (stream_or_path, holders, **kwargs)
# Parsing plus inline modification, preserving the file as much as possible.
#
# I'm pretty sure that this code gets the corner cases right, but it hasn't
# been thoroughly tested, and it's a little hairy ...
class FileChunk (object):
    """One section of an ini file, preserving its raw lines for rewriting.

    `data` is a Holder of the parsed keys; `_lines` holds (assoc, rawline)
    pairs, where `assoc` is the key a line belongs to, or None for section
    headers, comments, and blank lines.
    """

    def __init__ (self):
        self.data = Holder ()
        self._lines = []

    def _addLine (self, line, assoc):
        # Record a raw line, tagged with the key it belongs to (or None).
        self._lines.append ((assoc, line))

    def set (self, name, value):
        """Set key `name` to `value` in the chunk's raw-line representation:
        replace the first line associated with the key, drop any duplicate
        lines, or append a new line at the end of the chunk's content."""
        newline = ((u'%s = %s' % (name, value)) + os.linesep).encode ('utf8')
        first = True

        for i in range (len (self._lines)):
            assoc, line = self._lines[i]
            if assoc != name:
                continue
            if first:
                self._lines[i] = (assoc, newline)
                first = False
            else:
                # delete the line (marker pair skipped by emit())
                self._lines[i] = (None, None)

        if first:
            # Need to append the line to the last block: scan backwards for
            # the last key-bearing line and insert just after it.
            for i in range (len (self._lines) - 1, -1, -1):
                if self._lines[i][0] is not None:
                    break
            self._lines.insert (i + 1, (name, newline))

    def emit (self, stream):
        # Write out the (possibly modified) raw lines, skipping deletions.
        for assoc, line in self._lines:
            if line is None:
                continue
            stream.write (line)
def mutate_stream (instream, outstream):
    """Copy ini data from `instream` to `outstream`, yielding each FileChunk
    so the caller may modify it (via FileChunk.set) before it is written.

    Python 3 compat note: we're assuming `stream` gives bytes not unicode."""
    chunk = None      # FileChunk currently being accumulated
    key = None        # key whose (possibly multi-line) value is accumulating
    data = None       # text accumulated so far for `key`
    misclines = []    # lines seen before the first section header

    for fullline in instream:
        line = fullline.split ('#', 1)[0]

        m = sectionre.match (line)
        if m is not None:
            # New chunk: flush the previous one to the caller, then emit it.
            if chunk is not None:
                if key is not None:
                    chunk.data.set_one (key, data.strip ())
                    key = data = None
                yield chunk
                chunk.emit (outstream)

            chunk = FileChunk ()
            for miscline in misclines:
                chunk._addLine (miscline, None)
            misclines = []
            chunk.data.section = m.group (1)
            chunk._addLine (fullline, None)
            continue

        if len (line.strip ()) == 0:
            # Blank line terminates a multi-line value.
            if key is not None:
                chunk.data.set_one (key, data.strip ())
                key = data = None

            if chunk is not None:
                chunk._addLine (fullline, None)
            else:
                misclines.append (fullline)
            continue

        m = escre.match (fullline)
        if m is not None:
            # Quoted value; stored verbatim after unescaping.
            if chunk is None:
                raise InifileError ('key seen without section!')
            if key is not None:
                chunk.data.set_one (key, data.strip ())
            key = m.group (1)
            data = m.group (2).replace (r'\"', '"').replace (r'\n', '\n').replace (r'\\', '\\')
            chunk.data.set_one (key, data)
            chunk._addLine (fullline, key)
            key = data = None
            continue

        m = keyre.match (line)
        if m is not None:
            # Plain key = value; value may continue on indented lines.
            if chunk is None:
                raise InifileError ('key seen without section!')
            if key is not None:
                chunk.data.set_one (key, data.strip ())
            key = m.group (1)
            data = m.group (2)
            # BUG FIX: an empty value ("key =") used to crash on data[-1]
            # with IndexError; treat it as a blank value, matching
            # read_stream's handling of the same case.
            if not len (data):
                data = ' '
            elif not data[-1].isspace ():
                data += ' '
            chunk._addLine (fullline, key)
            continue

        if line[0].isspace () and key is not None:
            # Indented continuation of the current value.
            data += line.strip () + ' '
            chunk._addLine (fullline, key)
            continue

        raise InifileError ('unparsable line: ' + line[:-1])

    # End of stream: flush the final chunk, if any.
    if chunk is not None:
        if key is not None:
            chunk.data.set_one (key, data.strip ())
        yield chunk
        chunk.emit (outstream)
def mutate (instream_or_path, outstream_or_path, outmode='wb'):
    """Rewrite an ini file chunk by chunk, from a source stream or path to a
    destination stream or path; yields each FileChunk for modification."""
    instream = (io.open (instream_or_path, 'rb')
                if isinstance (instream_or_path, six.string_types)
                else instream_or_path)
    outstream = (io.open (outstream_or_path, outmode)
                 if isinstance (outstream_or_path, six.string_types)
                 else outstream_or_path)
    return mutate_stream (instream, outstream)
def mutate_in_place (inpath):
    """Rewrite the ini file at `inpath` in place, yielding its FileChunks so
    the caller may modify them.

    The rewritten data go to a temporary sibling file ("<inpath>.new") that
    is renamed over the original only after it has been written and closed;
    on any error the temporary file is removed and the original is left
    untouched.

    (Cleanup: the previous version imported `sys.exc_info` and `os.unlink`
    without using them consistently; the module-level `os` is used instead.)
    """
    tmppath = inpath + '.new'

    with io.open (inpath, 'rb') as instream:
        try:
            with io.open (tmppath, 'wb') as outstream:
                for item in mutate_stream (instream, outstream):
                    yield item
            # Only swap in the new file once it is fully written and closed.
            os.rename (tmppath, inpath)
        except:
            # Bare except is deliberate: clean up and re-raise even for
            # GeneratorExit/KeyboardInterrupt.
            try:
                os.unlink (tmppath)
            except Exception:
                pass
            raise
| pkgw/pwkit | pwkit/inifile.py | Python | mit | 10,056 |
# Render the CoolProp logo: a 3-D log(p) surface over (log rho, T) for water,
# with the saturation curve overlaid, saved as PNG and PDF.
import matplotlib
matplotlib.use('WXAgg')  # must select the backend before pyplot is imported
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import CoolProp
from mpl_toolkits.mplot3d import Axes3D

fig = plt.figure(figsize=(2, 2))
ax = fig.add_subplot(111, projection='3d')

# Property grid: NR log-spaced densities x NT temperatures, evaluated in one
# flattened PropsSI call and reshaped back to the grid.
NT = 1000
NR = 1000
rho, t = np.logspace(np.log10(2e-3), np.log10(1100), NR), np.linspace(275.15, 700, NT)
RHO, T = np.meshgrid(rho, t)
P = CoolProp.CoolProp.PropsSI('P', 'D', RHO.reshape((NR * NT, 1)), 'T', T.reshape((NR * NT, 1)), 'REFPROP-Water').reshape(NT, NR)

# Saturation curve: bubble (Q=0) and dew (Q=1) lines up to ~critical point.
Tsat = np.linspace(273.17, 647.0, 100)
psat = CoolProp.CoolProp.PropsSI('P', 'Q', 0, 'T', Tsat, 'Water')
rhoL = CoolProp.CoolProp.PropsSI('D', 'Q', 0, 'T', Tsat, 'Water')
rhoV = CoolProp.CoolProp.PropsSI('D', 'Q', 1, 'T', Tsat, 'Water')

ax.plot_surface(np.log(RHO), T, np.log(P), cmap=cm.jet, edgecolor='none')
ax.plot(np.log(rhoL), Tsat, np.log(psat), color='k', lw=2)
ax.plot(np.log(rhoV), Tsat, np.log(psat), color='k', lw=2)
ax.text(0.3, 800, 22, "CoolProp", size=12)

# Bare, unadorned 3-D view for use as a logo.
ax.set_frame_on(False)
ax.set_axis_off()
ax.view_init(22, -136)
ax.set_xlabel(r'$\ln\rho$ ')
ax.set_ylabel('$T$')
ax.set_zlabel('$p$')
plt.tight_layout()
plt.savefig('_static/PVTCP.png', transparent=True)
plt.savefig('_static/PVTCP.pdf', transparent=True)
plt.close()
| CoolProp/CoolProp | Web/scripts/logo_2013.py | Python | mit | 1,263 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import mock
from twisted.internet import defer
from twisted.internet import task
from twisted.trial import unittest
from buildbot.data import changes
from buildbot.data import resultspec
from buildbot.process.users import users
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
from buildbot.util import epoch2datetime
class ChangeEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the single-change data endpoint (/changes/<changeid>)."""

    endpointClass = changes.ChangeEndpoint
    resourceTypeClass = changes.Change

    def setUp(self):
        self.setUpEndpoint()
        self.db.insertTestData([
            fakedb.SourceStamp(id=234),
            fakedb.Change(changeid=13, branch=u'trunk', revision=u'9283',
                          repository=u'svn://...', codebase=u'cbsvn',
                          project=u'world-domination', sourcestampid=234),
        ])

    def tearDown(self):
        self.tearDownEndpoint()

    def test_get_existing(self):
        # A known changeid returns validated change data.
        d = self.callGet(('changes', '13'))

        @d.addCallback
        def check(change):
            self.validateData(change)
            self.assertEqual(change['project'], 'world-domination')
        return d

    def test_get_missing(self):
        # An unknown changeid yields None rather than an error.
        d = self.callGet(('changes', '99'))

        @d.addCallback
        def check(change):
            self.assertEqual(change, None)
        return d
class ChangesEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the change-collection data endpoint (/changes)."""

    endpointClass = changes.ChangesEndpoint
    resourceTypeClass = changes.Change

    def setUp(self):
        self.setUpEndpoint()
        # Two changes, ids 13 and 14, each with its own sourcestamp.
        self.db.insertTestData([
            fakedb.SourceStamp(id=133),
            fakedb.Change(changeid=13, branch=u'trunk', revision=u'9283',
                          repository=u'svn://...', codebase=u'cbsvn',
                          project=u'world-domination', sourcestampid=133),
            fakedb.SourceStamp(id=144),
            fakedb.Change(changeid=14, branch=u'devel', revision=u'9284',
                          repository=u'svn://...', codebase=u'cbsvn',
                          project=u'world-domination', sourcestampid=144),
        ])

    def tearDown(self):
        self.tearDownEndpoint()

    @defer.inlineCallbacks
    def test_get(self):
        changes = yield self.callGet(('changes',))
        self.validateData(changes[0])
        self.assertEqual(changes[0]['changeid'], 13)
        self.validateData(changes[1])
        self.assertEqual(changes[1]['changeid'], 14)

    @defer.inlineCallbacks
    def test_getRecentChanges(self):
        # limit + order-by-changeid is honored (pushed down to the db layer).
        resultSpec = resultspec.ResultSpec(limit=1, order=['-changeid'])
        changes = yield self.callGet(('changes',), resultSpec=resultSpec)
        self.validateData(changes[0])
        self.assertEqual(changes[0]['changeid'], 14)
        self.assertEqual(len(changes), 1)

    @defer.inlineCallbacks
    def test_getChangesOtherOrder(self):
        resultSpec = resultspec.ResultSpec(limit=1, order=['-when_time_stamp'])
        changes = yield self.callGet(('changes',), resultSpec=resultSpec)
        # limit not implemented for other order
        self.assertEqual(len(changes), 2)

    @defer.inlineCallbacks
    def test_getChangesOtherOffset(self):
        resultSpec = resultspec.ResultSpec(
            limit=1, offset=1, order=['-changeid'])
        changes = yield self.callGet(('changes',), resultSpec=resultSpec)
        # limit not implemented for other offset
        self.assertEqual(len(changes), 2)
class Change(interfaces.InterfaceTests, unittest.TestCase):
    """Tests for the Change resource type's addChange update method: correct
    mq message, db row, and change-user linkage for various inputs."""

    # Expected mq message for the change inserted by test_addChange.
    changeEvent = {
        'author': u'warner',
        'branch': u'warnerdb',
        'category': u'devel',
        'codebase': u'',
        'comments': u'fix whitespace',
        'changeid': 500,
        'files': [u'master/buildbot/__init__.py'],
        'parent_changeids': [],
        'project': u'Buildbot',
        'properties': {u'foo': (20, u'Change')},
        'repository': u'git://warner',
        'revision': u'0e92a098b',
        'revlink': u'http://warner/0e92a098b',
        'when_timestamp': 256738404,
        'sourcestamp': {
            'branch': u'warnerdb',
            'codebase': u'',
            'patch': None,
            'project': u'Buildbot',
            'repository': u'git://warner',
            'revision': u'0e92a098b',
            'created_at': epoch2datetime(10000000),
            'ssid': 100,
        },
        # uid
    }

    def setUp(self):
        self.master = fakemaster.make_master(wantMq=True, wantDb=True,
                                             wantData=True, testcase=self)
        self.rtype = changes.Change(self.master)

    def test_signature_addChange(self):
        # The real addChange signature must match the fake's.
        @self.assertArgSpecMatches(
            self.master.data.updates.addChange,  # fake
            self.rtype.addChange)  # real
        def addChange(self, files=None, comments=None, author=None,
                      revision=None, when_timestamp=None, branch=None, category=None,
                      revlink=u'', properties=None, repository=u'', codebase=None,
                      project=u'', src=None):
            pass

    # NOTE(review): the mutable default for expectedChangeUsers is safe here
    # because the helper only reads it, but it is a known Python pitfall.
    def do_test_addChange(self, kwargs,
                          expectedRoutingKey, expectedMessage, expectedRow,
                          expectedChangeUsers=[]):
        # Shared driver: invoke addChange with `kwargs` and verify the mq
        # production, db row, and change-user links.
        clock = task.Clock()
        clock.advance(10000000)
        d = self.rtype.addChange(_reactor=clock, **kwargs)

        def check(changeid):
            self.assertEqual(changeid, 500)
            # check the correct message was received
            self.master.mq.assertProductions([
                (expectedRoutingKey, expectedMessage),
            ])
            # and that the correct data was inserted into the db
            self.master.db.changes.assertChange(500, expectedRow)
            self.master.db.changes.assertChangeUsers(500, expectedChangeUsers)
        d.addCallback(check)
        return d

    def test_addChange(self):
        # src and codebase are default here
        kwargs = dict(author=u'warner', branch=u'warnerdb',
                      category=u'devel', comments=u'fix whitespace',
                      files=[u'master/buildbot/__init__.py'],
                      project=u'Buildbot', repository=u'git://warner',
                      revision=u'0e92a098b', revlink=u'http://warner/0e92a098b',
                      when_timestamp=256738404,
                      properties={u'foo': 20})
        expectedRoutingKey = ('changes', '500', 'new')
        expectedMessage = self.changeEvent
        expectedRow = fakedb.Change(
            changeid=500,
            author='warner',
            comments='fix whitespace',
            branch='warnerdb',
            revision='0e92a098b',
            revlink='http://warner/0e92a098b',
            when_timestamp=256738404,
            category='devel',
            repository='git://warner',
            codebase='',
            project='Buildbot',
            sourcestampid=100,
        )
        return self.do_test_addChange(kwargs,
                                      expectedRoutingKey, expectedMessage, expectedRow)

    def test_addChange_src_codebase(self):
        # With src given, a user object is created and linked to the change.
        createUserObject = mock.Mock(spec=users.createUserObject)
        createUserObject.return_value = defer.succeed(123)
        self.patch(users, 'createUserObject', createUserObject)
        kwargs = dict(author=u'warner', branch=u'warnerdb',
                      category=u'devel', comments=u'fix whitespace',
                      files=[u'master/buildbot/__init__.py'],
                      project=u'Buildbot', repository=u'git://warner',
                      revision=u'0e92a098b', revlink=u'http://warner/0e92a098b',
                      when_timestamp=256738404,
                      properties={u'foo': 20}, src=u'git', codebase=u'cb')
        expectedRoutingKey = ('changes', '500', 'new')
        expectedMessage = {
            'author': u'warner',
            'branch': u'warnerdb',
            'category': u'devel',
            'codebase': u'cb',
            'comments': u'fix whitespace',
            'changeid': 500,
            'files': [u'master/buildbot/__init__.py'],
            'parent_changeids': [],
            'project': u'Buildbot',
            'properties': {u'foo': (20, u'Change')},
            'repository': u'git://warner',
            'revision': u'0e92a098b',
            'revlink': u'http://warner/0e92a098b',
            'when_timestamp': 256738404,
            'sourcestamp': {
                'branch': u'warnerdb',
                'codebase': u'cb',
                'patch': None,
                'project': u'Buildbot',
                'repository': u'git://warner',
                'revision': u'0e92a098b',
                'created_at': epoch2datetime(10000000),
                'ssid': 100,
            },
            # uid
        }
        expectedRow = fakedb.Change(
            changeid=500,
            author='warner',
            comments='fix whitespace',
            branch='warnerdb',
            revision='0e92a098b',
            revlink='http://warner/0e92a098b',
            when_timestamp=256738404,
            category='devel',
            repository='git://warner',
            codebase='cb',
            project='Buildbot',
            sourcestampid=100,
        )
        d = self.do_test_addChange(kwargs,
                                   expectedRoutingKey, expectedMessage, expectedRow,
                                   expectedChangeUsers=[123])

        @d.addCallback
        def check(_):
            createUserObject.assert_called_once_with(
                self.master, 'warner', 'git')
        return d

    def test_addChange_src_codebaseGenerator(self):
        # Without an explicit codebase, the configured codebaseGenerator
        # derives one from the change dict.
        def preChangeGenerator(**kwargs):
            return kwargs
        self.master.config = mock.Mock(name='master.config')
        self.master.config.preChangeGenerator = preChangeGenerator
        self.master.config.codebaseGenerator = \
            lambda change: 'cb-%s' % change['category']
        kwargs = dict(author=u'warner', branch=u'warnerdb',
                      category=u'devel', comments=u'fix whitespace',
                      files=[u'master/buildbot/__init__.py'],
                      project=u'Buildbot', repository=u'git://warner',
                      revision=u'0e92a098b', revlink=u'http://warner/0e92a098b',
                      when_timestamp=256738404,
                      properties={u'foo': 20})
        expectedRoutingKey = ('changes', '500', 'new')
        expectedMessage = {
            'author': u'warner',
            'branch': u'warnerdb',
            'category': u'devel',
            'codebase': u'cb-devel',
            'comments': u'fix whitespace',
            'changeid': 500,
            'files': [u'master/buildbot/__init__.py'],
            'parent_changeids': [],
            'project': u'Buildbot',
            'properties': {u'foo': (20, u'Change')},
            'repository': u'git://warner',
            'revision': u'0e92a098b',
            'revlink': u'http://warner/0e92a098b',
            'when_timestamp': 256738404,
            'sourcestamp': {
                'branch': u'warnerdb',
                'codebase': u'cb-devel',
                'patch': None,
                'project': u'Buildbot',
                'repository': u'git://warner',
                'revision': u'0e92a098b',
                'created_at': epoch2datetime(10000000),
                'ssid': 100,
            },
            # uid
        }
        expectedRow = fakedb.Change(
            changeid=500,
            author='warner',
            comments='fix whitespace',
            branch='warnerdb',
            revision='0e92a098b',
            revlink='http://warner/0e92a098b',
            when_timestamp=256738404,
            category='devel',
            repository='git://warner',
            codebase='cb-devel',
            project='Buildbot',
            sourcestampid=100,
        )
        return self.do_test_addChange(kwargs,
                                      expectedRoutingKey, expectedMessage, expectedRow)

    def test_addChange_repository_revision(self):
        self.master.config = mock.Mock(name='master.config')
        self.master.config.revlink = lambda rev, repo: 'foo%sbar%sbaz' % (repo, rev)
        # revlink is default here
        kwargs = dict(author=u'warner', branch=u'warnerdb',
                      category=u'devel', comments=u'fix whitespace',
                      files=[u'master/buildbot/__init__.py'],
                      project=u'Buildbot', repository=u'git://warner',
                      codebase=u'', revision=u'0e92a098b', when_timestamp=256738404,
                      properties={u'foo': 20})
        expectedRoutingKey = ('changes', '500', 'new')
        # When no revlink is passed to addChange, but a repository and revision is
        # passed, the revlink should be constructed by calling the revlink callable
        # in the config. We thus expect a revlink of 'foogit://warnerbar0e92a098bbaz'
        expectedMessage = {
            'author': u'warner',
            'branch': u'warnerdb',
            'category': u'devel',
            'codebase': u'',
            'comments': u'fix whitespace',
            'changeid': 500,
            'files': [u'master/buildbot/__init__.py'],
            'parent_changeids': [],
            'project': u'Buildbot',
            'properties': {u'foo': (20, u'Change')},
            'repository': u'git://warner',
            'revision': u'0e92a098b',
            'revlink': u'foogit://warnerbar0e92a098bbaz',
            'when_timestamp': 256738404,
            'sourcestamp': {
                'branch': u'warnerdb',
                'codebase': u'',
                'patch': None,
                'project': u'Buildbot',
                'repository': u'git://warner',
                'revision': u'0e92a098b',
                'created_at': epoch2datetime(10000000),
                'ssid': 100,
            },
            # uid
        }
        expectedRow = fakedb.Change(
            changeid=500,
            author='warner',
            comments='fix whitespace',
            branch='warnerdb',
            revision='0e92a098b',
            revlink='foogit://warnerbar0e92a098bbaz',
            when_timestamp=256738404,
            category='devel',
            repository='git://warner',
            codebase='',
            project='Buildbot',
            sourcestampid=100,
        )
        return self.do_test_addChange(kwargs,
                                      expectedRoutingKey, expectedMessage, expectedRow)
| wainersm/buildbot | master/buildbot/test/unit/test_data_changes.py | Python | gpl-2.0 | 15,376 |
#!/usr/bin/env python
"""Launcher script for the Gold Rush game."""
from game import Game


def main():
    """Create the Game instance and enter its main loop."""
    game = Game()
    game.mainloop()


if __name__ == '__main__':
    main()
| panthorstudios/Gold-Rush | goldrush.py | Python | mit | 110 |
# -*- coding: utf-8 -*-
import logging
import re
from streamlink.compat import html_unescape, unquote
from streamlink.exceptions import PluginError
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents, validate
from streamlink.stream import HLSStream, HTTPStream, RTMPStream
from streamlink.utils import parse_json
# Module-level logger named after this plugin module.
log = logging.getLogger(__name__)
class OKru(Plugin):
    """Streamlink plugin for videos hosted on ok.ru (Odnoklassniki)."""

    # Matches the data-options="{...}" attribute that carries the player JSON.
    _data_re = re.compile(r'''data-options=(?P<q>["'])(?P<data>{[^"']+})(?P=q)''')
    _url_re = re.compile(r'''https?://(?:www\.)?ok\.ru/''')

    # Schema for the player metadata: a list of progressive renditions plus
    # optional HLS / DASH / RTMP manifest URLs.
    _metadata_schema = validate.Schema(
        validate.transform(parse_json),
        validate.any({
            'videos': validate.any(
                [],
                [
                    {
                        'name': validate.text,
                        'url': validate.text,
                    }
                ]
            ),
            validate.optional('hlsManifestUrl'): validate.text,
            validate.optional('hlsMasterPlaylistUrl'): validate.text,
            validate.optional('liveDashManifestUrl'): validate.text,
            validate.optional('rtmpUrl'): validate.text,
        }, None)
    )
    # Schema for the page: extract 'flashvars' from the data-options
    # attribute; it contains either inline 'metadata' or a 'metadataUrl'.
    _data_schema = validate.Schema(
        validate.all(
            validate.transform(_data_re.search),
            validate.get('data'),
            validate.transform(html_unescape),
            validate.transform(parse_json),
            validate.get('flashvars'),
            validate.any({
                'metadata': _metadata_schema
            }, {
                'metadataUrl': validate.transform(unquote)
            }, None)
        )
    )
    # Maps ok.ru rendition names to pixel heights used for stream weighting.
    QUALITY_WEIGHTS = {
        'full': 1080,
        '1080': 1080,
        'hd': 720,
        '720': 720,
        'sd': 480,
        '480': 480,
        '360': 360,
        'low': 360,
        'lowest': 240,
        'mobile': 144,
    }

    @classmethod
    def can_handle_url(cls, url):
        # True when the URL is an ok.ru page handled by this plugin.
        return cls._url_re.match(url) is not None

    @classmethod
    def stream_weight(cls, key):
        # Prefer the site-specific quality table; fall back to the generic
        # Plugin weighting for unknown stream names.
        weight = cls.QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, 'okru'
        return Plugin.stream_weight(key)

    def _get_streams(self):
        """Yield (name, stream) pairs for the video at ``self.url``."""
        self.session.http.headers.update({
            'User-Agent': useragents.FIREFOX,
            'Referer': self.url,
        })
        try:
            data = self.session.http.get(self.url, schema=self._data_schema)
        except PluginError:
            log.error('unable to validate _data_schema for {0}'.format(self.url))
            return
        metadata = data.get('metadata')
        metadata_url = data.get('metadataUrl')
        # Inline metadata wins; otherwise fetch it from the metadata URL.
        if metadata_url and not metadata:
            metadata = self.session.http.post(metadata_url,
                                              schema=self._metadata_schema)
        if metadata:
            log.trace('{0!r}'.format(metadata))
            # HLS variant playlists (either manifest key may be present).
            for hls_url in [metadata.get('hlsManifestUrl'),
                            metadata.get('hlsMasterPlaylistUrl')]:
                if hls_url is not None:
                    for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
                        yield s
            # Progressive HTTP renditions, renamed to '<height>p' when known.
            if metadata.get('videos'):
                for http_stream in metadata['videos']:
                    http_name = http_stream['name']
                    http_url = http_stream['url']
                    try:
                        http_name = '{0}p'.format(self.QUALITY_WEIGHTS[http_name])
                    except KeyError:
                        pass
                    yield http_name, HTTPStream(self.session, http_url)
            # RTMP fallback, typically for live broadcasts.
            if metadata.get('rtmpUrl'):
                yield 'live', RTMPStream(self.session, params={'rtmp': metadata['rtmpUrl']})
# Entry point used by Streamlink's plugin loader.
__plugin__ = OKru
| wlerin/streamlink | src/streamlink/plugins/okru.py | Python | bsd-2-clause | 3,774 |
# -*- coding: utf-8 -*-
# Copyright (C) 2009 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
import gi
gi.require_version('Gedit', '3.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GObject, Gio, GLib, Gtk, Gedit
from .popup import Popup
from .virtualdirs import RecentDocumentsDirectory
from .virtualdirs import CurrentDocumentsDirectory
# Set up message translation for the 'gedit' text domain.  If gettext is
# unavailable or fails to initialize, fall back to an identity function so
# that _() calls still work (returning the untranslated string).
try:
    import gettext
    gettext.bindtextdomain('gedit')
    gettext.textdomain('gedit')
    _ = gettext.gettext
except Exception:
    # A bare "except:" here would also swallow SystemExit/KeyboardInterrupt;
    # Exception is broad enough for any import or locale failure.
    def _(s):
        return s
class QuickOpenAppActivatable(GObject.Object, Gedit.AppActivatable):
    """Application-scope hook wiring the quick-open action into gedit.

    Installs the keyboard accelerator and a "Quick Open" entry in the
    File menu; the per-window behaviour lives in QuickOpenPlugin.
    """

    app = GObject.Property(type=Gedit.App)

    def __init__(self):
        GObject.Object.__init__(self)

    def do_activate(self):
        """Add the File-menu entry and the Ctrl+Alt+O accelerator."""
        self.menu_ext = self.extend_menu("file-section")
        menu_item = Gio.MenuItem.new(_("Quick Open…"), "win.quickopen")
        self.menu_ext.prepend_menu_item(menu_item)
        self.app.add_accelerator("<Primary><Alt>O", "win.quickopen", None)

    def do_deactivate(self):
        """Undo do_activate(): drop the accelerator again."""
        self.app.remove_accelerator("win.quickopen", None)
class QuickOpenPlugin(GObject.Object, Gedit.WindowActivatable):
    """Window-level activatable implementing the quick-open popup dialog."""

    __gtype_name__ = "QuickOpenPlugin"

    window = GObject.Property(type=Gedit.Window)

    def __init__(self):
        GObject.Object.__init__(self)

    def do_activate(self):
        # Remember the popup size across invocations; the popup itself is
        # created lazily on first use.
        self._popup_size = (450, 300)
        self._popup = None

        action = Gio.SimpleAction(name="quickopen")
        action.connect('activate', self.on_quick_open_activate)
        self.window.add_action(action)

    def do_deactivate(self):
        self.window.remove_action("quickopen")

    def get_popup_size(self):
        return self._popup_size

    def set_popup_size(self, size):
        self._popup_size = size

    def _create_popup(self):
        """Build the popup, seeding it with likely-useful locations."""
        paths = []

        # Open documents
        paths.append(CurrentDocumentsDirectory(self.window))

        doc = self.window.get_active_document()

        # Current document directory
        if doc and doc.get_file().is_local():
            gfile = doc.get_file().get_location()
            paths.append(gfile.get_parent())

        # File browser root directory
        bus = self.window.get_message_bus()

        if bus.is_registered('/plugins/filebrowser', 'get_root'):
            msg = bus.send_sync('/plugins/filebrowser', 'get_root')

            if msg:
                gfile = msg.props.location

                if gfile and gfile.is_native():
                    paths.append(gfile)

        # Recent documents
        paths.append(RecentDocumentsDirectory())

        # Local bookmarks
        for path in self._local_bookmarks():
            paths.append(path)

        # Desktop directory
        desktopdir = self._desktop_dir()

        if desktopdir:
            paths.append(Gio.file_new_for_path(desktopdir))

        # Home directory
        paths.append(Gio.file_new_for_path(os.path.expanduser('~')))

        self._popup = Popup(self.window, paths, self.on_activated)
        self.window.get_group().add_window(self._popup)
        self._popup.set_default_size(*self.get_popup_size())
        self._popup.set_transient_for(self.window)
        self._popup.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
        self._popup.connect('destroy', self.on_popup_destroy)

    def _local_bookmarks(self):
        """Return GTK bookmark entries that point at local directories."""
        filename = os.path.expanduser('~/.config/gtk-3.0/bookmarks')

        if not os.path.isfile(filename):
            return []

        paths = []

        for line in open(filename, 'r', encoding='utf-8'):
            # Each bookmark line is "<uri> [label]"; only the URI matters.
            uri = line.strip().split(" ")[0]
            f = Gio.file_new_for_uri(uri)

            if f.is_native():
                try:
                    info = f.query_info(Gio.FILE_ATTRIBUTE_STANDARD_TYPE,
                                        Gio.FileQueryInfoFlags.NONE,
                                        None)

                    if info and info.get_file_type() == Gio.FileType.DIRECTORY:
                        paths.append(f)
                # NOTE(review): bare except silently skips any unreadable
                # bookmark; narrowing to GLib.Error would be safer — confirm.
                except:
                    pass

        return paths

    def _desktop_dir(self):
        """Return the XDG desktop directory, falling back to ~/Desktop."""
        config = os.getenv('XDG_CONFIG_HOME')

        if not config:
            config = os.path.expanduser('~/.config')

        config = os.path.join(config, 'user-dirs.dirs')
        desktopdir = None

        if os.path.isfile(config):
            for line in open(config, 'r', encoding='utf-8'):
                line = line.strip()

                if line.startswith('XDG_DESKTOP_DIR'):
                    parts = line.split('=', 1)
                    desktopdir = parts[1].strip('"').strip("'")
                    desktopdir = os.path.expandvars(desktopdir)
                    break

        if not desktopdir:
            desktopdir = os.path.expanduser('~/Desktop')

        return desktopdir

    # Callbacks

    def on_quick_open_activate(self, action, parameter, user_data=None):
        # Lazily (re)create the popup; it is destroyed when dismissed.
        if not self._popup:
            self._create_popup()

        self._popup.show()

    def on_popup_destroy(self, popup, user_data=None):
        # Persist the user's resize before dropping the reference.
        self.set_popup_size(popup.get_final_size())

        self._popup = None

    def on_activated(self, gfile, user_data=None):
        Gedit.commands_load_location(self.window, gfile, None, -1, -1)
        return True
# ex:ts=4:et:
| GNOME/gedit | plugins/quickopen/quickopen/__init__.py | Python | gpl-2.0 | 5,861 |
# Copyright 2017-2018 Aaron C. Prunty
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
import os
import sys
import argparse
import numpy as np
from pathlib import Path
def _locate_file(datadir, default_name, description, required):
    """Return the absolute path of one experiment data file.

    Looks for *default_name* inside *datadir* first.  When it is missing,
    interactively asks the user for an alternative '.npy' file, which is
    also resolved relative to *datadir*.  (The original code checked the
    file's existence inside datadir but then recorded
    ``os.path.abspath(filename)`` relative to the *current working
    directory*, silently storing a wrong path whenever the two differed.)

    Parameters
    ----------
    datadir : str
        Absolute path of the experiment data directory.
    default_name : str
        Conventional file name, e.g. 'receiverPoints.npy'.
    description : str
        Human-readable description used in the interactive prompts.
    required : bool
        When True, declining to supply the file exits the program;
        when False, None is returned instead.

    Returns
    -------
    str or None
        Absolute path of the located file, or None if an optional file
        was skipped.
    """
    default_path = os.path.join(datadir, default_name)
    if Path(default_path).exists():
        return default_path

    if required:
        requirement = 'a required file'
        skip_line = "Enter 'n/no' or 'q/quit' to exit this program. (Default)"
    else:
        requirement = 'NOT a required file'
        skip_line = ("Enter 'n/no' to proceed without specifying the %s. (Default)\n"
                     "         Enter 'q/quit' to exit this program.") % description
    print('''
         Expected file \'%s\' not found. Does a file exist
         containing the %s? (This is %s.)

         Enter \'y/yes\' to specify the filename containing the %s
         (must be binary NumPy \'.npy\' format).
         %s
         ''' % (default_name, description, requirement, description, skip_line))

    while True:
        answer = input('Action: ')
        if answer in ('y', 'yes'):
            filename = input('Please specify the filename containing the %s: ' % description)
            candidate = os.path.join(datadir, filename)
            if '.npy' not in filename:
                print('''Error: file \'%s\' is not NumPy \'.npy\' format.''' % filename)
            elif not Path(candidate).exists():
                print('Error: file \'%s\' does not exist within the data directory.' % filename)
            else:
                # Resolve against datadir, where existence was just checked.
                return os.path.abspath(candidate)
        elif answer in ('', 'n', 'no'):
            if required:
                sys.exit('Exiting program.')
            print('Proceeding without specifying the %s.' % description)
            return None
        elif answer in ('q', 'quit'):
            # Bug fix: the original required-file loops advertised 'q/quit'
            # in the prompt but then treated it as invalid input.
            sys.exit('Exiting program.')
        else:
            print('Invalid response. Please enter \'y/yes\', \'n/no\', or \'q/quit\'.')


def cli():
    """Command-line entry point: locate the experiment's data files and
    record their locations in the NumPy archive 'datadir.npz'.

    Required files: receiver points, source points, recording times and
    the measured scattered data.  Optional files: scatterer points and
    simulated test functions; their keys are simply omitted from the
    archive when they are not available.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, required=True,
                        help='''specify the path to the directory containing the
                        experimental data.''')
    args = parser.parse_args()
    datadir = os.path.abspath(args.path)

    # Prompt for each file in the same order as the original interface.
    receivers = _locate_file(datadir, 'receiverPoints.npy',
                             'receiver coordinates', required=True)
    sources = _locate_file(datadir, 'sourcePoints.npy',
                           'source coordinates', required=True)
    scatterer = _locate_file(datadir, 'scattererPoints.npy',
                             'scatterer coordinates', required=False)
    recordingTimes = _locate_file(datadir, 'recordingTimes.npy',
                                  'recording times', required=True)
    scatteredData = _locate_file(datadir, 'scatteredData.npy',
                                 'measured scattered waves', required=True)
    testFuncs = _locate_file(datadir, 'testFunctions.npy',
                             'simulated test functions', required=False)

    # Collect only the arrays that were actually located; this collapses
    # the original four-way duplicated np.savez branches into one call.
    arrays = dict(receivers=receivers,
                  sources=sources,
                  recordingTimes=recordingTimes,
                  scatteredData=scatteredData)
    if scatterer is not None:
        arrays['scatterer'] = scatterer
    if testFuncs is not None:
        arrays['testFuncs'] = testFuncs
    np.savez('datadir.npz', **arrays)
"""Unit tests for the SonarQube source."""
from .base import SonarQubeTestCase
class SonarQubeSecurityWarningsTest(SonarQubeTestCase):
    """Unit tests for the SonarQube security warnings collector."""

    # Metric type exercised by these tests; consumed by the base class.
    METRIC_TYPE = "security_warnings"

    def setUp(self):
        """Extend to set up SonarQube security warnings."""
        super().setUp()
        # Canned issue-search API payload with two vulnerabilities.
        self.vulnerabilities_json = dict(
            total="2",
            issues=[
                dict(
                    key="vulnerability1",
                    message="message1",
                    component="component1",
                    severity="INFO",
                    type="VULNERABILITY",
                    creationDate="2020-08-30T22:48:52+0200",
                    updateDate="2020-09-30T22:48:52+0200",
                ),
                dict(
                    key="vulnerability2",
                    message="message2",
                    component="component2",
                    severity="MAJOR",
                    type="VULNERABILITY",
                    creationDate="2019-08-30T22:48:52+0200",
                    updateDate="2019-09-30T22:48:52+0200",
                ),
            ],
        )
        # Canned hotspot-search API payload with two security hotspots.
        self.hotspots_json = dict(
            paging=dict(total="2"),
            hotspots=[
                dict(
                    key="hotspot1",
                    message="message1",
                    component="component1",
                    vulnerabilityProbability="MEDIUM",
                    creationDate="2010-12-13T10:37:07+0000",
                    updateDate="2019-08-26T09:02:49+0000",
                ),
                dict(
                    key="hotspot2",
                    message="message2",
                    component="component2",
                    vulnerabilityProbability="LOW",
                    creationDate="2011-10-26T13:34:12+0000",
                    updateDate="2020-08-31T08:19:00+0000",
                ),
            ],
        )
        # Entities the collector is expected to derive from the JSON above.
        self.hotspot_entities = [
            self.entity(
                key="hotspot1",
                component="component1",
                entity_type="security_hotspot",
                message="message1",
                review_priority="medium",
                creation_date="2010-12-13T10:37:07+0000",
                update_date="2019-08-26T09:02:49+0000",
            ),
            self.entity(
                key="hotspot2",
                component="component2",
                entity_type="security_hotspot",
                message="message2",
                review_priority="low",
                creation_date="2011-10-26T13:34:12+0000",
                update_date="2020-08-31T08:19:00+0000",
            ),
        ]
        self.vulnerability_entities = [
            self.entity(
                key="vulnerability1",
                component="component1",
                entity_type="vulnerability",
                message="message1",
                severity="info",
                creation_date="2020-08-30T22:48:52+0200",
                update_date="2020-09-30T22:48:52+0200",
            ),
            self.entity(
                key="vulnerability2",
                component="component2",
                entity_type="vulnerability",
                severity="major",
                creation_date="2019-08-30T22:48:52+0200",
                update_date="2019-09-30T22:48:52+0200",
                message="message2",
            ),
        ]

    async def test_all_security_warnings(self):
        """Test that all security warnings are returned."""
        self.set_source_parameter("security_types", ["vulnerability", "security_hotspot"])
        # Three GET responses are consumed: component lookup, issues, hotspots.
        show_component_json = {}
        response = await self.collect(
            get_request_json_side_effect=[show_component_json, self.vulnerabilities_json, self.hotspots_json]
        )
        self.assert_measurement(
            response,
            value="4",
            total="100",
            entities=self.vulnerability_entities + self.hotspot_entities,
            landing_url="https://sonarqube/dashboard?id=id&branch=master",
        )

    async def test_security_warnings_hotspots_only(self):
        """Test that only the security hotspots are returned."""
        self.set_source_parameter("security_types", ["security_hotspot"])
        response = await self.collect(get_request_json_return_value=self.hotspots_json)
        self.assert_measurement(
            response,
            value="2",
            total="100",
            entities=self.hotspot_entities,
            landing_url="https://sonarqube/security_hotspots?id=id&branch=master",
        )

    async def test_security_warnings_vulnerabilities_only(self):
        """Test that by default only the vulnerabilities are returned."""
        response = await self.collect(get_request_json_return_value=self.vulnerabilities_json)
        self.assert_measurement(
            response, value="2", total="100", entities=self.vulnerability_entities, landing_url=self.issues_landing_url
        )
| ICTU/quality-time | components/collector/tests/source_collectors/sonarqube/test_security_warnings.py | Python | apache-2.0 | 5,049 |
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This class implements the functionality for handling plain text files, or
similar wiki type files.
Supported formats are
- Plain text
- dokuwiki
- MediaWiki
"""
import re
import six
from translate.misc.deprecation import deprecated
from translate.storage import base
# Line-format rules per wiki flavour. Each rule is a tuple
# (name, pre-regex, post-regex): the pre-regex splits a line into
# (leading markup, translatable text); the post-regex matches trailing
# markup, which is stripped off the text and kept as "posttext".
dokuwiki = []
dokuwiki.append(("Dokuwiki heading", re.compile(r"( ?={2,6}[\s]*)(.+)"), re.compile("([\s]*={2,6}[\s]*)$")))
dokuwiki.append(("Dokuwiki bullet", re.compile(r"([\s]{2,}\*[\s]*)(.+)"), re.compile("[\s]+$")))
dokuwiki.append(("Dokuwiki numbered item", re.compile(r"([\s]{2,}-[\s]*)(.+)"), re.compile("[\s]+$")))

mediawiki = []
mediawiki.append(("MediaWiki heading", re.compile(r"(={1,5}[\s]*)(.+)"), re.compile("([\s]*={1,5}[\s]*)$")))
mediawiki.append(("MediaWiki bullet", re.compile(r"(\*+[\s]*)(.+)"), re.compile("[\s]+$")))
mediawiki.append(("MediaWiki numbered item", re.compile(r"(#+[\s]*)(.+)"), re.compile("[\s]+$")))

# Maps a flavour name to its rule list; None and "plain" mean no markup.
flavours = {
    "dokuwiki": dokuwiki,
    "mediawiki": mediawiki,
    None: [],
    "plain": [],
}
@six.python_2_unicode_compatible
class TxtUnit(base.TranslationUnit):
    """This class represents a block of text from a text file"""

    def __init__(self, source="", **kwargs):
        """Construct the txtunit"""
        super(TxtUnit, self).__init__(source)
        # Note that source and target are equivalent for monolingual units.
        self.source = source
        # Wiki markup captured around the translatable text, e.g. "== ".
        self.pretext = ""
        self.posttext = ""
        # List of "filename:lineno" strings recording where the unit came from.
        self.location = []

    def __str__(self):
        """Convert a txt unit to a string"""
        return u"".join([self.pretext, self.source, self.posttext])

    # Deprecated on 2.3.1
    @deprecated("Use `source` property instead")
    def getsource(self):
        return self.source

    @property
    def target(self):
        """gets the unquoted target string"""
        return self.source

    @target.setter
    def target(self, target):
        """Sets the definition to the quoted value of target"""
        # Invalidate any cached rich-target representation kept by the base.
        self._rich_target = None
        self.source = target

    # Deprecated on 2.3.1
    @deprecated("Use `target` property instead")
    def gettarget(self):
        return self.target

    def addlocation(self, location):
        self.location.append(location)

    def getlocations(self):
        return self.location
class TxtFile(base.TranslationStore):
    """This class represents a text file, made up of txtunits"""

    UnitClass = TxtUnit

    def __init__(self, inputfile=None, flavour=None, no_segmentation=False,
                 **kwargs):
        super(TxtFile, self).__init__(**kwargs)
        self.filename = getattr(inputfile, 'name', '')
        # Unknown flavour names silently fall back to no wiki markup rules.
        self.flavour = flavours.get(flavour, [])
        self.no_segmentation = no_segmentation
        if inputfile is not None:
            txtsrc = inputfile.readlines()
            self.parse(txtsrc)

    def parse(self, lines):
        """Read in text lines and create txtunits from the blocks of text"""
        if self.no_segmentation:
            # Treat the entire input as a single unit.
            self.addsourceunit("".join([line.decode(self.encoding)
                                        for line in lines]))
            return
        block = []
        current_line = 0
        pretext = ""
        posttext = ""
        if not isinstance(lines, list):
            lines = lines.split(b"\n")
        for linenum, line in enumerate(lines):
            current_line = linenum + 1
            line = line.decode(self.encoding).rstrip("\r\n")
            # Try each flavour rule: a match splits the line into markup
            # prefix, translatable text and (optionally) trailing markup,
            # and terminates the current block.
            for rule, prere, postre in self.flavour:
                match = prere.match(line)
                if match:
                    pretext, source = match.groups()
                    postmatch = postre.search(source)
                    if postmatch:
                        posttext = postmatch.group()
                        source = source[:postmatch.start()]
                    block.append(source)
                    isbreak = True
                    break
            else:
                # No markup rule matched: a blank line terminates the block.
                isbreak = not line.strip()
            if isbreak and block:
                # Flush the accumulated block as one unit.
                unit = self.addsourceunit("\n".join(block))
                unit.addlocation("%s:%d" % (self.filename, current_line))
                unit.pretext = pretext
                unit.posttext = posttext
                pretext = ""
                posttext = ""
                block = []
            elif not isbreak:
                block.append(line)
        # Flush any text left over at end of input.
        if block:
            unit = self.addsourceunit("\n".join(block))
            unit.addlocation("%s:%d" % (self.filename, current_line))

    def serialize(self, out):
        # Units are separated by one blank line on output.
        for idx, unit in enumerate(self.units):
            if idx > 0:
                out.write(b'\n\n')
            out.write(six.text_type(unit).encode(self.encoding))
| diorcety/translate | translate/storage/txt.py | Python | gpl-2.0 | 5,443 |
import warnings
import numpy as np
from numpy.polynomial.hermite_e import HermiteE
from scipy.special import factorial
from scipy.stats import rv_continuous
import scipy.special as special
# TODO:
# * actually solve (31) of Blinnikov & Moessner
# * numerical stability: multiply factorials in logspace?
# * ppf & friends: Cornish & Fisher series, or tabulate/solve
# Pre-computed solutions of the partition equation
#   n*k_n + ... + 2*k_2 + 1*k_1 = n
# for n = 1..4, keyed by n. Each entry lists the non-zero (m, k_m) pairs of
# one solution; _faa_di_bruno_partitions() serves results from this cache.
_faa_di_bruno_cache = {
    1: [[(1, 1)]],
    2: [[(1, 2)], [(2, 1)]],
    3: [[(1, 3)], [(2, 1), (1, 1)], [(3, 1)]],
    4: [[(1, 4)], [(1, 2), (2, 1)], [(2, 2)], [(3, 1), (1, 1)], [(4, 1)]]}
def _faa_di_bruno_partitions(n):
    """
    Return all non-negative integer solutions of the diophantine equation

            n*k_n + ... + 2*k_2 + 1*k_1 = n   (1)

    Parameters
    ----------
    n : int
        the r.h.s. of Eq. (1)

    Returns
    -------
    partitions : list
        Each solution is itself a list of the form `[(m, k_m), ...]`
        for non-zero `k_m`. Notice that the index `m` is 1-based.

    Examples:
    ---------
    >>> _faa_di_bruno_partitions(2)
    [[(1, 2)], [(2, 1)]]
    >>> for p in _faa_di_bruno_partitions(4):
    ...     assert 4 == sum(m * k for (m, k) in p)
    """
    if n < 1:
        raise ValueError("Expected a positive integer; got %s instead" % n)
    # Only the first few orders are tabulated; supporting higher orders
    # would require actually solving Eq. (31) of Blinnikov & Moessner.
    partitions = _faa_di_bruno_cache.get(n)
    if partitions is None:
        raise NotImplementedError('Higher order terms not yet implemented.')
    return partitions
def cumulant_from_moments(momt, n):
    """Compute n-th cumulant given moments.

    Parameters
    ----------
    momt : array_like
        `momt[j]` contains `(j+1)`-th moment.
        These can be raw moments around zero, or central moments
        (in which case, `momt[0]` == 0).
    n : int
        which cumulant to calculate (must be >1)

    Returns
    -------
    kappa : float
        n-th cumulant.
    """
    if n < 1:
        raise ValueError("Expected a positive integer. Got %s instead." % n)
    if len(momt) < n:
        raise ValueError("%s-th cumulant requires %s moments, "
                         "only got %s." % (n, n, len(momt)))
    # Moment-to-cumulant inversion: one term per integer partition of n.
    kappa = 0.
    for partition in _faa_di_bruno_partitions(n):
        order = sum(k for (m, k) in partition)
        term = (-1)**(order - 1) * factorial(order - 1)
        for (m, k) in partition:
            term *= np.power(momt[m - 1] / factorial(m), k) / factorial(k)
        kappa += term
    return kappa * factorial(n)
## copied from scipy.stats.distributions to avoid the overhead of
## the public methods
# sqrt(2*pi): the normalization denominator of the standard-normal pdf.
_norm_pdf_C = np.sqrt(2*np.pi)
def _norm_pdf(x):
    # Standard-normal density: exp(-x^2/2) / sqrt(2*pi).
    return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_cdf(x):
    # Standard-normal CDF via the C-implemented ndtr.
    return special.ndtr(x)
def _norm_sf(x):
    # Standard-normal survival function: 1 - CDF(x) == CDF(-x).
    return special.ndtr(-x)
class ExpandedNormal(rv_continuous):
    """Construct the Edgeworth expansion pdf given cumulants.

    Parameters
    ----------
    cum : array_like
        `cum[j]` contains `(j+1)`-th cumulant: cum[0] is the mean,
        cum[1] is the variance and so on.

    Notes
    -----
    This is actually an asymptotic rather than convergent series, hence
    higher orders of the expansion may or may not improve the result.
    In a strongly non-Gaussian case, it is possible that the density
    becomes negative, especially far out in the tails.

    Examples
    --------
    Construct the 4th order expansion for the chi-square distribution using
    the known values of the cumulants:

    >>> import matplotlib.pyplot as plt
    >>> from scipy import stats
    >>> from scipy.special import factorial
    >>> df = 12
    >>> chi2_c = [2**(j-1) * factorial(j-1) * df for j in range(1, 5)]
    >>> edgw_chi2 = ExpandedNormal(chi2_c, name='edgw_chi2', momtype=0)

    Calculate several moments:
    >>> m, v = edgw_chi2.stats(moments='mv')
    >>> np.allclose([m, v], [df, 2 * df])
    True

    Plot the density function:
    >>> mu, sigma = df, np.sqrt(2*df)
    >>> x = np.linspace(mu - 3*sigma, mu + 3*sigma)
    >>> fig1 = plt.plot(x, stats.chi2.pdf(x, df=df), 'g-', lw=4, alpha=0.5)
    >>> fig2 = plt.plot(x, stats.norm.pdf(x, mu, sigma), 'b--', lw=4, alpha=0.5)
    >>> fig3 = plt.plot(x, edgw_chi2.pdf(x), 'r-', lw=2)
    >>> plt.show()

    References
    ----------
    .. [*] E.A. Cornish and R.A. Fisher, Moments and cumulants in the
         specification of distributions, Revue de l'Institut Internat.
         de Statistique. 5: 307 (1938), reprinted in
         R.A. Fisher, Contributions to Mathematical Statistics. Wiley, 1950.
    .. [*] https://en.wikipedia.org/wiki/Edgeworth_series
    .. [*] S. Blinnikov and R. Moessner, Expansions for nearly Gaussian
        distributions, Astron. Astrophys. Suppl. Ser. 130, 193 (1998)
    """
    def __init__(self, cum, name='Edgeworth expanded normal', **kwds):
        if len(cum) < 2:
            raise ValueError("At least two cumulants are needed.")
        self._coef, self._mu, self._sigma = self._compute_coefs_pdf(cum)
        self._herm_pdf = HermiteE(self._coef)
        if self._coef.size > 2:
            self._herm_cdf = HermiteE(-self._coef[1:])
        else:
            self._herm_cdf = lambda x: 0.

        # warn if pdf(x) < 0 for some values of x within 4 sigma
        r = np.real_if_close(self._herm_pdf.roots())
        r = (r - self._mu) / self._sigma
        # Bug fix: test for the *presence* of selected roots with .size.
        # The previous `.any()` tested the root values themselves, so a real
        # root exactly at zero (pdf vanishing at the mean) escaped the warning.
        if r[(np.imag(r) == 0) & (np.abs(r) < 4)].size > 0:
            mesg = 'PDF has zeros at %s ' % r
            warnings.warn(mesg, RuntimeWarning)

        kwds.update({'name': name,
                     'momtype': 0})   # use pdf, not ppf in self.moment()
        super(ExpandedNormal, self).__init__(**kwds)

    def _pdf(self, x):
        # Edgeworth density: Hermite series times the standard-normal
        # kernel, evaluated on the standardized variable.
        y = (x - self._mu) / self._sigma
        return self._herm_pdf(y) * _norm_pdf(y) / self._sigma

    def _cdf(self, x):
        y = (x - self._mu) / self._sigma
        return (_norm_cdf(y) +
                self._herm_cdf(y) * _norm_pdf(y))

    def _sf(self, x):
        y = (x - self._mu) / self._sigma
        return (_norm_sf(y) -
                self._herm_cdf(y) * _norm_pdf(y))

    def _compute_coefs_pdf(self, cum):
        """Return (coef, mu, sigma): HermiteE coefficients of the expansion
        plus the location and scale of the base normal.
        """
        mu, sigma = cum[0], np.sqrt(cum[1])
        # Bug fix: force a floating-point *copy*. np.asarray(cum) aliases an
        # ndarray argument, so the in-place scaling below used to clobber the
        # caller's array (and, for integer input, break on in-place float
        # division). With a copy, cum[1] also stays constant while scaling.
        lam = np.array(cum, dtype=float)
        # scale cumulants by sigma: lam[j] = cum[j] / cum[1]**j
        lam /= cum[1] ** np.arange(lam.size)

        coef = np.zeros(lam.size * 3 - 5)
        coef[0] = 1.
        for s in range(lam.size - 2):
            for p in _faa_di_bruno_partitions(s+1):
                term = sigma**(s+1)
                for (m, k) in p:
                    term *= np.power(lam[m+1] / factorial(m+2), k) / factorial(k)
                r = sum(k for (m, k) in p)
                coef[s + 1 + 2*r] += term
        return coef, mu, sigma
| statsmodels/statsmodels | statsmodels/distributions/edgeworth.py | Python | bsd-3-clause | 6,678 |
import os.path
import unittest
import urllib
from io import StringIO
from unittest import mock
from django.conf import settings
from django.core.files.base import ContentFile
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from wagtail.documents import models
@override_settings(WAGTAILDOCS_SERVE_METHOD=None)
class TestServeView(TestCase):
    """Tests for the document serve view using the default serve method."""

    def setUp(self):
        # A generic .doc document (served as an attachment) and a .pdf
        # document (served inline), covering both Content-Disposition modes.
        self.document = models.Document(title="Test document", file_hash="123456")
        self.document.file.save("example.doc", ContentFile("A boring example document"))
        self.pdf_document = models.Document(title="Test document", file_hash="123456")
        self.pdf_document.file.save(
            "example.pdf", ContentFile("A boring example document")
        )

    def tearDown(self):
        if hasattr(self, "response"):
            # Make sure the response is fully read before deleting the document so
            # that the file is closed by the view.
            # This is required on Windows as the below line that deletes the file
            # will crash if the file is still open.
            b"".join(self.response.streaming_content)

        # delete the FieldFile directly because the TestCase does not commit
        # transactions to trigger transaction.on_commit() in the signal handler
        self.document.file.delete()
        self.pdf_document.file.delete()

    def get(self, document=None):
        # GET the serve view for the given document (defaults to the .doc
        # document) and remember the response so tearDown can drain it.
        document = document or self.document
        self.response = self.client.get(
            reverse("wagtaildocs_serve", args=(document.id, document.filename))
        )
        return self.response

    def test_response_code(self):
        self.assertEqual(self.get().status_code, 200)

    def test_content_disposition_header(self):
        # Non-inline content types are served as attachments.
        self.assertEqual(
            self.get(self.document)["Content-Disposition"],
            'attachment; filename="{}"'.format(self.document.filename),
        )

    def test_inline_content_disposition_header(self):
        # PDFs are served inline so the browser can display them.
        self.assertEqual(
            self.get(self.pdf_document)["Content-Disposition"],
            'inline; filename="{}"'.format(self.pdf_document.filename),
        )

    @mock.patch("wagtail.documents.views.serve.hooks")
    @mock.patch("wagtail.documents.views.serve.get_object_or_404")
    def test_non_local_filesystem_content_disposition_header(
        self, mock_get_object_or_404, mock_hooks
    ):
        """
        Tests the 'Content-Disposition' header in a response when using a
        storage backend that doesn't expose filesystem paths.
        """
        # Create a mock document with no local file to hit the correct code path
        mock_doc = mock.Mock()
        mock_doc.filename = self.document.filename
        mock_doc.content_type = self.document.content_type
        mock_doc.content_disposition = self.document.content_disposition
        mock_doc.file = StringIO("file-like object" * 10)
        mock_doc.file.path = None
        mock_doc.file.url = None
        mock_doc.file.size = 30
        mock_get_object_or_404.return_value = mock_doc

        # Bypass 'before_serve_document' hooks
        mock_hooks.get_hooks.return_value = []

        response = self.get()
        self.assertEqual(response.status_code, 200)
        # Expect an ASCII fallback filename plus a percent-encoded
        # filename* parameter (RFC 6266 / RFC 5987 style).
        self.assertEqual(
            response["Content-Disposition"],
            "attachment; filename={0}; filename*=UTF-8''{0}".format(
                urllib.parse.quote(self.document.filename)
            ),
        )

    @mock.patch("wagtail.documents.views.serve.hooks")
    @mock.patch("wagtail.documents.views.serve.get_object_or_404")
    def test_non_local_filesystem_inline_content_disposition_header(
        self, mock_get_object_or_404, mock_hooks
    ):
        """
        Tests the 'Content-Disposition' header in a response when using a
        storage backend that doesn't expose filesystem paths.
        """
        # Create a mock document with no local file to hit the correct code path
        mock_doc = mock.Mock()
        mock_doc.filename = self.pdf_document.filename
        mock_doc.content_type = self.pdf_document.content_type
        mock_doc.content_disposition = self.pdf_document.content_disposition
        mock_doc.file = StringIO("file-like object" * 10)
        mock_doc.file.path = None
        mock_doc.file.url = None
        mock_doc.file.size = 30
        mock_get_object_or_404.return_value = mock_doc

        # Bypass 'before_serve_document' hooks
        mock_hooks.get_hooks.return_value = []

        response = self.get(self.pdf_document)
        self.assertEqual(response.status_code, 200)
        # Inline responses carry no filename parameters here.
        self.assertEqual(response["Content-Disposition"], "inline")

    def test_content_length_header(self):
        # len("A boring example document") == 25
        self.assertEqual(self.get()["Content-Length"], "25")

    def test_content_type_header(self):
        self.assertEqual(self.get()["Content-Type"], "application/msword")

    def test_is_streaming_response(self):
        # The view streams file contents rather than buffering them.
        self.assertTrue(self.get().streaming)

    def test_content(self):
        self.assertEqual(
            b"".join(self.get().streaming_content), b"A boring example document"
        )

    def test_document_served_fired(self):
        # The document_served signal must fire exactly once per request,
        # with the model class as sender and the document as instance.
        mock_handler = mock.MagicMock()
        models.document_served.connect(mock_handler)
        self.get()
        self.assertEqual(mock_handler.call_count, 1)
        self.assertEqual(mock_handler.mock_calls[0][2]["sender"], models.Document)
        self.assertEqual(mock_handler.mock_calls[0][2]["instance"], self.document)

    def test_with_nonexistent_document(self):
        # Unknown document id -> 404.
        response = self.client.get(
            reverse(
                "wagtaildocs_serve",
                args=(
                    1000,
                    "blahblahblah",
                ),
            )
        )
        self.assertEqual(response.status_code, 404)

    def test_with_incorrect_filename(self):
        # The filename in the URL must match the document's real filename.
        response = self.client.get(
            reverse("wagtaildocs_serve", args=(self.document.id, "incorrectfilename"))
        )
        self.assertEqual(response.status_code, 404)

    def test_has_etag_header(self):
        # The ETag is derived from the file_hash assigned in setUp.
        self.assertEqual(self.get()["ETag"], '"123456"')

    def test_has_cache_control_header(self):
        # The ordering of Cache-Control parts varies between Django versions,
        # so accept either form.
        self.assertIn(
            self.get()["Cache-Control"],
            ["max-age=3600, public", "public, max-age=3600"],
        )

    def clear_sendfile_cache(self):
        # Helper: reset the cached sendfile backend between tests.
        from wagtail.utils.sendfile import _get_sendfile

        _get_sendfile.clear()
@override_settings(WAGTAILDOCS_SERVE_METHOD="redirect")
class TestServeViewWithRedirect(TestCase):
def setUp(self):
self.document = models.Document(title="Test document")
self.document.file.save("example.doc", ContentFile("A boring example document"))
self.serve_view_url = reverse(
"wagtaildocs_serve", args=(self.document.id, self.document.filename)
)
def tearDown(self):
self.document.delete()
def get(self):
return self.client.get(
reverse(
"wagtaildocs_serve", args=(self.document.id, self.document.filename)
)
)
def test_document_url_should_point_to_serve_view(self):
self.assertEqual(self.document.url, self.serve_view_url)
def test_redirect(self):
response = self.get()
self.assertRedirects(
response, self.document.file.url, fetch_redirect_response=False
)
@override_settings(WAGTAILDOCS_SERVE_METHOD="direct")
class TestDirectDocumentUrls(TestCase):
def setUp(self):
self.document = models.Document(title="Test document")
self.document.file.save("example.doc", ContentFile("A boring example document"))
def tearDown(self):
self.document.delete()
def get(self):
return self.client.get(
reverse(
"wagtaildocs_serve", args=(self.document.id, self.document.filename)
)
)
def test_url_should_point_directly_to_file_storage_url(self):
self.assertEqual(self.document.url, self.document.file.url)
def test_redirect(self):
# The serve view will not normally be linked to in 'direct' mode, but we should ensure it
# still works by redirecting
response = self.get()
self.assertRedirects(
response, self.document.file.url, fetch_redirect_response=False
)
@override_settings(
    WAGTAILDOCS_SERVE_METHOD=None,
    DEFAULT_FILE_STORAGE="wagtail.tests.dummy_external_storage.DummyExternalStorage",
)
class TestServeWithExternalStorage(TestCase):
    """
    Test the behaviour of the default serve method when used with a remote storage backend
    (i.e. one that throws NotImplementedError for the path() method).
    """

    def setUp(self):
        self.document = models.Document(title="Test document")
        self.document.file.save("example.doc", ContentFile("A boring example document"))
        self.serve_view_url = reverse(
            "wagtaildocs_serve", args=(self.document.id, self.document.filename)
        )

    def tearDown(self):
        self.document.delete()

    def test_document_url_should_point_to_serve_view(self):
        # With no serve method configured, document.url goes via the view.
        self.assertEqual(self.document.url, self.serve_view_url)

    def test_redirect(self):
        # serve view should redirect to the remote URL
        response = self.client.get(self.serve_view_url)
        self.assertRedirects(
            response, self.document.file.url, fetch_redirect_response=False
        )
@override_settings(WAGTAILDOCS_SERVE_METHOD=None)
class TestServeViewWithSendfile(TestCase):
    """Serve view tests exercising the optional django-sendfile backends."""

    def setUp(self):
        # Import using a try-catch block to prevent crashes if the
        # django-sendfile module is not installed
        try:
            import sendfile  # noqa
        except ImportError:
            raise unittest.SkipTest("django-sendfile not installed")

        self.document = models.Document(title="Test document")
        self.document.file.save("example.doc", ContentFile("A boring example document"))

    def tearDown(self):
        # delete the FieldFile directly because the TestCase does not commit
        # transactions to trigger transaction.on_commit() in the signal handler
        self.document.file.delete()

    def get(self):
        # GET the serve view for the test document.
        return self.client.get(
            reverse(
                "wagtaildocs_serve", args=(self.document.id, self.document.filename)
            )
        )

    def clear_sendfile_cache(self):
        # The sendfile backend is cached per-process; clear it so each test
        # picks up the backend configured by its override_settings decorator.
        from wagtail.utils.sendfile import _get_sendfile

        _get_sendfile.clear()

    @override_settings(SENDFILE_BACKEND="sendfile.backends.xsendfile")
    def test_sendfile_xsendfile_backend(self):
        # Apache mod_xsendfile: the file path is handed off via X-Sendfile.
        self.clear_sendfile_cache()
        response = self.get()

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["X-Sendfile"], self.document.file.path)

    @override_settings(
        SENDFILE_BACKEND="sendfile.backends.mod_wsgi",
        SENDFILE_ROOT=settings.MEDIA_ROOT,
        SENDFILE_URL=settings.MEDIA_URL[:-1],
    )
    def test_sendfile_mod_wsgi_backend(self):
        # mod_wsgi: the file is handed off via a Location header.
        self.clear_sendfile_cache()
        response = self.get()

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response["Location"],
            os.path.join(settings.MEDIA_URL, self.document.file.name),
        )

    @override_settings(
        SENDFILE_BACKEND="sendfile.backends.nginx",
        SENDFILE_ROOT=settings.MEDIA_ROOT,
        SENDFILE_URL=settings.MEDIA_URL[:-1],
    )
    def test_sendfile_nginx_backend(self):
        # nginx: the file is handed off via X-Accel-Redirect.
        self.clear_sendfile_cache()
        response = self.get()

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response["X-Accel-Redirect"],
            os.path.join(settings.MEDIA_URL, self.document.file.name),
        )
@override_settings(WAGTAILDOCS_SERVE_METHOD=None)
class TestServeWithUnicodeFilename(TestCase):
    """Serve view tests for documents whose filenames contain non-ASCII characters."""

    def setUp(self):
        self.document = models.Document(title="Test document")

        # Parenthesise the implicit string concatenation: previously the
        # second literal was a separate (no-op) expression statement, so the
        # filename silently lost its second half.
        self.filename = (
            "docs\u0627\u0644\u0643\u0627\u062a\u062f\u0631\u0627"
            "\u064a\u064a\u0629_\u0648\u0627\u0644\u0633\u0648\u0642"
        )
        try:
            self.document.file.save(
                self.filename, ContentFile("A boring example document")
            )
        except UnicodeEncodeError:
            raise unittest.SkipTest("Filesystem doesn't support unicode filenames")

    def tearDown(self):
        # delete the FieldFile directly because the TestCase does not commit
        # transactions to trigger transaction.on_commit() in the signal handler
        self.document.file.delete()

    def test_response_code(self):
        response = self.client.get(
            reverse("wagtaildocs_serve", args=(self.document.id, self.filename))
        )
        self.assertEqual(response.status_code, 200)

    @mock.patch("wagtail.documents.views.serve.hooks")
    @mock.patch("wagtail.documents.views.serve.get_object_or_404")
    def test_non_local_filesystem_unicode_content_disposition_header(
        self, mock_get_object_or_404, mock_hooks
    ):
        """
        Tests that a unicode 'Content-Disposition' header (for a response using
        a storage backend that doesn't expose filesystem paths) doesn't cause an
        error if encoded differently.
        """
        # Create a mock document to hit the correct code path.
        mock_doc = mock.Mock()
        mock_doc.filename = "TÈST.doc"
        mock_doc.file = StringIO("file-like object" * 10)
        mock_doc.file.path = None
        mock_doc.file.url = None
        mock_doc.file.size = 30
        mock_get_object_or_404.return_value = mock_doc

        # Bypass 'before_serve_document' hooks
        mock_hooks.get_hooks.return_value = []

        response = self.client.get(
            reverse("wagtaildocs_serve", args=(self.document.id, mock_doc.filename))
        )
        self.assertEqual(response.status_code, 200)

        # str.encode raises UnicodeEncodeError (not UnicodeDecodeError) when a
        # character cannot be represented in the target codec; the original
        # except clauses could never fire, so these checks always "passed".
        try:
            response["Content-Disposition"].encode("ascii")
        except UnicodeEncodeError:
            self.fail(
                "Content-Disposition with unicode characters failed ascii encoding."
            )
        try:
            response["Content-Disposition"].encode("latin-1")
        except UnicodeEncodeError:
            self.fail(
                "Content-Disposition with unicode characters failed latin-1 encoding."
            )
| wagtail/wagtail | wagtail/documents/tests/test_views.py | Python | bsd-3-clause | 14,403 |
# yellowbrick.classifier.rocauc
# Implements visual ROC/AUC curves for classification evaluation.
#
# Author: Rebecca Bilbro <rbilbro@districtdatalabs.com>
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Author: Neal Humphrey
# Created: Wed May 18 12:39:40 2016 -0400
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: rocauc.py [5388065] neal@nhumphrey.com $
"""
Implements visual ROC/AUC curves for classification evaluation.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from ..exceptions import ModelError
from ..exceptions import YellowbrickValueError
from ..style.palettes import LINE_COLOR
from .base import ClassificationScoreVisualizer
from scipy import interp
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_curve
# Dictionary keys for ROCAUC
MACRO = "macro"
MICRO = "micro"
##########################################################################
## ROCAUC Visualizer
##########################################################################
class ROCAUC(ClassificationScoreVisualizer):
    """
    Receiver Operating Characteristic (ROC) curves are a measure of a
    classifier's predictive quality that compares and visualizes the tradeoff
    between the models' sensitivity and specificity. The ROC curve displays
    the true positive rate on the Y axis and the false positive rate on the
    X axis on both a global average and per-class basis. The ideal point is
    therefore the top-left corner of the plot: false positives are zero and
    true positives are one.

    This leads to another metric, area under the curve (AUC), a computation
    of the relationship between false positives and true positives. The higher
    the AUC, the better the model generally is. However, it is also important
    to inspect the "steepness" of the curve, as this describes the
    maximization of the true positive rate while minimizing the false positive
    rate. Generalizing "steepness" usually leads to discussions about
    convexity, which we do not get into here.

    Parameters
    ----------
    model : estimator
        Must be a classifier, otherwise raises YellowbrickTypeError

    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If None is passed in the current axes
        will be used (or generated if required).

    classes : list
        A list of class names for the legend. If classes is None and a y value
        is passed to fit then the classes are selected from the target vector.
        Note that the curves must be computed based on what is in the target
        vector passed to the ``score()`` method. Class names are used for
        labeling only and must be in the correct order to prevent confusion.

    micro : bool, default = True
        Plot the micro-averages ROC curve, computed from the sum of all true
        positives and false positives across all classes. Micro is not defined
        for binary classification problems with estimators with only a
        decision_function method.

    macro : bool, default = True
        Plot the macro-averages ROC curve, which simply takes the average of
        curves across all classes. Macro is not defined for binary
        classification problems with estimators with only a decision_function
        method.

    per_class : bool, default = True
        Plot the ROC curves for each individual class. This should be set
        to false if only the macro or micro average curves are required. Per-
        class classification is not defined for binary classification problems
        with estimators with only a decision_function method.

    kwargs : keyword arguments passed to the super class.
        Currently passing in hard-coded colors for the Receiver Operating
        Characteristic curve and the diagonal.
        These will be refactored to a default Yellowbrick style.

    Attributes
    ----------
    score_ : float
        Global accuracy score, unless micro or macro scores are requested

    Notes
    -----
    ROC curves are typically used in binary classification, and in fact the
    Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
    binary classifiers. As a result it is necessary to binarize the output or
    to use one-vs-rest or one-vs-all strategies of classification. The
    visualizer does its best to handle multiple situations, but exceptions can
    arise from unexpected models or outputs.

    Another important point is the relationship of class labels specified on
    initialization to those drawn on the curves. The classes are not used to
    constrain ordering or filter curves; the ROC computation happens on the
    unique values specified in the target vector to the ``score`` method. To
    ensure the best quality visualization, do not use a LabelEncoder for this
    and do not pass in class labels.

    .. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
    .. todo:: Allow the class list to filter the curves on the visualization.

    Examples
    --------
    >>> from yellowbrick.classifier import ROCAUC
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.model_selection import train_test_split
    >>> data = load_data("occupancy")
    >>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y)
    >>> oz = ROCAUC(LogisticRegression())
    >>> oz.fit(X_train, y_train)
    >>> oz.score(X_test, y_test)
    >>> oz.poof()
    """

    def __init__(self, model, ax=None, classes=None,
                 micro=True, macro=True, per_class=True, **kwargs):
        super(ROCAUC, self).__init__(model, ax=ax, classes=classes, **kwargs)

        # Set the visual parameters for ROCAUC
        self.micro = micro
        self.macro = macro
        self.per_class = per_class

    def score(self, X, y=None, **kwargs):
        """
        Generates the predicted target values using the Scikit-Learn
        estimator.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features

        y : ndarray or Series of length n
            An array or series of target or class values

        Returns
        -------
        score_ : float
            Global accuracy unless micro or macro scores are requested.
        """
        # Compute the predictions for the test data
        y_pred = self._get_y_scores(X)

        # _get_y_scores calls either decision_function or predict_proba,
        # which should return a 2D array. In a binary classification using
        # an estimator with only a decision_function, y_pred will instead be
        # 1D, meaning only one curve can be plotted; record that so draw()
        # knows which branch to take.
        if y_pred.ndim == 1:
            self._binary_decision = True

            # Micro/macro/per-class curves are undefined for a single
            # decision-function curve, so reject those options explicitly.
            if self.micro or self.macro or self.per_class:
                raise ModelError(
                    "Micro, macro, and per-class scores are not defined for "
                    "binary classification for estimators with only "
                    "decision_function methods; set micro, macro, and "
                    "per-class params to False."
                )
        else:
            self._binary_decision = False
            # If it's not a binary decision, at least one of micro, macro, or
            # per_class must be True
            if not self.micro and not self.macro and not self.per_class:
                raise YellowbrickValueError(
                    "no curves will be drawn; specify micro, macro, or per_class"
                )

        # Classes may be label encoded so only use what's in y to compute.
        # The self.classes_ attribute will be used as names for labels.
        classes = np.unique(y)
        n_classes = len(classes)

        # Store the false positive rate, true positive rate and curve info.
        self.fpr = dict()
        self.tpr = dict()
        self.roc_auc = dict()

        if self._binary_decision:
            # Compute the single ROC curve and its area
            self.fpr[0], self.tpr[0], _ = roc_curve(y, y_pred)
            self.roc_auc[0] = auc(self.fpr[0], self.tpr[0])
        else:
            # Otherwise compute the ROC curve and ROC area for each class
            for i, c in enumerate(classes):
                self.fpr[i], self.tpr[i], _ = roc_curve(y, y_pred[:, i], pos_label=c)
                self.roc_auc[i] = auc(self.fpr[i], self.tpr[i])

            # Compute micro average
            if self.micro:
                self._score_micro_average(y, y_pred, classes, n_classes)

            # Compute macro average
            if self.macro:
                self._score_macro_average(n_classes)

        # Draw the Curves
        self.draw()

        # Set ``score_`` with a micro > macro > accuracy priority, matching
        # the documented behavior. The previous implementation assigned all
        # three unconditionally, so the micro/macro AUC was always clobbered
        # by the estimator's plain accuracy score.
        if self.micro:
            self.score_ = self.roc_auc[MICRO]
        elif self.macro:
            self.score_ = self.roc_auc[MACRO]
        else:
            self.score_ = self.estimator.score(X, y)
        return self.score_

    def draw(self):
        """
        Renders ROC-AUC plot.
        Called internally by score, possibly more than once

        Returns
        -------
        ax : the axis with the plotted figure
        """
        colors = self.colors[0:len(self.classes_)]
        n_classes = len(colors)

        # If it's a binary decision, plot the single ROC curve
        if self._binary_decision:
            self.ax.plot(
                self.fpr[0], self.tpr[0],
                label='ROC for binary decision, AUC = {:0.2f}'.format(
                    self.roc_auc[0]
                )
            )

        # If per-class plotting is requested, plot ROC curves for each class
        if self.per_class:
            for i, color in zip(range(n_classes), colors):
                self.ax.plot(
                    self.fpr[i], self.tpr[i], color=color,
                    label='ROC of class {}, AUC = {:0.2f}'.format(
                        self.classes_[i], self.roc_auc[i],
                    )
                )

        # If requested, plot the ROC curve for the micro average
        if self.micro:
            self.ax.plot(
                self.fpr[MICRO], self.tpr[MICRO], linestyle="--",
                color=self.colors[len(self.classes_) - 1],
                label='micro-average ROC curve, AUC = {:0.2f}'.format(
                    self.roc_auc[MICRO],  # use the constant, not a raw string
                )
            )

        # If requested, plot the ROC curve for the macro average
        if self.macro:
            self.ax.plot(
                self.fpr[MACRO], self.tpr[MACRO], linestyle="--",
                color=self.colors[len(self.classes_) - 1],
                label='macro-average ROC curve, AUC = {:0.2f}'.format(
                    self.roc_auc[MACRO],
                )
            )

        # Plot the line of no discrimination to compare the curve to.
        self.ax.plot([0, 1], [0, 1], linestyle=':', c=LINE_COLOR)
        return self.ax

    def finalize(self, **kwargs):
        """
        Finalize executes any subclass-specific axes finalization steps.
        The user calls poof and poof calls finalize.

        Parameters
        ----------
        kwargs: generic keyword arguments.
        """
        # Set the title and add the legend
        self.set_title('ROC Curves for {}'.format(self.name))
        self.ax.legend(loc='lower right', frameon=True)

        # Set the limits for the ROC/AUC (always between 0 and 1)
        self.ax.set_xlim([0.0, 1.0])
        self.ax.set_ylim([0.0, 1.0])

        # Set x and y axis labels (typo 'Postive' corrected)
        self.ax.set_ylabel('True Positive Rate')
        self.ax.set_xlabel('False Positive Rate')

    def _get_y_scores(self, X):
        """
        The ``roc_curve`` metric requires target scores that can either be the
        probability estimates of the positive class, confidence values or non-
        thresholded measure of decisions (as returned by "decision_function").

        This method computes the scores by resolving the estimator methods
        that retrieve these values.

        .. todo:: implement confidence values metric.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features -- generally the test data
            that is associated with y_true values.
        """
        # The resolution order of scoring functions
        attrs = (
            'predict_proba',
            'decision_function',
        )

        # Return the first resolved function
        for attr in attrs:
            try:
                method = getattr(self.estimator, attr, None)
                if method:
                    return method(X)
            except AttributeError:
                # Some Scikit-Learn estimators have both probability and
                # decision functions but override __getattr__ and raise an
                # AttributeError on access.
                # Note that because of the ordering of our attrs above,
                # estimators with both will *only* ever use probability.
                continue

        # If we've gotten this far, raise an error
        raise ModelError(
            "ROCAUC requires estimators with predict_proba or "
            "decision_function methods."
        )

    def _score_micro_average(self, y, y_pred, classes, n_classes):
        """
        Compute the micro average scores for the ROCAUC curves.
        """
        # Convert y to binarized array for micro and macro scores
        y = label_binarize(y, classes=classes)
        # label_binarize produces a single column for two classes; stack the
        # complement so the shape matches y_pred.
        if n_classes == 2:
            y = np.hstack((1 - y, y))

        # Compute micro-average
        self.fpr[MICRO], self.tpr[MICRO], _ = roc_curve(y.ravel(), y_pred.ravel())
        self.roc_auc[MICRO] = auc(self.fpr[MICRO], self.tpr[MICRO])

    def _score_macro_average(self, n_classes):
        """
        Compute the macro average scores for the ROCAUC curves.
        """
        # Gather all FPRs
        all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))
        avg_tpr = np.zeros_like(all_fpr)

        # Compute the averages per class. ``np.interp`` replaces the
        # deprecated (and since removed) ``scipy.interp`` alias.
        for i in range(n_classes):
            avg_tpr += np.interp(all_fpr, self.fpr[i], self.tpr[i])

        # Finalize the average
        avg_tpr /= n_classes

        # Store the macro averages
        self.fpr[MACRO] = all_fpr
        self.tpr[MACRO] = avg_tpr
        self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO])
##########################################################################
## Quick method for ROCAUC
##########################################################################
def roc_auc(model, X, y=None, ax=None, **kwargs):
    """ROCAUC Quick method:

    Receiver Operating Characteristic (ROC) curves are a measure of a
    classifier's predictive quality that compares and visualizes the tradeoff
    between the models' sensitivity and specificity. The ROC curve displays
    the true positive rate on the Y axis and the false positive rate on the
    X axis on both a global average and per-class basis. The ideal point is
    therefore the top-left corner of the plot: false positives are zero and
    true positives are one. The area under the curve (AUC) summarizes this
    relationship; the higher the AUC, the better the model generally is.

    Parameters
    ----------
    model : the Scikit-Learn estimator
        Should be an instance of a classifier, else the __init__ will
        return an error.

    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    ax : the axis to plot the figure on.

    kwargs : dict
        Keyword arguments forwarded to the ROCAUC visualizer only
        (``classes``, ``micro``, ``macro``, ``per_class``, colors, ...).
        See the ROCAUC class documentation for details.

    Notes
    -----
    The ROC computation happens on the unique values specified in the target
    vector; do not use a LabelEncoder for this and do not pass in class
    labels. See the ROCAUC visualizer for the full discussion.

    .. seealso:: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html

    Examples
    --------
    >>> from yellowbrick.classifier import roc_auc
    >>> from sklearn.linear_model import LogisticRegression
    >>> data = load_data("occupancy")
    >>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
    >>> X = data[features].values
    >>> y = data.occupancy.values
    >>> roc_auc(LogisticRegression(), X, y)

    Returns
    -------
    ax : matplotlib axes
        Returns the axes that the roc-auc curve was drawn on.
    """
    # Instantiate the visualizer; all remaining keyword arguments belong to
    # the visualizer, not the estimator.
    visualizer = ROCAUC(model, ax=ax, **kwargs)

    # Create the train and test splits
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    # Fit and score the visualizer (score calls draw). The visualizer kwargs
    # must NOT be forwarded to fit(): they were already consumed by the
    # constructor and would be passed through to the estimator's fit(),
    # raising a TypeError for options such as ``micro=False``.
    visualizer.fit(X_train, y_train)
    visualizer.score(X_test, y_test)
    visualizer.finalize()

    # Return the axes object on the visualizer
    return visualizer.ax
| pdamodaran/yellowbrick | yellowbrick/classifier/rocauc.py | Python | apache-2.0 | 20,061 |
from __future__ import with_statement
import json
import logging
import os
import sys
import textwrap
from os.path import join, normpath
from tempfile import mkdtemp
import pretend
import pytest
from pip._internal.req import InstallRequirement
from pip._internal.utils.misc import rmtree
from tests.lib import assert_all_changes, create_test_package_with_setup
from tests.lib.local_repos import local_checkout, local_repo
@pytest.mark.network
def test_basic_uninstall(script):
    """
    Test basic install and uninstall.
    """
    install_result = script.pip('install', 'INITools==0.2')
    assert join(script.site_packages, 'initools') in install_result.files_created, (
        sorted(install_result.files_created.keys())
    )
    # Importing the package forces generation of __pycache__ on pythons that
    # support it, so the uninstall must clean those files up as well.
    script.run('python', '-c', "import initools")
    uninstall_result = script.pip('uninstall', 'INITools', '-y')
    assert_all_changes(
        install_result, uninstall_result, [script.venv / 'build', 'cache']
    )
def test_basic_uninstall_distutils(script):
    """
    Test that uninstalling a distutils-installed project is refused with a
    helpful error message (pip cannot tell which files belong to it).
    """
    script.scratch_path.join("distutils_install").mkdir()
    pkg_path = script.scratch_path / 'distutils_install'
    pkg_path.join("setup.py").write(textwrap.dedent("""
        from distutils.core import setup
        setup(
            name='distutils-install',
            version='0.1',
        )
    """))
    # The install result is not inspected; the original bound it to a name
    # that was immediately clobbered by the 'pip list' result below.
    script.run('python', pkg_path / 'setup.py', 'install')
    result = script.pip('list', '--format=json')
    assert {"name": "distutils-install", "version": "0.1"} \
        in json.loads(result.stdout)
    result = script.pip('uninstall', 'distutils_install', '-y',
                        expect_stderr=True, expect_error=True)
    assert (
        "Cannot uninstall 'distutils-install'. It is a distutils installed "
        "project and thus we cannot accurately determine which files belong "
        "to it which would lead to only a partial uninstall."
    ) in result.stderr
@pytest.mark.network
def test_basic_uninstall_with_scripts(script):
    """
    Uninstall an easy_installed package with scripts.
    """
    install_result = script.run('easy_install', 'PyLogo', expect_stderr=True)
    easy_install_pth = script.site_packages / 'easy-install.pth'
    # easy_install records the project name in lower case on Windows.
    pylogo = 'pylogo' if sys.platform == 'win32' else 'PyLogo'
    assert pylogo in install_result.files_updated[easy_install_pth].bytes
    uninstall_result = script.pip('uninstall', 'pylogo', '-y')
    assert_all_changes(
        install_result,
        uninstall_result,
        [script.venv / 'build', 'cache', easy_install_pth],
    )
@pytest.mark.network
def test_uninstall_easy_install_after_import(script):
    """
    Uninstall an easy_installed package after it's been imported
    """
    install_result = script.run('easy_install', 'INITools==0.2', expect_stderr=True)
    # Importing the package forces generation of __pycache__ on pythons that
    # support it; the uninstall must remove those files too.
    script.run('python', '-c', "import initools")
    uninstall_result = script.pip('uninstall', 'INITools', '-y')
    assert_all_changes(
        install_result,
        uninstall_result,
        [
            script.venv / 'build',
            'cache',
            script.site_packages / 'easy-install.pth',
        ]
    )
@pytest.mark.network
def test_uninstall_trailing_newline(script):
    """
    Uninstall behaves appropriately if easy-install.pth
    lacks a trailing newline
    """
    script.run('easy_install', 'INITools==0.2', expect_stderr=True)
    script.run('easy_install', 'PyLogo', expect_stderr=True)
    easy_install_pth = script.site_packages_path / 'easy-install.pth'

    # Strip the trailing newline from easy-install.pth to reproduce the
    # malformed file this test guards against.
    with open(easy_install_pth) as f:
        pth_before = f.read()
    with open(easy_install_pth, 'w') as f:
        f.write(pth_before.rstrip())

    # Uninstall initools and re-read the pth file.
    script.pip('uninstall', 'INITools', '-y')
    with open(easy_install_pth) as f:
        pth_after = f.read()

    # Only the initools entry should have been removed; every other line
    # must survive intact.
    expected_lines = [
        line for line in pth_before.splitlines()
        if 'initools' not in line.lower()
    ]
    assert pth_after.splitlines() == expected_lines
@pytest.mark.network
def test_basic_uninstall_namespace_package(script):
    """
    Uninstall a distribution with a namespace package without clobbering
    the namespace and everything in it.
    """
    result = script.pip('install', 'pd.requires==0.0.3', expect_error=True)
    assert join(script.site_packages, 'pd') in result.files_created, (
        sorted(result.files_created.keys())
    )
    result2 = script.pip('uninstall', 'pd.find', '-y', expect_error=True)
    # The shared 'pd' namespace dir must survive ...
    assert join(script.site_packages, 'pd') not in result2.files_deleted, (
        sorted(result2.files_deleted.keys())
    )
    # ... while the uninstalled sub-package is removed.
    assert join(script.site_packages, 'pd', 'find') in result2.files_deleted, (
        sorted(result2.files_deleted.keys())
    )
def test_uninstall_overlapping_package(script, data):
    """
    Uninstalling a distribution that adds modules to a pre-existing package
    should only remove those added modules, not the rest of the existing
    package.
    See: GitHub issue #355 (pip uninstall removes things it didn't install)
    """
    parent_pkg = data.packages.join("parent-0.1.tar.gz")
    child_pkg = data.packages.join("child-0.1.tar.gz")
    # Install 'parent' first, then 'child' which drops a plugin module into
    # the already-existing 'parent' package directory.
    result1 = script.pip('install', parent_pkg, expect_error=False)
    assert join(script.site_packages, 'parent') in result1.files_created, (
        sorted(result1.files_created.keys())
    )
    result2 = script.pip('install', child_pkg, expect_error=False)
    assert join(script.site_packages, 'child') in result2.files_created, (
        sorted(result2.files_created.keys())
    )
    assert normpath(
        join(script.site_packages, 'parent/plugins/child_plugin.py')
    ) in result2.files_created, sorted(result2.files_created.keys())
    # The import forces the generation of __pycache__ if the version of python
    # supports it
    script.run('python', '-c', "import parent.plugins.child_plugin, child")
    result3 = script.pip('uninstall', '-y', 'child', expect_error=False)
    # BUG FIX: the failure diagnostic previously dumped result3.files_created
    # (always empty for an uninstall); show the deleted files instead so a
    # failure actually explains what happened.
    assert join(script.site_packages, 'child') in result3.files_deleted, (
        sorted(result3.files_deleted.keys())
    )
    assert normpath(
        join(script.site_packages, 'parent/plugins/child_plugin.py')
    ) in result3.files_deleted, sorted(result3.files_deleted.keys())
    # The pre-existing 'parent' package itself must be left alone.
    assert join(script.site_packages, 'parent') not in result3.files_deleted, (
        sorted(result3.files_deleted.keys())
    )
    # Additional check: uninstalling 'child' should return things to the
    # previous state, without unintended side effects.
    assert_all_changes(result2, result3, [])
@pytest.mark.parametrize("console_scripts",
                         ["test_ = distutils_install",
                          "test_:test_ = distutils_install"])
def test_uninstall_entry_point(script, console_scripts):
    """
    Test uninstall package with two or more entry points in the same section,
    whose name contain a colon.
    """
    pkg_name = 'ep_install'
    pkg_path = create_test_package_with_setup(
        script,
        name=pkg_name,
        version='0.1',
        entry_points={"console_scripts": [console_scripts, ],
                      "pip_test.ep":
                      ["ep:name1 = distutils_install",
                       "ep:name2 = distutils_install"]
                      }
    )
    # Derive the wrapper script filename from the entry-point spec.
    script_name = script.bin_path.join(console_scripts.split('=')[0].strip())
    if sys.platform == 'win32':
        script_name += '.exe'
    result = script.pip('install', pkg_path)
    assert script_name.exists
    result = script.pip('list', '--format=json')
    assert {"name": "ep-install", "version": "0.1"} \
        in json.loads(result.stdout)
    # Uninstall must remove both the wrapper script and the listing entry.
    script.pip('uninstall', 'ep_install', '-y')
    assert not script_name.exists
    result2 = script.pip('list', '--format=json')
    assert {"name": "ep-install", "version": "0.1"} \
        not in json.loads(result2.stdout)
def test_uninstall_gui_scripts(script):
    """
    Make sure that uninstall removes gui scripts
    """
    pkg_name = "gui_pkg"
    pkg_path = create_test_package_with_setup(
        script,
        name=pkg_name,
        version='0.1',
        entry_points={"gui_scripts": ["test_ = distutils_install", ], }
    )
    script_name = script.bin_path.join('test_')
    # GUI wrappers get an .exe suffix on Windows.
    if sys.platform == 'win32':
        script_name += '.exe'
    script.pip('install', pkg_path)
    assert script_name.exists
    script.pip('uninstall', pkg_name, '-y')
    assert not script_name.exists
@pytest.mark.network
def test_uninstall_console_scripts(script):
    """
    Test uninstalling a package with more files (console_script entry points,
    extra directories).
    """
    # Install 'discover', which ships a console-script wrapper.
    result = script.pip('install', 'discover', expect_error=True)
    expected_script = script.bin / 'discover' + script.exe
    assert expected_script in result.files_created, (
        sorted(result.files_created.keys())
    )
    # Uninstalling must undo everything except the build dir and cache.
    result2 = script.pip('uninstall', 'discover', '-y', expect_error=True)
    assert_all_changes(result, result2, [script.venv / 'build', 'cache'])
@pytest.mark.network
def test_uninstall_easy_installed_console_scripts(script):
    """
    Test uninstalling package with console_scripts that is easy_installed.
    """
    args = ['easy_install']
    args.append('discover')
    result = script.run(*args, **{"expect_stderr": True})
    # easy_install must have created the console-script wrapper.
    assert script.bin / 'discover' + script.exe in result.files_created, (
        sorted(result.files_created.keys())
    )
    result2 = script.pip('uninstall', 'discover', '-y')
    # Everything except build dir, cache and the rewritten .pth file must be
    # restored to the pre-install state.
    assert_all_changes(
        result,
        result2,
        [
            script.venv / 'build',
            'cache',
            script.site_packages / 'easy-install.pth',
        ]
    )
@pytest.mark.network
def test_uninstall_editable_from_svn(script, tmpdir):
    """
    Test uninstalling an editable installation from svn.
    """
    result = script.pip(
        'install', '-e',
        '%s#egg=initools' % local_checkout(
            'svn+http://svn.colorstudy.com/INITools/trunk',
            tmpdir.join("cache"),
        ),
    )
    result.assert_installed('INITools')
    result2 = script.pip('uninstall', '-y', 'initools')
    # The source checkout under src/ must survive the uninstall.
    assert (script.venv / 'src' / 'initools' in result2.files_after)
    assert_all_changes(
        result,
        result2,
        [
            script.venv / 'src',
            script.venv / 'build',
            script.site_packages / 'easy-install.pth'
        ],
    )
@pytest.mark.network
def test_uninstall_editable_with_source_outside_venv(script, tmpdir):
    """
    Test uninstalling editable install from existing source outside the venv.

    Delegates to ``_test_uninstall_editable_with_source_outside_venv`` with a
    temp directory that is always cleaned up afterwards.
    """
    cache_dir = tmpdir.join("cache")
    # BUG FIX: create the temp dir *before* entering the try block — if
    # mkdtemp() itself raised, `temp` was unbound and the finally clause
    # died with a NameError that masked the original error.
    temp = mkdtemp()
    try:
        tmpdir = join(temp, 'pip-test-package')
        _test_uninstall_editable_with_source_outside_venv(
            script,
            tmpdir,
            cache_dir,
        )
    finally:
        rmtree(temp)
def _test_uninstall_editable_with_source_outside_venv(
        script, tmpdir, cache_dir):
    """Helper: clone a repo outside the venv, install it editable, uninstall.

    The uninstall must restore everything except the build dir and the
    rewritten easy-install.pth; the external source checkout is untouched.
    """
    result = script.run(
        'git', 'clone',
        local_repo(
            'git+git://github.com/pypa/pip-test-package',
            cache_dir,
        ),
        tmpdir,
        expect_stderr=True,
    )
    result2 = script.pip('install', '-e', tmpdir)
    # Editable installs are recorded via an .egg-link file in site-packages.
    assert join(
        script.site_packages, 'pip-test-package.egg-link'
    ) in result2.files_created, list(result2.files_created.keys())
    result3 = script.pip('uninstall', '-y',
                         'pip-test-package', expect_error=True)
    assert_all_changes(
        result,
        result3,
        [script.venv / 'build', script.site_packages / 'easy-install.pth'],
    )
@pytest.mark.network
def test_uninstall_from_reqs_file(script, tmpdir):
    """
    Test uninstall from a requirements file.

    The second requirements file adds index/find-links options which
    ``pip uninstall -r`` must silently ignore.
    """
    script.scratch_path.join("test-req.txt").write(
        textwrap.dedent("""
            -e %s#egg=initools
            # and something else to test out:
            PyLogo<0.4
        """) %
        local_checkout(
            'svn+http://svn.colorstudy.com/INITools/trunk',
            tmpdir.join("cache")
        )
    )
    result = script.pip('install', '-r', 'test-req.txt')
    script.scratch_path.join("test-req.txt").write(
        textwrap.dedent("""
            # -f, -i, and --extra-index-url should all be ignored by uninstall
            -f http://www.example.com
            -i http://www.example.com
            --extra-index-url http://www.example.com
            -e %s#egg=initools
            # and something else to test out:
            PyLogo<0.4
        """) %
        local_checkout(
            'svn+http://svn.colorstudy.com/INITools/trunk',
            tmpdir.join("cache")
        )
    )
    result2 = script.pip('uninstall', '-r', 'test-req.txt', '-y')
    assert_all_changes(
        result,
        result2,
        [
            script.venv / 'build',
            script.venv / 'src',
            script.scratch / 'test-req.txt',
            script.site_packages / 'easy-install.pth',
        ],
    )
def test_uninstallpathset_no_paths(caplog):
    """
    Test UninstallPathSet logs notification when there are no paths to
    uninstall
    """
    from pip._internal.req.req_uninstall import UninstallPathSet
    from pkg_resources import get_distribution
    caplog.set_level(logging.INFO)
    # Use the installed pip distribution itself as a convenient real dist.
    test_dist = get_distribution('pip')
    uninstall_set = UninstallPathSet(test_dist)
    uninstall_set.remove()  # with no files added to set
    assert (
        "Can't uninstall 'pip'. No files were found to uninstall."
        in caplog.text
    )
def test_uninstall_non_local_distutils(caplog, monkeypatch, tmpdir):
    """Uninstalling a distutils dist located outside the environment must
    leave its files in place (here: the stubbed egg-info file)."""
    einfo = tmpdir.join("thing-1.0.egg-info")
    # Create an empty egg-info file to stand in for the metadata.
    with open(einfo, "wb"):
        pass
    dist = pretend.stub(
        key="thing",
        project_name="thing",
        egg_info=einfo,
        location=einfo,
        _provider=pretend.stub(),
    )
    # Make pkg_resources hand back our stub for any lookup.
    get_dist = pretend.call_recorder(lambda x: dist)
    monkeypatch.setattr("pip._vendor.pkg_resources.get_distribution", get_dist)
    req = InstallRequirement.from_line("thing")
    req.uninstall()
    # The non-local metadata file must not have been deleted.
    assert os.path.exists(einfo)
def test_uninstall_wheel(script, data):
    """
    Test uninstalling a wheel
    """
    package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl")
    result = script.pip('install', package, '--no-index')
    dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
    assert dist_info_folder in result.files_created
    result2 = script.pip('uninstall', 'simple.dist', '-y')
    # A wheel install must be fully reversible: no residue at all.
    assert_all_changes(result, result2, [])
def test_uninstall_setuptools_develop_install(script, data):
    """Try uninstall after setup.py develop followed of setup.py install"""
    pkg_path = data.packages.join("FSPkg")
    script.run('python', 'setup.py', 'develop',
               expect_stderr=True, cwd=pkg_path)
    script.run('python', 'setup.py', 'install',
               expect_stderr=True, cwd=pkg_path)
    list_result = script.pip('list', '--format=json')
    assert {"name": os.path.normcase("FSPkg"), "version": "0.1.dev0"} \
        in json.loads(list_result.stdout), str(list_result)
    # Uninstall both develop and install
    # First uninstall removes the regular (.egg) install ...
    uninstall = script.pip('uninstall', 'FSPkg', '-y')
    assert any(filename.endswith('.egg')
               for filename in uninstall.files_deleted.keys())
    # ... the second removes the develop install's egg-link.
    uninstall2 = script.pip('uninstall', 'FSPkg', '-y')
    assert join(
        script.site_packages, 'FSPkg.egg-link'
    ) in uninstall2.files_deleted, list(uninstall2.files_deleted.keys())
    list_result2 = script.pip('list', '--format=json')
    assert "FSPkg" not in {p["name"] for p in json.loads(list_result2.stdout)}
def test_uninstall_editable_and_pip_install(script, data):
    """Try uninstall after pip install -e after pip install"""
    # SETUPTOOLS_SYS_PATH_TECHNIQUE=raw removes the assumption that `-e`
    # installs are always higher priority than regular installs.
    # This becomes the default behavior in setuptools 25.
    script.environ['SETUPTOOLS_SYS_PATH_TECHNIQUE'] = 'raw'
    pkg_path = data.packages.join("FSPkg")
    script.pip('install', '-e', '.',
               expect_stderr=True, cwd=pkg_path)
    # ensure both are installed with --ignore-installed:
    script.pip('install', '--ignore-installed', '.',
               expect_stderr=True, cwd=pkg_path)
    list_result = script.pip('list', '--format=json')
    assert {"name": "FSPkg", "version": "0.1.dev0"} \
        in json.loads(list_result.stdout)
    # Uninstall both develop and install
    # First uninstall targets the regular install, so no egg-link goes yet.
    uninstall = script.pip('uninstall', 'FSPkg', '-y')
    assert not any(filename.endswith('.egg-link')
                   for filename in uninstall.files_deleted.keys())
    # Second uninstall removes the editable install's egg-link.
    uninstall2 = script.pip('uninstall', 'FSPkg', '-y')
    assert join(
        script.site_packages, 'FSPkg.egg-link'
    ) in uninstall2.files_deleted, list(uninstall2.files_deleted.keys())
    list_result2 = script.pip('list', '--format=json')
    assert "FSPkg" not in {p["name"] for p in json.loads(list_result2.stdout)}
def test_uninstall_ignores_missing_packages(script, data):
    """Uninstall of a non existent package prints a warning and exits cleanly
    """
    result = script.pip(
        'uninstall', '-y', 'non-existent-pkg', expect_stderr=True,
    )
    # Warning goes to stderr; exit code must still be 0.
    assert "Skipping non-existent-pkg as it is not installed." in result.stderr
    assert result.returncode == 0, "Expected clean exit"
def test_uninstall_ignores_missing_packages_and_uninstalls_rest(script, data):
    """A missing package in the argument list must not block uninstalling
    the packages that *are* installed."""
    script.pip_install_local('simple')
    result = script.pip(
        'uninstall', '-y', 'non-existent-pkg', 'simple', expect_stderr=True,
    )
    assert "Skipping non-existent-pkg as it is not installed." in result.stderr
    assert "Successfully uninstalled simple" in result.stdout
    assert result.returncode == 0, "Expected clean exit"
| RonnyPfannschmidt/pip | tests/functional/test_uninstall.py | Python | mit | 17,973 |
#!/usr/bin/env python
from __future__ import with_statement
from hvad.test_utils.cli import configure
from hvad.test_utils.tmpdir import temp_dir
import argparse
import sys
def main(test_runner='hvad.test_utils.runners.NormalTestRunner', junit_output_dir='.',
         time_tests=False, verbosity=1, failfast=False, test_labels=None):
    """Configure a throwaway Django environment and run the hvad test suite.

    Exits the process, using the number of test failures as the status code.
    """
    if not test_labels:
        # Default to running the entire ``hvad`` package.
        test_labels = ['hvad']
    # STATIC_ROOT / MEDIA_ROOT live in temp dirs removed when the context exits.
    with temp_dir() as STATIC_ROOT:
        with temp_dir() as MEDIA_ROOT:
            configure(LANGUAGE_CODE='en', TEST_RUNNER=test_runner, JUNIT_OUTPUT_DIR=junit_output_dir,
                      TIME_TESTS=time_tests, STATIC_ROOT=STATIC_ROOT, MEDIA_ROOT=MEDIA_ROOT)
            # Django imports must happen only after settings are configured.
            from django.conf import settings
            from django.test.utils import get_runner
            TestRunner = get_runner(settings)
            test_runner = TestRunner(pattern='*.py', verbosity=verbosity, interactive=False, failfast=failfast)
            failures = test_runner.run_tests(test_labels)
            sys.exit(failures)
if __name__ == '__main__':
    # Command-line entry point: parse options and hand off to main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--jenkins', action='store_true', default=False,
                        dest='jenkins')
    parser.add_argument('--jenkins-data-dir', default='.', dest='jenkins_data_dir')
    parser.add_argument('--failfast', action='store_true', default=False,
                        dest='failfast')
    # BUG FIX: without type=int, "--verbosity 2" was handed to the Django
    # test runner as the string "2" instead of an integer.
    parser.add_argument('--verbosity', type=int, default=1)
    parser.add_argument('--time-tests', action='store_true', default=False,
                        dest='time_tests')
    parser.add_argument('test_labels', nargs='*')
    args = parser.parse_args()
    # argparse guarantees these attributes exist, so the previous
    # getattr(..., default) calls were redundant.
    if args.jenkins:
        test_runner = 'hvad.test_utils.runners.JenkinsTestRunner'
    else:
        test_runner = 'hvad.test_utils.runners.NormalTestRunner'
    junit_output_dir = args.jenkins_data_dir
    time_tests = args.time_tests
    test_labels = ['hvad.%s' % label for label in args.test_labels]
    main(test_runner=test_runner, junit_output_dir=junit_output_dir, time_tests=time_tests,
         verbosity=args.verbosity, failfast=args.failfast, test_labels=test_labels)
| promil23/django-hvad | runtests.py | Python | bsd-3-clause | 2,140 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sys import stdin
from decimal import Decimal, getcontext
__author__ = 'litleleprikon'
def main(lines=None):
    """Streaming reducer: emit the per-key mean of tab-separated records.

    Reads ``key\\tvalue`` lines (grouped by key, as delivered by the Hadoop
    shuffle) and prints ``key\\tmean`` for each key group.

    Args:
        lines: optional iterable of input lines. Defaults to ``stdin`` so the
            script still works unchanged as a Hadoop streaming reducer.
    """
    if lines is None:
        lines = stdin
    getcontext().prec = 6
    # Decimal(0) instead of Decimal(0.0): avoid pointless binary-float round trip.
    last_key, count, last_sum = None, 0, Decimal(0)
    for line in lines:
        key, value = line.split('\t', maxsplit=1)
        if last_key is not None and last_key != key:
            # Key changed: emit the mean for the group just finished.
            result = last_sum/count
            print('{}\t{}'.format(last_key, result))
            count, last_sum = 0, Decimal(0)
        last_key = key
        # Decimal() tolerates the trailing newline left on `value`.
        last_sum += Decimal(value)
        count += 1
    # Emit the final group — but only if any input was seen at all.
    # (BUG FIX: the unconditional print crashed with ZeroDivisionError and a
    # bogus "None" key when stdin was empty.)
    if last_key is not None:
        print('{}\t{}'.format(last_key, last_sum/count))
if __name__ == '__main__':
    main()
| litleleprikon/bachelor_paper | reducer.py | Python | mit | 669 |
#!/usr/bin/python
#
# Copyright 2018 Carl Anderson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A script to query command history from a sqlite3 database.
This script fetches data from a command history database, using one of several
user-defined queries.
TOOD(cpa): add logging to this at some point.
"""
from __future__ import print_function
__author__ = 'Carl Anderson (carl.anderson@gmail.com)'
__version__ = '0.8r2'
import csv
import os
import re
import sys
# Allow the local advanced_shell_history library to be imported.
_LIB = '/usr/local/lib'
if _LIB not in sys.path:
sys.path.append(_LIB)
from advanced_shell_history import util
class Flags(util.Flags):
  """A class to manage all the flags for the command logger."""
  # (short, long, metavar, type, help) — value-taking options.
  arguments = (
    ('d', 'database', 'DB', str, 'a history database to query'),
    ('f', 'format', 'FMT', str, 'a format to display results'),
    ('l', 'limit', 'LINES', int, 'a limit to the number of lines returned'),
    ('p', 'print_query', 'NAME', str, 'print the query SQL'),
    ('q', 'query', 'NAME', str, 'the name of the saved query to execute'),
  )
  # (short, long, help) — boolean switches.
  flags = (
    ('F', 'list_formats', 'display all available formats'),
    ('H', 'hide_headings', 'hide column headings from query results'),
    ('Q', 'list_queries', 'display all saved queries'),
  )
  def __init__(self):
    """Initialize the Flags."""
    util.Flags.__init__(self, Flags.arguments, Flags.flags)
class Queries(object):
  """A class to store all the queries available to ash_query.py.
  Queries are parsed from /usr/local/etc/advanced-shell-history/queries and
  ~/.ash/queries and are made available to the command line utility.
  TODO(cpa): if there is an error in the file, something should be printed.
  """
  queries = []
  show_headings = True
  parser = re.compile(r"""
    \s*(?P<query_name>[A-Za-z0-9_-]+)\s*:\s*{\s*
      description\s*:\s*
      (?P<description>
        "([^"]|\\")*" # A double-quoted string.
      )\s*
      sql\s*:\s*{
        (?P<sql>
          (
            [$]{[^}]*} | # Shell variable expressions: ${FOO} or ${BAR:-0}
            [^}] # Everything else in the query.
          )*
        )
      }\s*
    }""", re.VERBOSE)
  @classmethod
  def Init(cls):
    """Parse query files once and populate cls.queries; later calls no-op."""
    if cls.queries: return
    # Load the queries from the system query file, and also the user file.
    data = []
    system_queries = util.Config().GetString('SYSTEM_QUERY_FILE')
    user_queries = os.path.join(os.getenv('HOME'), '.ash', 'queries')
    for filename in (system_queries, user_queries):
      if not filename or not os.path.exists(filename): continue
      # Drop comment lines and blank lines; strip the trailing newline.
      lines = [x for x in open(filename).readlines() if x and x[0] != '#']
      data.extend([x[:-1] for x in lines if x[:-1]])
    # Parse the loaded config files.
    cls.queries = {}  # {name: (description, sql)}
    for match in cls.parser.finditer('\n'.join(data)):
      query_name = match.group('query_name')
      description = match.group('description') or '""'
      # Strip the surrounding double quotes from the description.
      cls.queries[query_name] = (description[1:-1], match.group('sql'))
  @classmethod
  def Get(cls, query_name):
    """Return (raw_template, expanded_sql) or (None, None) if unknown.
    The template is piped through a shell heredoc so ${VAR} expressions are
    expanded with the user's environment.  NOTE(review): this executes a
    shell with query text interpolated — acceptable only because query files
    are local, trusted config.
    """
    if not query_name or not query_name in cls.queries: return (None, None)
    raw = cls.queries[query_name][1]
    sql = os.popen('/bin/cat <<EOF_ASH_SQL\n%s\nEOF_ASH_SQL' % raw).read()
    return (raw, sql)
  @classmethod
  def PrintQueries(cls):
    """Print a two-column table of query names and descriptions."""
    data = sorted([(query, desc) for query, (desc, _) in cls.queries.items()])
    data.insert(0, ['Query', 'Description'])
    AlignedFormatter.PrintRows(data)
class Formatter(object):
  """A base class for an object that formats query results into a stream."""
  formatters = []  # registry of all instantiated formatters
  separator = '  '
  show_headings = True
  def __init__(self, name, desc):
    # Each constructed formatter self-registers for PrintTypes()/Get().
    Formatter.formatters.append(self)
    self.name = name
    self.desc = desc
  @classmethod
  def PrintTypes(cls):
    """Print a two-column table of formatter names and descriptions."""
    data = sorted([(x.name, x.desc) for x in cls.formatters])
    data.insert(0, ['Format', 'Description'])
    AlignedFormatter.PrintRows(data)
  @classmethod
  def Get(cls, name):
    """Return the registered formatter with this name, or None."""
    for fmt in cls.formatters:
      if fmt.name == name:
        return fmt
    return None
  @classmethod
  def GetWidths(cls, rows):
    """Return the display width for each column of `rows` (capped at 80)."""
    widths = [0 for _ in rows[0]]
    max_column_width = 80  # TODO(cpa): make this configurable.
    # Skip the headings row, if that flag was specified.
    if not cls.show_headings:
      rows = rows[1:]
    # Calculate the min widths of each column.
    for row in rows:
      i = 0
      for col in row:
        if col:
          widths[i]= max(widths[i], min(max_column_width, len(str(col))))
          # NOTE(review): `i += 1` sits inside the `if col:` guard here, so a
          # falsy cell (None, '', 0) does not advance the column index and
          # later columns update the wrong width slot — confirm whether it
          # should be dedented one level.
          i += 1
    return widths
class AlignedFormatter(Formatter):
  """Formats rows into left-aligned, space-separated columns."""
  @classmethod
  def PrintRows(cls, rows):
    # Print the result set rows aligned.
    # NOTE(review): unlike CSV/Null formatters, this prints every row even
    # when show_headings is False (only the width calc skips the headings).
    widths = Formatter.GetWidths(rows)
    # Negative width => left-justified '%-Ns' conversion for each column.
    fmt = Formatter.separator.join(['%%%ds' % -width for width in widths])
    for row in rows:
      print(fmt % tuple(row))
  def Print(self, rs):
    AlignedFormatter.PrintRows(rs)
class AutoFormatter(Formatter):
  """Aligned output in which repeated leading-column values are grouped."""
  def GetGroupedLevelCount(self, rows, widths):
    """Get the optimal number of levels to group, minimizing screen area.
    Examine the columns from left to right simulating how much screen space
    would be saved by grouping that column. If there is a net reduction in
    screen 'area', then the column will be grouped. Otherwise it will not.
    Store area of output after simulating grouping at each level successively.
    the rightmost minimum area will be chosen.
    For example, consider the following areas after simulating grouping:
      areas = [100, 90, 92, 90, 140, 281]
    With 1 level of grouping and with 3 levels of grouping we get the same
    screen area, however the rightmost value is chosen, so the return value
    will be 3.
    """
    rows = rows[1:]  # Skip headings.
    XX = len(Formatter.separator)
    # Screen area of the fully un-grouped output: rows x total width.
    width = sum(widths) + XX * (len(widths) - 1)
    length = len(rows)
    min_area = length * width
    areas = [min_area for _ in widths]
    for c in range(len(widths)):
      # Test each row in the column to see if it is a duplicate of the previous
      # row. If so, it will be de-duped in the output. If not, it means an
      # extra row will be added, so we adjust the length variable accordingly.
      prev = None
      for row in rows:
        if prev != row[c]:
          length += 1
          prev = row[c]
      # To calculate the new width, we need to consider both the width of the
      # grouped column and the width of the remaining columns. We also need to
      # consider the width of the indent.
      width = max(width - widths[c], widths[c]) + XX * (c + 1)
      min_area = min(length * width, min_area)
      if c < len(widths) - 1:
        areas[c + 1] = width * length
    # Find the rightmost minimum area from all simulated areas.
    for c in range(len(widths), 0, -1):
      if areas[c - 1] == min_area:
        return c - 1
    return 0
  def Print(self, rs):
    """Prints a result set using the minimum screen space possible."""
    if not rs: return
    widths = Formatter.GetWidths(rs)
    levels = self.GetGroupedLevelCount(rs, widths)
    cols = len(widths)
    # Print the headings.
    # Each grouped heading appears on its own row, with the following row
    # indented one extra separator.
    if Formatter.show_headings:
      for c in range(cols):
        if c < levels:
          grouped_header = '%s\n%s' % (rs[0][c], Formatter.separator * (c + 1))
          sys.stdout.write(grouped_header)
        else:
          parts = ['%%%ds' % -w for w in widths[c:-1]] + ['%s']
          fmt = Formatter.separator.join(parts)
          # NOTE(review): rs[0][c:] is used without tuple(); if the headings
          # row is a list, '%'-formatting with multiple specifiers raises
          # TypeError — the value branch below wraps with tuple(). Confirm.
          print(fmt % rs[0][c:])
          break
    # Print the result set values.
    prev = [None for _ in range(levels)]
    for row in rs[1:]:
      for c in range(cols):
        value = row[c]
        if c < levels:
          if value != prev[c]:
            # Within the grouped range, but the value needs to be printed.
            sys.stdout.write(str(value))
            if c < cols - 1:
              sys.stdout.write('\n' + Formatter.separator * (c + 1))
            # A new group at this level invalidates all deeper group values.
            for x in range(c, levels):
              prev[x] = None
            prev[c] = value
          else:
            # Grouped case: only print the indent.
            sys.stdout.write(Formatter.separator)
        else:
          # Normal case: non-grouped columns.
          parts = ['%%%ds' % -w for w in widths[c:-1]] + ['%s']
          fmt = Formatter.separator.join(parts)
          print(fmt % tuple(row)[c:])
          break
class CSVFormatter(Formatter):
  """Prints a result set with values separated by commas.
  Non-numeric values are quoted, regardless of whether they need quoting.
  """
  def Print(self, rs):
    # Drop the headings row when the user asked to hide it.
    if not Formatter.show_headings:
      rs = rs[1:]
    if rs:
      writer = csv.writer(sys.stdout, quoting=csv.QUOTE_NONNUMERIC)
      for row in rs:
        writer.writerow(tuple(row))
class NullFormatter(Formatter):
  """Prints a result set with values delimited by a null character (\0)."""
  def Print(self, rs):
    # Skip the headings row when headings are hidden, then emit each row as
    # a single NUL-joined line.
    rows = rs if Formatter.show_headings else rs[1:]
    for row in rows:
      print('\0'.join(str(col) for col in row))
def InitFormatters():
  """Create instances of each Formatter available to ash_query.py."""
  # Constructing a Formatter registers it in Formatter.formatters.
  AlignedFormatter('aligned', 'Columns are aligned and separated with spaces.')
  AutoFormatter('auto', 'Redundant values are automatically grouped.')
  CSVFormatter('csv', 'Columns are comma separated with strings quoted.')
  NullFormatter('null', 'Columns are null separated with strings unquoted.')
def main(argv):
  """Command-line entry point; returns a process exit status (0 on success)."""
  # Setup.
  util.InitLogging()
  # Print an alert if one was specified.
  flags = Flags()
  # If no arguments were given, it may be best to show --help.
  if len(argv) == 1:
    config = util.Config()
    # Presumably Sets() tests whether the config variable is defined — the
    # default query is then executed instead of printing usage.
    if config.Sets('DEFAULT_QUERY'):
      flags.query = config.GetString('DEFAULT_QUERY')
    elif not config.GetBool('HIDE_USAGE_FOR_NO_ARGS'):
      flags.PrintHelp()
  # Initialize the formatters that will display the results of the query.
  InitFormatters()
  Formatter.show_headings = not flags.hide_headings
  if flags.list_formats:
    Formatter.PrintTypes()
    return 0
  # Read the queries from the config files.
  Queries.Init()
  Queries.show_headings = not flags.hide_headings
  if flags.list_queries:
    Queries.PrintQueries()
  elif flags.print_query:
    raw, sql = Queries.Get(flags.print_query)
    if not raw:
      sys.stderr.write('Query not found: %s\n' % flags.print_query)
      return 1
    # Show both template and expansion only when they actually differ.
    if raw.strip() != sql.strip():
      msg = 'Query: %s\nTemplate Form:\n%s\nActual SQL:\n%s'
      print(msg % (flags.print_query, raw, sql))
    else:
      print('Query: %s\n%s' % (flags.print_query, sql))
  elif flags.query:
    # Get the formatter to be used to print the result set.
    default = util.Config().GetString('DEFAULT_FORMAT') or 'aligned'
    format_name = flags.format or default
    fmt = Formatter.Get(format_name)
    if not fmt:
      sys.stderr.write('Unknown format: %s\n' % format_name)
      return 1
    sql = Queries.Get(flags.query)[1]
    rs = util.Database().Fetch(sql, limit=flags.limit)
    fmt.Print(rs)
  return 0
if __name__ == '__main__':
  # Exit with main()'s return value as the process status code.
  sys.exit(main(sys.argv))
| barabo/advanced-shell-history | python/ash_query.py | Python | apache-2.0 | 11,682 |
from __future__ import print_function, division
import numpy as np
import pytest
import sys
import chronostar.likelihood
sys.path.insert(0,'..')
from chronostar import expectmax as em
from chronostar.synthdata import SynthData
from chronostar.component import SphereComponent
from chronostar import tabletool
from chronostar import expectmax
import chronostar.synthdata as syn
# import chronostar.retired2.measurer as ms
# import chronostar.retired2.converter as cv
#
# def test_calcMedAndSpan():
# """
# Test that the median, and +- 34th percentiles is found correctly
# """
# dx = 10.
# dv = 5.
# dummy_mean = np.array([10,10,10, 5, 5, 5,np.log(dx),np.log(dv),20])
# dummy_std = np.array([1.,1.,1.,1.,1.,1.,0.5, 0.5, 3.])
# assert len(dummy_mean) == len(dummy_std)
# npars = len(dummy_mean)
#
# nsteps = 10000
# nwalkers = 18
#
# dummy_chain = np.array([np.random.randn(nsteps)*std + mean
# for (std, mean) in zip(dummy_std, dummy_mean)]).T
# np.repeat(dummy_chain, 18, axis=0).reshape(nwalkers,nsteps,npars)
#
# med_and_span = em.calcMedAndSpan(dummy_chain)
# assert np.allclose(dummy_mean, med_and_span[:,0], atol=0.1)
# approx_stds = 0.5*(med_and_span[:,1] - med_and_span[:,2])
# assert np.allclose(dummy_std, approx_stds, atol=0.1)
def test_calcMembershipProbs():
    """
    Basic check that differing (log-)overlaps are correctly normalised
    into membership probabilities that sum to one.
    """
    # case 1: equal overlaps -> equal memberships
    star_ols = [10, 10]
    assert np.allclose([.5,.5], em.calc_membership_probs(np.log(star_ols)))
    # case 2: 1:3 overlap ratio -> 25/75 split
    star_ols = [10, 30]
    assert np.allclose([.25,.75], em.calc_membership_probs(np.log(star_ols)))
    # case 3: three components, 1:1:2 ratio
    star_ols = [10, 10, 20]
    assert np.allclose([.25, .25, .5],
                       em.calc_membership_probs(np.log(star_ols)))
def test_expectation():
    """
    Super basic, generates some association stars along
    with some background stars and checks membership allocation
    is correct
    """
    age = 1e-5
    # Two spherical components widely separated in X (0 vs 100) so that
    # membership assignment is unambiguous.
    ass_pars1 = np.array([0, 0, 0, 0, 0, 0, 5., 2., age])
    comp1 = SphereComponent(ass_pars1)
    ass_pars2 = np.array([100., 0, 0, 20, 0, 0, 5., 2., age])
    comp2 = SphereComponent(ass_pars2)
    starcounts = [100,100]
    synth_data = SynthData(pars=[ass_pars1, ass_pars2],
                           starcounts=starcounts)
    synth_data.synthesise_everything()
    tabletool.convert_table_astro2cart(synth_data.table)
    # Ground truth: first 100 stars belong to comp1, the rest to comp2.
    true_memb_probs = np.zeros((np.sum(starcounts), 2))
    true_memb_probs[:starcounts[0], 0] = 1.
    true_memb_probs[starcounts[0]:, 1] = 1.
    # star_means, star_covs = tabletool.buildDataFromTable(synth_data.astr_table)
    # all_lnols = em.getAllLnOverlaps(
    #     synth_data.astr_table, [comp1, comp2]
    # )
    fitted_memb_probs = em.expectation(
        tabletool.build_data_dict_from_table(synth_data.table),
        [comp1, comp2]
    )
    assert np.allclose(true_memb_probs, fitted_memb_probs, atol=1e-10)
'''
@pytest.mark.skip
def test_fit_many_comps_gradient_descent_with_multiprocessing():
"""
Added by MZ 2020 - 07 - 13
Test if maximisation works when using gradient descent and multiprocessing.
"""
age = 1e-5
ass_pars1 = np.array([0, 0, 0, 0, 0, 0, 5., 2., age])
comp1 = SphereComponent(ass_pars1)
starcounts = [100,]
synth_data = SynthData(pars=[ass_pars1,],
starcounts=starcounts)
synth_data.synthesise_everything()
tabletool.convert_table_astro2cart(synth_data.table)
true_memb_probs = np.zeros((np.sum(starcounts), 2))
true_memb_probs[:starcounts[0], 0] = 1.
true_memb_probs[starcounts[0]:, 1] = 1.
ncomps = len(starcounts)
best_comps, med_and_spans, memb_probs = \
expectmax.fit_many_comps(synth_data.table, ncomps,
rdir='test_gradient_descent_multiprocessing',
#~ init_memb_probs=None,
#~ init_comps=None,
trace_orbit_func=None,
optimisation_method='Nelder-Mead',
nprocess_ncomp = True,
)
'''
@pytest.mark.skip(reason='Too long for unit tests. Put this in integration instead')
def test_maximisation_gradient_descent_with_multiprocessing_tech():
    """
    Added by MZ 2020 - 07 - 13

    Test if maximisation works when using gradient descent and multiprocessing.
    NOTE: this is not a test if maximisation returns appropriate results but
    it only tests if the code runs without errors. This is mainly to test
    multiprocessing.
    """
    age = 1e-5
    ass_pars1 = np.array([0, 0, 0, 0, 0, 0, 5., 2., age])
    comp1 = SphereComponent(ass_pars1)
    starcounts = [100,]
    synth_data = SynthData(pars=[ass_pars1,],
                           starcounts=starcounts)
    synth_data.synthesise_everything()
    tabletool.convert_table_astro2cart(synth_data.table)
    # Single component: every star is a certain member.
    true_memb_probs = np.zeros((np.sum(starcounts), 1))
    true_memb_probs[:starcounts[0], 0] = 1.
    #~ true_memb_probs[starcounts[0]:, 1] = 1.
    ncomps = len(starcounts)
    # Perturb the true parameters so the optimiser has something to do.
    noise = np.random.rand(ass_pars1.shape[0])*5
    all_init_pars = [ass_pars1 + noise]
    new_comps, all_samples, _, all_init_pos, success_mask =\
        expectmax.maximisation(synth_data.table, ncomps,
                               true_memb_probs, 100, 'iter00',
                               all_init_pars,
                               optimisation_method='Nelder-Mead',
                               nprocess_ncomp=True,
                               )
    # TODO: test if new_comps, all_samples, _, all_init_pos, success_mask are of the right format.
# def test_background_overlaps():
# """
# Author: Marusa Zerjal, 2019 - 05 - 26
# Compare background overlap with KDE and background overlap with tiny covariance matrix
# :return:
# """
# background_means = tabletool.build_data_dict_from_table(kernel_density_input_datafile,
# only_means=True,
# )
# ln_bg_ols_kde = em.get_kernel_densities(background_means,
# # star_means, )
if __name__=='__main__':
    # Manual entry point: run the (otherwise pytest-skipped) multiprocessing
    # smoke test directly.
    test_maximisation_gradient_descent_with_multiprocessing_tech()
| mikeireland/chronostar | unit_tests/test_unit_expectmax.py | Python | mit | 6,240 |
# -*- coding: utf-8 -*-
"""
-------------------------------------
N A C S P Y T H O N S C R I P T
-------------------------------------
NACS version: 2.0.2745 - pre3
NACS architecture: CENTOS 5.11 (X86_64)
File generated at Tue Jan 20 16:55:05 2015
On host 'lse86' by 'cae42'
"""
from __future__ import division
# The script depends on the NACS-embedded interpreter's scripting API.
try:
    from nacs.scripting import *
except ImportError:
    # BUG FIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt and masked unrelated errors raised while
    # importing the nacs package.
    raise Exception("File is only executable in the NACS python interpreter!")
# =================
#   NACS SIMULATION
# =================
# Harmonic mechanical analysis of a layered transducer stack.
simulation = NacsSimulation()
simulation.setGrid(u'project3.nmf', 'plane')
# Result sinks: native NACS output, plain text (kept in `text` for the
# point results below), and GiD post-processing files.
simulation.addOutput(Output.Nacs())
text = Output.Text()
simulation.addOutput(text)
simulation.addOutput(Output.GiD())
# =====================
#   MATERIAL DEFINITION
# =====================
# NOTE(review): values appear to be SI units (kg/m^3 density, Pa Young's
# modulus) — confirm against the NACS material conventions.
copper = Material('Copper')
copper.density(8940.0)
copper.lossTangensDelta([1000],[0.002])
copper.stiffness.isotropic.byENu(1.15e+11, 0.35)
steel = Material('Steel')
steel.density(7850)
steel.lossTangensDelta([1000],[0.0003])
steel.stiffness.isotropic.byENu(1.95e+11, 0.28)
silicon = Material('Silicon')
silicon.density(2208)
silicon.stiffness.isotropic.byENu(67500000000.0, 0.1)
# Assign materials to the mesh regions.
simulation.setMat('exc_f_r', copper)
simulation.setMat('rec_f_r', copper)
simulation.setMat('sen_coat_r', steel)
simulation.setMat('silicon_r', silicon)
# ===============
#   ANALYSIS STEP
# ===============
# Single harmonic step at 1.373 GHz ('log' spacing is irrelevant for one point).
harm1 = Analysis.Harmonic()
harm1.set(1, 1373000000.0, 1373000000.0, 'log')
mech1 = Physic.Mechanic('planeStrain')
mech1.addRegions(['exc_f_r', 'sen_coat_r', 'silicon_r', 'rec_f_r'])
# Drive the exciter region with a y-force; clamp the bottom boundary.
mech1.addBc(mech1.BC.Force.expr('exc_f_r', 'y', "-1000"))
mech1.addBc(mech1.BC.Fix('outerbounds_bot', ['x', 'y']))
# Field output everywhere; amplitude/phase point results go to the text sink.
mech1.addResult(mech1.Result.Displacement(['exc_f_r', 'rec_f_r', 'sen_coat_r', 'silicon_r']))
mech1.addResult(mech1.Result.Displacement(['observer_point_1', 'observer_point_2', 'observer_point_3', 'observer_point_4', 'observer_point_e4'], 'amplPhase', 'mesh', [text]))
harm1.addPhysic(mech1)
simulation.addAnalysis(harm1)
| cosailer/caeproject | simulation_result/3/project3_harmonic_sub.py | Python | gpl-2.0 | 1,987 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.