Dataset schema (column | type | value range; ⌀ marks a nullable column):

hexsha | stringlengths | 40–40
size | int64 | 3–1.03M
ext | stringclasses | 10 values
lang | stringclasses | 1 value
max_stars_repo_path | stringlengths | 3–972
max_stars_repo_name | stringlengths | 6–130
max_stars_repo_head_hexsha | stringlengths | 40–78
max_stars_repo_licenses | listlengths | 1–10
max_stars_count | int64 | 1–191k ⌀
max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 ⌀
max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 ⌀
max_issues_repo_path | stringlengths | 3–972
max_issues_repo_name | stringlengths | 6–130
max_issues_repo_head_hexsha | stringlengths | 40–78
max_issues_repo_licenses | listlengths | 1–10
max_issues_count | int64 | 1–116k ⌀
max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 ⌀
max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 ⌀
max_forks_repo_path | stringlengths | 3–972
max_forks_repo_name | stringlengths | 6–130
max_forks_repo_head_hexsha | stringlengths | 40–78
max_forks_repo_licenses | listlengths | 1–10
max_forks_count | int64 | 1–105k ⌀
max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 ⌀
max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 ⌀
content | stringlengths | 3–1.03M
avg_line_length | float64 | 1.13–941k
max_line_length | int64 | 2–941k
alphanum_fraction | float64 | 0–1
hexsha: 9304009ae75f30470c311ee470ff6e1eeb87fdbd | size: 10,110 | ext: py | lang: Python
max_stars: repo_path lib/bridgedb/parse/networkstatus.py | repo_name wfn/bridgedb | head_hexsha f266f32c365eb7a16cf156cc02f7e492266a7b51 | licenses ["BSD-3-Clause-Clear"] | count 1 | events 2016-09-21T12:55:21.000Z – 2016-09-21T12:55:21.000Z
max_issues: repo_path lib/bridgedb/parse/networkstatus.py | repo_name wfn/bridgedb | head_hexsha f266f32c365eb7a16cf156cc02f7e492266a7b51 | licenses ["BSD-3-Clause-Clear"] | count null | events null – null
max_forks: repo_path lib/bridgedb/parse/networkstatus.py | repo_name wfn/bridgedb | head_hexsha f266f32c365eb7a16cf156cc02f7e492266a7b51 | licenses ["BSD-3-Clause-Clear"] | count null | events null – null
content:
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013 Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
"""Parsers for ``@type bridge-network-status 1.0`` descriptors.
.. _descriptors: https://metrics.torproject.org/formats.html#descriptortypes
**Module Overview:**
..
parse
\_networkstatus
|_ isValidRouterNickname - Determine if a nickname is according to spec
|_ parseRLine - Parse an 'r'-line from a networkstatus document
|_ parseALine - Parse an 'a'-line from a networkstatus document
\_ parseSLine - Parse an 's'-line from a networkstatus document
"""
import binascii
import logging
import string
import time
import warnings
from twisted.python.log import showwarning
from bridgedb.parse import addr
from bridgedb.parse import parseUnpaddedBase64
from bridgedb.parse import InvalidBase64
class NetworkstatusParsingError(Exception):
"""Unable to parse networkstatus document line."""
class InvalidNetworkstatusRouterIdentity(ValueError):
"""The ID field of a networkstatus document 'r'-line is invalid."""
class InvalidRouterNickname(ValueError):
"""Router nickname doesn't follow tor-spec."""
def isValidRouterNickname(nickname):
"""Determine if a router's given nickname meets the specification.
:param string nickname: An OR's nickname.
"""
ALPHANUMERIC = string.letters + string.digits
if not (1 <= len(nickname) <= 19):
raise InvalidRouterNickname(
"Nicknames must be between 1 and 19 characters: %r" % nickname)
for letter in nickname:
if not letter in ALPHANUMERIC:
raise InvalidRouterNickname(
"Nicknames must only use [A-Za-z0-9]: %r" % nickname)
return True
def parseRLine(line):
"""Parse an 'r'-line from a networkstatus document.
From torspec.git/dir-spec.txt, commit 36761c7d553d L1499-1512:
|
|"r" SP nickname SP identity SP digest SP publication SP IP SP ORPort
| SP DirPort NL
|
| [At start, exactly once.]
|
| "Nickname" is the OR's nickname. "Identity" is a hash of its
| identity key, encoded in base64, with trailing equals sign(s)
| removed. "Digest" is a hash of its most recent descriptor as
| signed (that is, not including the signature), encoded in base64.
| "Publication" is the
| publication time of its most recent descriptor, in the form
| YYYY-MM-DD HH:MM:SS, in UTC. "IP" is its current IP address;
| ORPort is its current OR port, "DirPort" is its current directory
| port, or "0" for "none".
|
    :param string line: An 'r'-line from a bridge-network-status descriptor.
"""
(nickname, ID, descDigest, timestamp,
ORaddr, ORport, dirport) = (None for x in xrange(7))
try:
if not line.startswith('r '):
raise NetworkstatusParsingError(
"Networkstatus parser received non 'r'-line: %r" % line)
line = line[2:] # Chop off the 'r '
fields = line.split()
if len(fields) != 8:
raise NetworkstatusParsingError(
"Wrong number of fields in networkstatus 'r'-line: %r" % line)
nickname, ID = fields[:2]
try:
ID = parseUnpaddedBase64(ID)
except InvalidBase64 as error:
raise InvalidNetworkstatusRouterIdentity(error)
# Check the nickname validity after parsing the ID, otherwise, if the
# nickname is invalid, we end up with the nickname being ``None`` and
# the ID being unparsed, unpadded (meaning it is technically invalid)
# base64.
isValidRouterNickname(nickname)
except NetworkstatusParsingError as error:
logging.error(error)
nickname, ID = None, None
except InvalidRouterNickname as error:
logging.error(error)
# Assume that we mostly care about obtaining the OR's ID, then it
# should be okay to set the nickname to ``None``, if it was invalid.
nickname = None
except InvalidNetworkstatusRouterIdentity as error:
logging.error(error)
ID = None
else:
try:
descDigest = parseUnpaddedBase64(fields[2])
timestamp = time.mktime(time.strptime(" ".join(fields[3:5]),
"%Y-%m-%d %H:%M:%S"))
ORaddr = fields[5]
ORport = int(fields[6])
dirport = fields[7]
except InvalidBase64 as error:
logging.error(error)
descDigest = None
except (AttributeError, ValueError, IndexError) as error:
logging.error(error)
timestamp = None
finally:
return (nickname, ID, descDigest, timestamp, ORaddr, ORport, dirport)
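# Illustrative sketch, not part of the original module: an 'r'-line of the
# documented shape and what parseRLine() returns for it (all values below are
# made up; the identity and digest fields are unpadded base64 in real data).
#
#     line = ("r ExampleBridge <unpadded-base64-identity> <unpadded-base64-digest> "
#             "2013-10-31 12:00:00 10.0.1.1 9001 0")
#     nickname, ID, digest, ts, ORaddr, ORport, dirport = parseRLine(line)
#     # nickname == 'ExampleBridge', ORaddr == '10.0.1.1',
#     # ORport == 9001 (an int), dirport == '0' (a string),
#     # ID and digest are the decoded byte strings, and ts is the publication
#     # time converted to seconds since the epoch.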
def parseALine(line, fingerprint=None):
"""Parse an 'a'-line of a bridge networkstatus document.
From torspec.git/dir-spec.txt, commit 36761c7d553d L1499-1512:
|
| "a" SP address ":" port NL
|
| [Any number.]
|
| Present only if the OR has at least one IPv6 address.
|
| Address and portlist are as for "or-address" as specified in
| 2.1.
|
| (Only included when the vote or consensus is generated with
| consensus-method 14 or later.)
    :param string line: An 'a'-line from a bridge-network-status descriptor.
:type fingerprint: string or None
:param fingerprint: A string which identifies which OR the descriptor
we're parsing came from (since the 'a'-line doesn't tell us, this can
help make the log messages clearer).
:raises: :exc:`NetworkstatusParsingError`
:rtype: tuple
    :returns: A 2-tuple of a string representing the IP address and a
:class:`bridgedb.parse.addr.PortList`.
"""
ip = None
portlist = None
if line.startswith('a '):
line = line[2:] # Chop off the 'a '
else:
logging.warn("Networkstatus parser received non 'a'-line for %r:"\
" %r" % (fingerprint or 'Unknown', line))
try:
ip, portlist = line.rsplit(':', 1)
except ValueError as error:
logging.error("Bad separator in networkstatus 'a'-line: %r" % line)
return (None, None)
if ip.startswith('[') and ip.endswith(']'):
ip = ip.strip('[]')
try:
if not addr.isIPAddress(ip):
raise NetworkstatusParsingError(
"Got invalid IP Address in networkstatus 'a'-line for %r: %r"
% (fingerprint or 'Unknown', line))
if addr.isIPv4(ip):
warnings.warn(FutureWarning(
"Got IPv4 address in networkstatus 'a'-line! "\
"Networkstatus document format may have changed!"))
except NetworkstatusParsingError as error:
logging.error(error)
ip, portlist = None, None
try:
portlist = addr.PortList(portlist)
if not portlist:
raise NetworkstatusParsingError(
"Got invalid portlist in 'a'-line for %r!\n Line: %r"
% (fingerprint or 'Unknown', line))
except (addr.InvalidPort, NetworkstatusParsingError) as error:
logging.error(error)
portlist = None
else:
logging.debug("Parsed networkstatus ORAddress line for %r:"\
"\n Address: %s \tPorts: %s"
% (fingerprint or 'Unknown', ip, portlist))
finally:
return (ip, portlist)
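# Illustrative sketch, not part of the original module: an 'a'-line with a
# bracketed IPv6 address, as handled by parseALine(). The address and port
# are made up.
#
#     ip, ports = parseALine("a [2001:db8::123]:443")
#     # ip == '2001:db8::123' and ports is a bridgedb.parse.addr.PortList
#     # built from "443"; on a malformed line both values come back as None.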
def parseSLine(line):
"""Parse an 's'-line from a bridge networkstatus document.
The 's'-line contains all flags assigned to a bridge. The flags which may
be assigned to a bridge are as follows:
From torspec.git/dir-spec.txt, commit 36761c7d553d L1526-1554:
|
| "s" SP Flags NL
|
| [Exactly once.]
|
| A series of space-separated status flags, in lexical order (as ASCII
| byte strings). Currently documented flags are:
|
| "BadDirectory" if the router is believed to be useless as a
| directory cache (because its directory port isn't working,
| its bandwidth is always throttled, or for some similar
| reason).
| "Fast" if the router is suitable for high-bandwidth circuits.
| "Guard" if the router is suitable for use as an entry guard.
| "HSDir" if the router is considered a v2 hidden service directory.
| "Named" if the router's identity-nickname mapping is canonical,
| and this authority binds names.
| "Stable" if the router is suitable for long-lived circuits.
| "Running" if the router is currently usable.
| "Valid" if the router has been 'validated'.
| "V2Dir" if the router implements the v2 directory protocol.
    :param string line: An 's'-line from a bridge-network-status descriptor.
:rtype: tuple
:returns: A 2-tuple of booleans, the first is True if the bridge has the
"Running" flag, and the second is True if it has the "Stable" flag.
"""
line = line[2:]
flags = [x.capitalize() for x in line.split()]
fast = 'Fast' in flags
running = 'Running' in flags
stable = 'Stable' in flags
guard = 'Guard' in flags
valid = 'Valid' in flags
if (fast or running or stable or guard or valid):
logging.debug("Parsed Flags: %s%s%s%s%s"
% ('Fast ' if fast else '',
'Running ' if running else '',
'Stable ' if stable else '',
'Guard ' if guard else '',
'Valid ' if valid else ''))
# Right now, we only care about 'Running' and 'Stable'
return running, stable
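# Illustrative sketch, not part of the original module: parseSLine() only
# reports the "Running" and "Stable" flags, for example:
#
#     parseSLine("s Fast Running Stable Valid")  # -> (True, True)
#     parseSLine("s Fast Valid")                 # -> (False, False)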
avg_line_length: 36.89781 | max_line_length: 79 | alphanum_fraction: 0.614639

hexsha: bbf1e13c10521e0b31a77a9bd282c88ea408bf29 | size: 6,656 | ext: py | lang: Python
max_stars: repo_path kubernetes/client/models/v1beta1_certificate_signing_request_condition.py | repo_name palnabarun/Python | head_hexsha 6b01c95e1673c0787d3d688b361bfd995d62dd98 | licenses ["Apache-2.0"] | count null | events null – null
max_issues: repo_path kubernetes/client/models/v1beta1_certificate_signing_request_condition.py | repo_name palnabarun/Python | head_hexsha 6b01c95e1673c0787d3d688b361bfd995d62dd98 | licenses ["Apache-2.0"] | count 7 | events 2020-11-07T10:35:21.000Z – 2022-02-07T03:06:25.000Z
max_forks: repo_path kubernetes/client/models/v1beta1_certificate_signing_request_condition.py | repo_name palnabarun/python | head_hexsha 6b01c95e1673c0787d3d688b361bfd995d62dd98 | licenses ["Apache-2.0"] | count null | events null – null
content:
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1CertificateSigningRequestCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_update_time': 'datetime',
'message': 'str',
'reason': 'str',
'type': 'str'
}
attribute_map = {
'last_update_time': 'lastUpdateTime',
'message': 'message',
'reason': 'reason',
'type': 'type'
}
def __init__(self, last_update_time=None, message=None, reason=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1beta1CertificateSigningRequestCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_update_time = None
self._message = None
self._reason = None
self._type = None
self.discriminator = None
if last_update_time is not None:
self.last_update_time = last_update_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.type = type
@property
def last_update_time(self):
"""Gets the last_update_time of this V1beta1CertificateSigningRequestCondition. # noqa: E501
timestamp for the last update to this condition # noqa: E501
:return: The last_update_time of this V1beta1CertificateSigningRequestCondition. # noqa: E501
:rtype: datetime
"""
return self._last_update_time
@last_update_time.setter
def last_update_time(self, last_update_time):
"""Sets the last_update_time of this V1beta1CertificateSigningRequestCondition.
timestamp for the last update to this condition # noqa: E501
:param last_update_time: The last_update_time of this V1beta1CertificateSigningRequestCondition. # noqa: E501
:type: datetime
"""
self._last_update_time = last_update_time
@property
def message(self):
"""Gets the message of this V1beta1CertificateSigningRequestCondition. # noqa: E501
human readable message with details about the request state # noqa: E501
:return: The message of this V1beta1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1beta1CertificateSigningRequestCondition.
human readable message with details about the request state # noqa: E501
:param message: The message of this V1beta1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1beta1CertificateSigningRequestCondition. # noqa: E501
brief reason for the request state # noqa: E501
:return: The reason of this V1beta1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1beta1CertificateSigningRequestCondition.
brief reason for the request state # noqa: E501
:param reason: The reason of this V1beta1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def type(self):
"""Gets the type of this V1beta1CertificateSigningRequestCondition. # noqa: E501
request approval state, currently Approved or Denied. # noqa: E501
:return: The type of this V1beta1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1beta1CertificateSigningRequestCondition.
request approval state, currently Approved or Denied. # noqa: E501
:param type: The type of this V1beta1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1CertificateSigningRequestCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1CertificateSigningRequestCondition):
return True
return self.to_dict() != other.to_dict()
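# Illustrative sketch, not part of the generated module:
#
#     cond = V1beta1CertificateSigningRequestCondition(type="Approved",
#                                                      reason="AutoApproved")
#     cond.to_dict()
#     # -> {'last_update_time': None, 'message': None,
#     #     'reason': 'AutoApproved', 'type': 'Approved'}
#
# Note that `type` is required: setting it to None raises a ValueError when
# client-side validation is enabled.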
avg_line_length: 32 | max_line_length: 129 | alphanum_fraction: 0.629207

hexsha: 3cb3aab2972ddef7e033e503b54c9982069abff2 | size: 5,135 | ext: py | lang: Python
max_stars: repo_path samcli/lib/bootstrap/companion_stack/data_types.py | repo_name torresxb1/aws-sam-cli | head_hexsha d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | licenses ["BSD-2-Clause", "Apache-2.0"] | count 859 | events 2020-08-25T03:53:17.000Z – 2022-03-31T12:33:07.000Z
max_issues: repo_path samcli/lib/bootstrap/companion_stack/data_types.py | repo_name torresxb1/aws-sam-cli | head_hexsha d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | licenses ["BSD-2-Clause", "Apache-2.0"] | count 1,369 | events 2020-08-25T10:57:03.000Z – 2022-03-31T23:00:25.000Z
max_forks: repo_path samcli/lib/bootstrap/companion_stack/data_types.py | repo_name torresxb1/aws-sam-cli | head_hexsha d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | licenses ["BSD-2-Clause", "Apache-2.0"] | count 275 | events 2020-08-25T19:33:50.000Z – 2022-03-26T08:32:52.000Z
content:
"""
Data type classes for companion stacks
"""
import re
from typing import Optional
from samcli.lib.utils.hash import str_checksum
class CompanionStack:
"""
Abstraction class for the companion stack
Companion stack name will be generated by this class.
"""
_parent_stack_name: str
_escaped_parent_stack_name: str
_parent_stack_hash: str
_stack_name: str
def __init__(self, parent_stack_name: str) -> None:
self._parent_stack_name = parent_stack_name
self._escaped_parent_stack_name = re.sub(r"[^a-z0-9]", "", self._parent_stack_name.lower())
self._parent_stack_hash = str_checksum(self._parent_stack_name)
# There is max 128 characters limit on the length of stack name.
# Using MD5 to avoid collision after truncating
# 104 + 1 + 8 + 15 = 128 max char
self._stack_name = f"{self._parent_stack_name[:104]}-{self._parent_stack_hash[:8]}-CompanionStack"
@property
def parent_stack_name(self) -> str:
"""
Parent stack name
"""
return self._parent_stack_name
@property
def escaped_parent_stack_name(self) -> str:
"""
Parent stack name with only alphanumeric characters
"""
return self._escaped_parent_stack_name
@property
def parent_stack_hash(self) -> str:
"""
MD5 hash of parent stack name
"""
return self._parent_stack_hash
@property
def stack_name(self) -> str:
"""
        Stack name of the companion stack
"""
return self._stack_name
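# Illustrative sketch, not part of the original module: for a hypothetical
# parent stack named "my-sam-app",
#
#     stack = CompanionStack("my-sam-app")
#     stack.escaped_parent_stack_name  # "mysamapp" (lowercased, alphanumerics only)
#     stack.stack_name                 # "my-sam-app-<first 8 chars of
#                                      #  str_checksum('my-sam-app')>-CompanionStack"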
class ECRRepo:
"""
Abstraction class for ECR repos in companion stacks
Logical ID, Physical ID, and Repo URI will be generated with this class.
"""
_function_logical_id: Optional[str]
_escaped_function_logical_id: Optional[str]
_function_md5: Optional[str]
_companion_stack: Optional[CompanionStack]
_logical_id: Optional[str]
_physical_id: Optional[str]
_output_logical_id: Optional[str]
def __init__(
self,
companion_stack: Optional[CompanionStack] = None,
function_logical_id: Optional[str] = None,
logical_id: Optional[str] = None,
physical_id: Optional[str] = None,
output_logical_id: Optional[str] = None,
):
"""
Must be specified either with
companion_stack and function_logical_id
or
logical_id, physical_id, and output_logical_id
"""
self._function_logical_id = function_logical_id
self._escaped_function_logical_id = (
re.sub(r"[^a-z0-9]", "", self._function_logical_id.lower())
if self._function_logical_id is not None
else None
)
self._function_md5 = str_checksum(self._function_logical_id) if self._function_logical_id is not None else None
self._companion_stack = companion_stack
self._logical_id = logical_id
self._physical_id = physical_id
self._output_logical_id = output_logical_id
@property
def logical_id(self) -> Optional[str]:
if self._logical_id is None and self._function_logical_id and self._function_md5:
            # MD5 is used to avoid two different Lambda Functions having the same escaped name
# For example: Helloworld and HELLO-WORLD
# 52 + 8 + 4 = 64 max char
self._logical_id = self._function_logical_id[:52] + self._function_md5[:8] + "Repo"
return self._logical_id
@property
def physical_id(self) -> Optional[str]:
if (
self._physical_id is None
and self._companion_stack
and self._function_md5
and self._escaped_function_logical_id
):
# The physical ID is constructed with escaped_stack_name + stack_md5[:8] as prefix/path and
# followed by escaped_lambda_logical_id + function_md5[:8] + "repo" to show
# the linkage between the function and the repo
# 128 + 8 + 1 + 64 + 8 + 4 = 213 max char
self._physical_id = (
self._companion_stack.escaped_parent_stack_name
+ self._companion_stack.parent_stack_hash[:8]
+ "/"
+ self._escaped_function_logical_id
+ self._function_md5[:8]
+ "repo"
)
return self._physical_id
@property
def output_logical_id(self) -> Optional[str]:
if self._output_logical_id is None and self._function_logical_id and self._function_md5:
self._output_logical_id = self._function_logical_id[:52] + self._function_md5[:8] + "Out"
return self._output_logical_id
@staticmethod
def get_domain(region: str) -> str:
# https://docs.amazonaws.cn/en_us/aws/latest/userguide/endpoints-Beijing.html
if region.startswith("cn-"):
return "amazonaws.com.cn"
return "amazonaws.com"
def get_repo_uri(self, account_id, region) -> str:
domain = ECRRepo.get_domain(region)
return f"{account_id}.dkr.ecr.{region}.{domain}/{self.physical_id}"
avg_line_length: 35.171233 | max_line_length: 119 | alphanum_fraction: 0.644985

hexsha: 63fbc922e2b1281f3ec90ecb4a7716e123760904 | size: 12,922 | ext: py | lang: Python
max_stars: repo_path frappe/utils/file_manager.py | repo_name AWDGroup/frappe | head_hexsha 9687384328012bf82a2784ab950c4f74a69b4ebe | licenses ["MIT"] | count 1 | events 2019-05-21T04:55:01.000Z – 2019-05-21T04:55:01.000Z
max_issues: repo_path frappe/utils/file_manager.py | repo_name AWDGroup/frappe | head_hexsha 9687384328012bf82a2784ab950c4f74a69b4ebe | licenses ["MIT"] | count null | events null – null
max_forks: repo_path frappe/utils/file_manager.py | repo_name AWDGroup/frappe | head_hexsha 9687384328012bf82a2784ab950c4f74a69b4ebe | licenses ["MIT"] | count null | events null – null
content:
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os, base64, re
import hashlib
import mimetypes
import io
from frappe.utils import get_hook_method, get_files_path, random_string, encode, cstr, call_hook_method, cint
from frappe import _
from frappe import conf
from copy import copy
from six.moves.urllib.parse import unquote
from six import text_type, PY2
class MaxFileSizeReachedError(frappe.ValidationError):
pass
def get_file_url(file_data_name):
data = frappe.db.get_value("File", file_data_name, ["file_name", "file_url"], as_dict=True)
return data.file_url or data.file_name
def upload():
# get record details
dt = frappe.form_dict.doctype
dn = frappe.form_dict.docname
df = frappe.form_dict.docfield
file_url = frappe.form_dict.file_url
filename = frappe.form_dict.filename
frappe.form_dict.is_private = cint(frappe.form_dict.is_private)
if not filename and not file_url:
frappe.msgprint(_("Please select a file or url"),
raise_exception=True)
file_doc = get_file_doc()
comment = {}
if dt and dn:
comment = frappe.get_doc(dt, dn).add_comment("Attachment",
_("added {0}").format("<a href='{file_url}' target='_blank'>{file_name}</a>{icon}".format(**{
"icon": ' <i class="fa fa-lock text-warning"></i>' \
if file_doc.is_private else "",
"file_url": file_doc.file_url.replace("#", "%23") \
if file_doc.file_name else file_doc.file_url,
"file_name": file_doc.file_name or file_doc.file_url
})))
return {
"name": file_doc.name,
"file_name": file_doc.file_name,
"file_url": file_doc.file_url,
"is_private": file_doc.is_private,
"comment": comment.as_dict() if comment else {}
}
def get_file_doc(dt=None, dn=None, folder=None, is_private=None, df=None):
'''returns File object (Document) from given parameters or form_dict'''
r = frappe.form_dict
if dt is None: dt = r.doctype
if dn is None: dn = r.docname
if df is None: df = r.docfield
if folder is None: folder = r.folder
if is_private is None: is_private = r.is_private
if r.filedata:
file_doc = save_uploaded(dt, dn, folder, is_private, df)
elif r.file_url:
file_doc = save_url(r.file_url, r.filename, dt, dn, folder, is_private, df)
return file_doc
def save_uploaded(dt, dn, folder, is_private, df=None):
fname, content = get_uploaded_content()
if content:
return save_file(fname, content, dt, dn, folder, is_private=is_private, df=df);
else:
raise Exception
def save_url(file_url, filename, dt, dn, folder, is_private, df=None):
# if not (file_url.startswith("http://") or file_url.startswith("https://")):
# frappe.msgprint("URL must start with 'http://' or 'https://'")
# return None, None
file_url = unquote(file_url)
file_size = frappe.form_dict.file_size
f = frappe.get_doc({
"doctype": "File",
"file_url": file_url,
"file_name": filename,
"attached_to_doctype": dt,
"attached_to_name": dn,
"attached_to_field": df,
"folder": folder,
"file_size": file_size,
"is_private": is_private
})
f.flags.ignore_permissions = True
try:
f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
def get_uploaded_content():
# should not be unicode when reading a file, hence using frappe.form
if 'filedata' in frappe.form_dict:
if "," in frappe.form_dict.filedata:
frappe.form_dict.filedata = frappe.form_dict.filedata.rsplit(",", 1)[1]
frappe.uploaded_content = base64.b64decode(frappe.form_dict.filedata)
frappe.uploaded_filename = frappe.form_dict.filename
return frappe.uploaded_filename, frappe.uploaded_content
else:
frappe.msgprint(_('No file attached'))
return None, None
def save_file(fname, content, dt, dn, folder=None, decode=False, is_private=0, df=None):
if decode:
if isinstance(content, text_type):
content = content.encode("utf-8")
if b"," in content:
content = content.split(b",")[1]
content = base64.b64decode(content)
file_size = check_max_file_size(content)
content_hash = get_content_hash(content)
content_type = mimetypes.guess_type(fname)[0]
fname = get_file_name(fname, content_hash[-6:])
file_data = get_file_data_from_hash(content_hash, is_private=is_private)
if not file_data:
call_hook_method("before_write_file", file_size=file_size)
write_file_method = get_hook_method('write_file', fallback=save_file_on_filesystem)
file_data = write_file_method(fname, content, content_type=content_type, is_private=is_private)
file_data = copy(file_data)
file_data.update({
"doctype": "File",
"attached_to_doctype": dt,
"attached_to_name": dn,
"attached_to_field": df,
"folder": folder,
"file_size": file_size,
"content_hash": content_hash,
"is_private": is_private
})
f = frappe.get_doc(file_data)
f.flags.ignore_permissions = True
try:
f.insert()
except frappe.DuplicateEntryError:
return frappe.get_doc("File", f.duplicate_entry)
return f
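# Illustrative note, not part of the original module: save_file() deduplicates
# by content. It computes the MD5 content_hash, passes its last six hex
# characters to get_file_name() as a collision suffix, and only writes a new
# file via the write_file hook (filesystem by default) when no existing File
# record shares the same content_hash and is_private flag; otherwise the
# existing file data is reused.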
def get_file_data_from_hash(content_hash, is_private=0):
for name in frappe.db.sql_list("select name from `tabFile` where content_hash=%s and is_private=%s", (content_hash, is_private)):
b = frappe.get_doc('File', name)
return {k: b.get(k) for k in frappe.get_hooks()['write_file_keys']}
return False
def save_file_on_filesystem(fname, content, content_type=None, is_private=0):
fpath = write_file(content, fname, is_private)
if is_private:
file_url = "/private/files/{0}".format(fname)
else:
file_url = "/files/{0}".format(fname)
return {
'file_name': os.path.basename(fpath),
'file_url': file_url
}
def get_max_file_size():
return conf.get('max_file_size') or 10485760
def check_max_file_size(content):
max_file_size = get_max_file_size()
file_size = len(content)
if file_size > max_file_size:
frappe.msgprint(_("File size exceeded the maximum allowed size of {0} MB").format(
max_file_size / 1048576),
raise_exception=MaxFileSizeReachedError)
return file_size
def write_file(content, fname, is_private=0):
"""write file to disk with a random name (to compare)"""
file_path = get_files_path(is_private=is_private)
# create directory (if not exists)
frappe.create_folder(file_path)
# write the file
if isinstance(content, text_type):
content = content.encode()
with open(os.path.join(file_path.encode('utf-8'), fname.encode('utf-8')), 'wb+') as f:
f.write(content)
return get_files_path(fname, is_private=is_private)
def remove_all(dt, dn, from_delete=False):
"""remove all files in a transaction"""
try:
for fid in frappe.db.sql_list("""select name from `tabFile` where
attached_to_doctype=%s and attached_to_name=%s""", (dt, dn)):
remove_file(fid, dt, dn, from_delete)
except Exception as e:
if e.args[0]!=1054: raise # (temp till for patched)
def remove_file_by_url(file_url, doctype=None, name=None):
if doctype and name:
fid = frappe.db.get_value("File", {"file_url": file_url,
"attached_to_doctype": doctype, "attached_to_name": name})
else:
fid = frappe.db.get_value("File", {"file_url": file_url})
if fid:
return remove_file(fid)
def remove_file(fid, attached_to_doctype=None, attached_to_name=None, from_delete=False):
"""Remove file and File entry"""
file_name = None
if not (attached_to_doctype and attached_to_name):
attached = frappe.db.get_value("File", fid,
["attached_to_doctype", "attached_to_name", "file_name"])
if attached:
attached_to_doctype, attached_to_name, file_name = attached
ignore_permissions, comment = False, None
if attached_to_doctype and attached_to_name and not from_delete:
doc = frappe.get_doc(attached_to_doctype, attached_to_name)
ignore_permissions = doc.has_permission("write") or False
if frappe.flags.in_web_form:
ignore_permissions = True
if not file_name:
file_name = frappe.db.get_value("File", fid, "file_name")
comment = doc.add_comment("Attachment Removed", _("Removed {0}").format(file_name))
frappe.delete_doc("File", fid, ignore_permissions=ignore_permissions)
return comment
def delete_file_data_content(doc, only_thumbnail=False):
method = get_hook_method('delete_file_data_content', fallback=delete_file_from_filesystem)
method(doc, only_thumbnail=only_thumbnail)
def delete_file_from_filesystem(doc, only_thumbnail=False):
"""Delete file, thumbnail from File document"""
if only_thumbnail:
delete_file(doc.thumbnail_url)
else:
delete_file(doc.file_url)
delete_file(doc.thumbnail_url)
def delete_file(path):
"""Delete file from `public folder`"""
if path:
if ".." in path.split("/"):
frappe.msgprint(_("It is risky to delete this file: {0}. Please contact your System Manager.").format(path))
parts = os.path.split(path.strip("/"))
if parts[0]=="files":
path = frappe.utils.get_site_path("public", "files", parts[-1])
else:
path = frappe.utils.get_site_path("private", "files", parts[-1])
path = encode(path)
if os.path.exists(path):
os.remove(path)
def get_file(fname):
"""Returns [`file_name`, `content`] for given file name `fname`"""
file_path = get_file_path(fname)
# read the file
if PY2:
with open(encode(file_path)) as f:
content = f.read()
else:
with io.open(encode(file_path), mode='rb') as f:
content = f.read()
try:
# for plain text files
content = content.decode()
except UnicodeDecodeError:
# for .png, .jpg, etc
pass
return [file_path.rsplit("/", 1)[-1], content]
def get_file_path(file_name):
"""Returns file path from given file name"""
f = frappe.db.sql("""select file_url from `tabFile`
where name=%s or file_name=%s""", (file_name, file_name))
if f:
file_name = f[0][0]
file_path = file_name
if "/" not in file_path:
file_path = "/files/" + file_path
if file_path.startswith("/private/files/"):
file_path = get_files_path(*file_path.split("/private/files/", 1)[1].split("/"), is_private=1)
elif file_path.startswith("/files/"):
file_path = get_files_path(*file_path.split("/files/", 1)[1].split("/"))
else:
frappe.throw(_("There is some problem with the file url: {0}").format(file_path))
return file_path
def get_content_hash(content):
if isinstance(content, text_type):
content = content.encode()
return hashlib.md5(content).hexdigest()
def get_file_name(fname, optional_suffix):
# convert to unicode
fname = cstr(fname)
n_records = frappe.db.sql("select name from `tabFile` where file_name=%s", fname)
if len(n_records) > 0 or os.path.exists(encode(get_files_path(fname))):
f = fname.rsplit('.', 1)
if len(f) == 1:
partial, extn = f[0], ""
else:
partial, extn = f[0], "." + f[1]
return '{partial}{suffix}{extn}'.format(partial=partial, extn=extn, suffix=optional_suffix)
return fname
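# Illustrative note, not part of the original module: if "report.pdf" already
# exists as a File record (or on disk), get_file_name("report.pdf", "a1b2c3")
# returns "reporta1b2c3.pdf"; otherwise the name is returned unchanged.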
@frappe.whitelist()
def download_file(file_url):
"""
Download file using token and REST API. Valid session or
token is required to download private files.
Method : GET
Endpoint : frappe.utils.file_manager.download_file
URL Params : file_name = /path/to/file relative to site path
"""
file_doc = frappe.get_doc("File", {"file_url":file_url})
file_doc.check_permission("read")
with open(getattr(frappe.local, "site_path", None) + file_url, "rb") as fileobj:
filedata = fileobj.read()
frappe.local.response.filename = file_url[file_url.rfind("/")+1:]
frappe.local.response.filecontent = filedata
frappe.local.response.type = "download"
def extract_images_from_doc(doc, fieldname):
content = doc.get(fieldname)
content = extract_images_from_html(doc, content)
if frappe.flags.has_dataurl:
doc.set(fieldname, content)
def extract_images_from_html(doc, content):
frappe.flags.has_dataurl = False
def _save_file(match):
data = match.group(1)
data = data.split("data:")[1]
headers, content = data.split(",")
if "filename=" in headers:
filename = headers.split("filename=")[-1]
# decode filename
if not isinstance(filename, text_type):
filename = text_type(filename, 'utf-8')
else:
mtype = headers.split(";")[0]
filename = get_random_filename(content_type=mtype)
doctype = doc.parenttype if doc.parent else doc.doctype
name = doc.parent or doc.name
# TODO fix this
file_url = save_file(filename, content, doctype, name, decode=True).get("file_url")
if not frappe.flags.has_dataurl:
frappe.flags.has_dataurl = True
return '<img src="{file_url}"'.format(file_url=file_url)
if content:
content = re.sub('<img[^>]*src\s*=\s*["\'](?=data:)(.*?)["\']', _save_file, content)
return content
def get_random_filename(extn=None, content_type=None):
if extn:
if not extn.startswith("."):
extn = "." + extn
elif content_type:
extn = mimetypes.guess_extension(content_type)
return random_string(7) + (extn or "")
@frappe.whitelist()
def validate_filename(filename):
from frappe.utils import now_datetime
timestamp = now_datetime().strftime(" %Y-%m-%d %H:%M:%S")
fname = get_file_name(filename, timestamp)
return fname
avg_line_length: 29.43508 | max_line_length: 130 | alphanum_fraction: 0.726203

hexsha: 72a4621d5ed7c20157581b4aa02cf3c07f951f79 | size: 2,271 | ext: py | lang: Python
max_stars: repo_path tensortrade/orders/order_spec.py | repo_name owodunni/tensortrade | head_hexsha d6d7fb12f926b3c451d7e200cb693d750a35f95a | licenses ["Apache-2.0"] | count 1 | events 2020-04-05T05:15:36.000Z – 2020-04-05T05:15:36.000Z
max_issues: repo_path tensortrade/orders/order_spec.py | repo_name owodunni/tensortrade | head_hexsha d6d7fb12f926b3c451d7e200cb693d750a35f95a | licenses ["Apache-2.0"] | count null | events null – null
max_forks: repo_path tensortrade/orders/order_spec.py | repo_name owodunni/tensortrade | head_hexsha d6d7fb12f926b3c451d7e200cb693d750a35f95a | licenses ["Apache-2.0"] | count 2 | events 2020-12-31T02:57:22.000Z – 2021-06-21T16:12:24.000Z
content:
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from typing import Callable
from tensortrade.base import Identifiable
from tensortrade.orders import Order, TradeSide, TradeType
class OrderSpec(Identifiable):
def __init__(self,
side: TradeSide,
trade_type: TradeType,
pair: 'TradingPair',
criteria: Callable[['Order', 'Exchange'], bool] = None):
self.side = side
self.type = trade_type
self.pair = pair
self.criteria = criteria
def create_order(self, order: 'Order', exchange: 'Exchange') -> 'Order':
wallet_instrument = self.side.instrument(self.pair)
wallet = order.portfolio.get_wallet(exchange.id, instrument=wallet_instrument)
quantity = wallet.locked.get(order.path_id, 0)
return Order(step=exchange.clock.step,
exchange_name=order.exchange_name,
side=self.side,
trade_type=self.type,
pair=self.pair,
quantity=quantity,
portfolio=order.portfolio,
price=order.price,
criteria=self.criteria,
start=order.start,
end=order.end,
path_id=order.path_id)
def to_dict(self):
return {
"id": self.id,
"type": self.type,
"pair": self.pair,
"criteria": self.criteria
}
def __str__(self):
data = ['{}={}'.format(k, v) for k, v in self.to_dict().items()]
return '<{}: {}>'.format(self.__class__.__name__, ', '.join(data))
def __repr__(self):
return str(self)
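# Illustrative note, not part of the original module: create_order() sizes the
# follow-up order from whatever quantity the source order has locked in the
# wallet (wallet.locked.get(order.path_id, 0)), so an OrderSpec attached to an
# Order describes a subsequent leg that trades exactly what the first leg locked.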
avg_line_length: 33.895522 | max_line_length: 86 | alphanum_fraction: 0.597974

hexsha: af5f5d42ea6717872a77c8e365e3e6bef104cf9d | size: 415 | ext: py | lang: Python
max_stars: repo_path venv/Scripts/pip3-script.py | repo_name baniraz/selenium_ | head_hexsha e07362659fcc79b048cd88ea30429b0f6123a4d1 | licenses ["Apache-2.0"] | count null | events null – null
max_issues: repo_path venv/Scripts/pip3-script.py | repo_name baniraz/selenium_ | head_hexsha e07362659fcc79b048cd88ea30429b0f6123a4d1 | licenses ["Apache-2.0"] | count null | events null – null
max_forks: repo_path venv/Scripts/pip3-script.py | repo_name baniraz/selenium_ | head_hexsha e07362659fcc79b048cd88ea30429b0f6123a4d1 | licenses ["Apache-2.0"] | count null | events null – null
content:
#!C:\Users\User\Documents\GitHub\selenium_\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
avg_line_length: 31.923077 | max_line_length: 69 | alphanum_fraction: 0.66988

hexsha: 0a94c33594634aa098314c06ddcd857e45827fda | size: 7,862 | ext: py | lang: Python
max_stars: repo_path mmtbx/conformation_dependent_library/metal_coordination_library.py | repo_name indu-in/cctbx_project1 | head_hexsha e09447ddc2ba3aa9d91b21008b0162ab290b0c30 | licenses ["BSD-3-Clause-LBNL"] | count 2 | events 2021-03-18T12:31:57.000Z – 2022-03-14T06:27:06.000Z
max_issues: repo_path mmtbx/conformation_dependent_library/metal_coordination_library.py | repo_name indu-in/cctbx_project1 | head_hexsha e09447ddc2ba3aa9d91b21008b0162ab290b0c30 | licenses ["BSD-3-Clause-LBNL"] | count null | events null – null
max_forks: repo_path mmtbx/conformation_dependent_library/metal_coordination_library.py | repo_name indu-in/cctbx_project1 | head_hexsha e09447ddc2ba3aa9d91b21008b0162ab290b0c30 | licenses ["BSD-3-Clause-LBNL"] | count null | events null – null
content:
from __future__ import division
import sys
from cctbx import geometry_restraints
from cctbx.geometry_restraints import linking_class
origin_ids = linking_class.linking_class()
headers = ['Zn-SG (A)',
'SG-Zn-SG (degrees)',
'Zn-N (A)',
'N-Zn-N (degrees)',
'ZnCysxHisy',
]
database = {'Zn2+ tetrahedral': {
(4,0) : {
('ZN', 'SG') : (2.330, 0.029, 1033),
('SG', 'ZN', 'SG') : (109.45, 5.46, 1553),
}, # n/a n/a Cys4
(3,1) : {
('ZN', 'SG') : (2.318, 0.027, 912),
('SG', 'ZN', 'SG') : (112.15, 3.96, 912),
('ZN', 'ND1') : (2.074, 0.056, 303),
}, #n/a Cys3His1
(2,2) : {
('ZN', 'SG') : (2.306, 0.029, 76),
('SG', 'ZN', 'SG') : (116.23, 4.58, 38),
('ZN', 'ND1') : (2.040, 0.050, 65),
('ND1', 'ZN', 'ND1') : (102.38, 5.44, 38),
}, # Cys2His2
(1,3) : {
('ZN', 'SG') : (2.298, 0.017, 12),
('ZN', 'ND1') : (2.002, 0.045, 36),
('ND1', 'ZN', 'ND1') : (107.23, 4.78, 36),
}, # Cys1His3
(0,4) : {
}, #n/a n/a Insufficient data Insufficient data His4
}
}
database['ZN'] = database['Zn2+ tetrahedral']
for nums, restraints in database['Zn2+ tetrahedral'].items():
for atoms, values in list(restraints.items()):
if 'ND1' in atoms:
key = list(atoms)
for i, atom in enumerate(key):
if atom=='ND1':
key[i]='NE2'
restraints[tuple(key)]=values
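# Illustrative note, not part of the original module: each entry maps an atom
# tuple to (ideal value, sigma, count), e.g. database['ZN'][(2, 2)][('ZN', 'SG')]
# == (2.306, 0.029, 76), i.e. an ideal Zn-SG distance of 2.306 A with sigma
# 0.029 (the third number is presumably the number of observations). The loop
# above also registers every ND1 restraint under the equivalent NE2 key.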
def print_restraints(db):
print('-'*80)
for coordination, library in sorted(db.items()):
print(coordination)
for nums, restraints in sorted(library.items()):
print(' %s' % str(nums))
for atoms, values in sorted(restraints.items()):
print(' ',atoms, values)
def check_other_in_database(metal, other):
sub = database.get(metal.name.strip(), None)
if sub is None: return False
for key, item in sub.items():
for atoms in item:
if other.name.strip() in atoms: return True
return False
def get_metal_coordination_proxies(pdb_hierarchy,
nonbonded_proxies,
prefix=None,
params=None,
log=sys.stdout,
add_segid=None,
verbose=False):
def is_residue_already_linked_to_metal(linked, atom):
for link in linked:
if link.parent().id_str()==atom.parent().id_str():
break
else:
return False
return True
hbond_distance_cutoff = 3
if params is not None:
hbond_distance_cutoff = params.hbond_distance_cutoff
mbonds = {}
atoms = pdb_hierarchy.atoms()
sites_cart = atoms.extract_xyz()
get_sorted_result = nonbonded_proxies.get_sorted(
by_value="delta",
sites_cart=sites_cart)
if get_sorted_result is None: return mbonds
sorted_nonb, n_not_shown = get_sorted_result
n_nonb = len(sorted_nonb)
i = 0
while i < n_nonb and sorted_nonb[i][3] < hbond_distance_cutoff:
(labels, i_seq, j_seq, dist, vdw_distance, sym_op_j, rt_mx) = sorted_nonb[i]
a1 = atoms[i_seq]
ag1 = a1.parent()
a2 = atoms[j_seq]
ag2 = a2.parent()
metal = None
for metal_element in database:
if a1.name.strip()==metal_element:
metal = a1
other = a2
elif a2.name.strip()==metal_element:
metal = a2
other = a1
if metal:
if not check_other_in_database(metal, other): continue
mbonds.setdefault(metal.i_seq, {})
mbonds[metal.i_seq]['metal'] = metal
mbonds[metal.i_seq].setdefault('others', [])
if not is_residue_already_linked_to_metal(mbonds[metal.i_seq]['others'],
other,
):
mbonds[metal.i_seq]['others'].append(other)
i += 1
pairs = []
if verbose:
for key, item in mbonds.items():
for label, l in item.items():
if type(l)==type([]):
for atom in l:
print(' ',atom.quote())
else:
print(l.quote())
return mbonds
def get_proxies(coordination):
#
# TODO
  # - check that only one link is made to each residue
# e.g. 1a6y "2080 ZN ZN B 451 .*." "1874 CB CYS B 153 .*."
#
def _bond_generator(atoms):
for atom in atoms['others']:
yield atoms['metal'], atom
def _angle_generator(atoms):
for i, a1 in enumerate(atoms['others']):
for j, a2 in enumerate(atoms['others']):
if i==j: break
yield a1, atoms['metal'], a2
def _default_bonds(atoms):
tmp = []
for a1, a2 in _bond_generator(atoms):
p = geometry_restraints.bond_simple_proxy(
i_seqs=[a1.i_seq, a2.i_seq],
distance_ideal=2.3,
weight=1.0/0.03**2,
slack=0,
top_out=False,
limit=1,
origin_id=origin_ids.get_origin_id('metal coordination'))
tmp.append(p)
return tmp
bonds = []
angles = []
if coordination is None: return bonds, angles
atoms = None
for metal_i_seq, atoms in coordination.items():
if len(atoms['others'])<4:
bonds += _default_bonds(atoms)
continue
cyss = []
hiss = []
for atom in atoms['others']:
if atom.parent().resname=='CYS':
cyss.append(atom)
elif atom.parent().resname=='HIS':
hiss.append(atom)
key = (len(cyss), len(hiss))
metal_name = atoms['metal'].name.strip()
# if key not in database[metal_name]: continue
ideals = database[metal_name][key]
if not atoms: continue #return None, None
for a1, a2 in _bond_generator(atoms):
key = (a1.name.strip(), a2.name.strip())
if key not in ideals: continue
t = ideals[key]
p = geometry_restraints.bond_simple_proxy(
i_seqs=[a1.i_seq, a2.i_seq],
distance_ideal=t[0],
weight=1.0/t[1]**2,
slack=0,
top_out=False,
limit=1,
origin_id=origin_ids.get_origin_id('metal coordination'))
bonds.append(p)
for a1, a2, a3 in _angle_generator(atoms):
key = (a1.name.strip(), a2.name.strip(), a3.name.strip())
if key not in ideals: continue
t = ideals[key]
p = geometry_restraints.angle_proxy(
i_seqs=[a1.i_seq, a2.i_seq, a3.i_seq],
angle_ideal=t[0],
weight=1.0/t[1]**2,
origin_id=origin_ids.get_origin_id('metal coordination'))
angles.append(p)
return bonds, angles
def run(model_filename=None):
import mmtbx.monomer_library.pdb_interpretation as pdb_inter
#print_restraints(database)
if model_filename is not None:
from iotbx import pdb
pdb_inp = pdb.input(model_filename)
pdb_hierarchy = pdb_inp.construct_hierarchy()
#pdb_hierarchy.show()
pdb_processed_file = pdb_inter.run(
args=[model_filename],
assume_hydrogens_all_missing=False,
hard_minimum_nonbonded_distance=0.0,
nonbonded_distance_threshold=None,
substitute_non_crystallographic_unit_cell_if_necessary=True,
)
grm = pdb_processed_file.geometry_restraints_manager()
xrs = pdb_processed_file.xray_structure()
sites_cart = xrs.sites_cart()
site_labels = xrs.scatterers().extract_labels()
pair_proxies=grm.pair_proxies(sites_cart=sites_cart,site_labels=site_labels)
proxies_info_nonbonded = pair_proxies.nonbonded_proxies.get_sorted(
by_value="delta",
sites_cart=sites_cart,
site_labels=site_labels)
rc = get_metal_coordination_proxies(pdb_hierarchy,
pair_proxies.nonbonded_proxies,
)
bonds, angles = get_proxies(rc)
print('\n\tbonds, angles : %d, %d\n\n' % (len(bonds), len(angles)))
if __name__=="__main__":
args = sys.argv[1:]
del sys.argv[1:]
run(*tuple(args))
avg_line_length: 33.313559 | max_line_length: 80 | alphanum_fraction: 0.585729

hexsha: f0ccb633264d7faba7c7fdfec32e834078072c65 | size: 23,292 | ext: py | lang: Python
max_stars: repo_path scripts/deploy/deploy_badger.py | repo_name lookfirst/badger-system | head_hexsha 8362bcbf00abf51d24e0ba67b1b30c301ca34e40 | licenses ["MIT"] | count null | events null – null
max_issues: repo_path scripts/deploy/deploy_badger.py | repo_name lookfirst/badger-system | head_hexsha 8362bcbf00abf51d24e0ba67b1b30c301ca34e40 | licenses ["MIT"] | count null | events null – null
max_forks: repo_path scripts/deploy/deploy_badger.py | repo_name lookfirst/badger-system | head_hexsha 8362bcbf00abf51d24e0ba67b1b30c301ca34e40 | licenses ["MIT"] | count null | events null – null
content:
#!/usr/bin/python3
import json
from brownie import *
from config.badger_config import badger_config, badger_total_supply
from helpers.constants import APPROVED_STAKER_ROLE
from helpers.registry import registry
from rich.console import Console
from scripts.systems.badger_system import BadgerSystem, print_to_file
from tests.helpers import distribute_from_whales
console = Console()
def test_deploy(test=False, uniswap=True):
# Badger Deployer
deployer = ""
keeper = ""
guardian = ""
accounts.at(
web3.toChecksumAddress("0xDA25ee226E534d868f0Dd8a459536b03fEE9079b"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0x872213E29C85d7e30F1C8202FC47eD1Ec124BB1D"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0x29F7F8896Fb913CF7f9949C623F896a154727919"),
force=True,
)
if test:
accounts.at(
web3.toChecksumAddress("0xDA25ee226E534d868f0Dd8a459536b03fEE9079b"),
force=True,
)
else:
# Load accounts from keystore
deployer = accounts.load("badger_deployer")
keeper = accounts.load("badger_keeper")
guardian = accounts.load("badger_guardian")
# Ganache Accounts
if test:
accounts.at("0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", force=True)
accounts.at("0xFFcf8FDEE72ac11b5c542428B35EEF5769C409f0", force=True)
accounts.at("0x22d491Bde2303f2f43325b2108D26f1eAbA1e32b", force=True)
accounts.at("0xE11BA2b4D45Eaed5996Cd0823791E0C93114882d", force=True)
accounts.at("0xd03ea8624C8C5987235048901fB614fDcA89b117", force=True)
accounts.at("0x95cED938F7991cd0dFcb48F0a06a40FA1aF46EBC", force=True)
accounts.at("0x3E5e9111Ae8eB78Fe1CC3bb8915d5D461F3Ef9A9", force=True)
accounts.at("0x28a8746e75304c0780E011BEd21C72cD78cd535E", force=True)
accounts.at("0xACa94ef8bD5ffEE41947b4585a84BdA5a3d3DA6E", force=True)
accounts.at("0x1dF62f291b2E969fB0849d99D9Ce41e2F137006e", force=True)
# Unlocked Accounts
if test:
accounts.at(
web3.toChecksumAddress("0x193991827e291599a262e7fa7d212ff1ae31d110"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0x97ca371d59bbfefdb391aa6dcbdf4455fec361f2"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0x3d24d77bec08549d7ea86c4e9937204c11e153f1"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0xcD9e6Df80169b6a2CFfDaE613fAbC3F7C3647B14"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0xaf379f0228ad0d46bb7b4f38f9dc9bcc1ad0360c"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0xc25099792e9349c7dd09759744ea681c7de2cb66"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0xb1f2cdec61db658f091671f5f199635aef202cac"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0x2bf792ffe8803585f74e06907900c2dc2c29adcb"),
force=True,
)
# Test Accounts
accounts.at(
web3.toChecksumAddress("0xe7bab002A39f9672a1bD0E949d3128eeBd883575"),
force=True,
)
accounts.at(
web3.toChecksumAddress("0x482c741b0711624d1f462E56EE5D8f776d5970dC"),
force=True,
)
deployer = accounts.at(
web3.toChecksumAddress(("0xDA25ee226E534d868f0Dd8a459536b03fEE9079b"))
)
keeper = accounts.at(
web3.toChecksumAddress(("0xDA25ee226E534d868f0Dd8a459536b03fEE9079b"))
)
guardian = accounts.at(
web3.toChecksumAddress(("0xDA25ee226E534d868f0Dd8a459536b03fEE9079b"))
)
print(
"Initialize Badger System",
{"deployer": deployer, "keeper": keeper, "guardian": guardian},
)
print(deployer.balance())
# ClaimEncoder.deploy({'from': deployer})
# assert False
badger = BadgerSystem(badger_config, None, deployer, keeper, guardian, deploy=False)
badger.test = test
badger_deploy_file = "deploy-final.json"
print("Connecting to deploy at " + badger_deploy_file)
with open(badger_deploy_file) as f:
badger_deploy = json.load(f)
print("Connect Logic Contracts")
badger.connect_logic(badger_deploy["logic"])
print("Create / Connect Badger<>wBTC LP Pair")
# pair = create_uniswap_pair(badger.token.address, registry.tokens.wbtc, deployer)
factory = interface.IUniswapV2Factory(
web3.toChecksumAddress(registry.uniswap.factoryV2)
)
# tx = factory.createPair(badger.token.address, registry.tokens.wbtc, {"from": deployer})
pairAddress = factory.getPair(badger.token.address, registry.tokens.wbtc)
pair = interface.IUniswapV2Pair(pairAddress)
badger.pair = pair
if uniswap:
print("Test: Distribute assets to deployer")
accounts.at(badger_config.dao.initialOwner, force=True)
badger.token.transfer(
deployer, badger_total_supply, {"from": badger_config.dao.initialOwner}
)
console.log(
"after initial token transfer",
badger.token.balanceOf(deployer) / 1e18,
badger_total_supply,
)
assert badger.token.balanceOf(deployer) == badger_total_supply
distribute_from_whales(badger, deployer)
console.log("after whale funding", badger.token.balanceOf(deployer) / 1e18)
print("Test: Add Badger<>wBTC Liquidity")
wbtc = interface.IERC20(registry.tokens.wbtc)
        # In test mode, add liquidity to uniswap
badger.uniswap.addMaxLiquidity(badger.token, wbtc, deployer)
# print("Deploy core logic")
# badger.deploy_core_logic()
# print("Deploy Sett core logic")
# badger.deploy_sett_core_logic()
# badger.deploy_sett_strategy_logic()
console.log("before deploys", badger.token.balanceOf(deployer) / 1e18)
print("Deploy rewards & vesting infrastructure")
# badger.deploy_rewards_escrow()
# badger.deploy_badger_tree()
# badger.deploy_dao_badger_timelock()
# badger.deploy_team_vesting()
# badger.deploy_badger_hunt()
# print("Connect Rewards and Vesting Infrastructure")
badger.connect_rewards_escrow(badger_deploy["rewardsEscrow"])
badger.connect_badger_tree(badger_deploy["badgerTree"])
badger.connect_dao_badger_timelock(badger_deploy["daoBadgerTimelock"])
badger.connect_team_vesting(badger_deploy["teamVesting"])
badger.connect_badger_hunt(badger_deploy["badgerHunt"])
console.log("after reward infra deploys", badger.token.balanceOf(deployer) / 1e18)
print("Deploy Sett controllers")
# badger.add_controller("native")
# badger.add_controller("harvest")
badger.connect_controller(
"native", badger_deploy["sett_system"]["controllers"]["native"]
)
badger.connect_controller(
"harvest", badger_deploy["sett_system"]["controllers"]["harvest"]
)
print("Deploy native Sett vaults")
controller = badger.getController("native")
# badger.deploy_sett("native.badger", badger.token, controller)
# badger.deploy_sett("native.renCrv", registry.curve.pools.renCrv.token, controller)
# badger.deploy_sett("native.sbtcCrv", registry.curve.pools.sbtcCrv.token, controller)
# badger.deploy_sett("native.tbtcCrv", registry.curve.pools.tbtcCrv.token, controller)
# badger.deploy_sett("native.uniBadgerWbtc", badger.pair.address, controller)
settSystem = badger_deploy["sett_system"]
badger.connect_sett("native.badger", settSystem["vaults"]["native.badger"])
badger.connect_sett("native.renCrv", settSystem["vaults"]["native.renCrv"])
badger.connect_sett(
"native.sbtcCrv", badger_deploy["sett_system"]["vaults"]["native.sbtcCrv"]
)
badger.connect_sett(
"native.tbtcCrv", badger_deploy["sett_system"]["vaults"]["native.tbtcCrv"]
)
badger.connect_sett(
"native.uniBadgerWbtc",
badger_deploy["sett_system"]["vaults"]["native.uniBadgerWbtc"],
)
print("Deploy & configure native Sett strategies")
print("Deploy vault-specific staking rewards")
# badger.deploy_sett_staking_rewards("native.badger", badger.token, badger.token)
# badger.deploy_sett_staking_rewards(
# "native.uniBadgerWbtc", pair.address, badger.token
# )
badger.connect_sett_staking_rewards(
"native.badger", settSystem["rewards"]["native.badger"]
)
badger.connect_sett_staking_rewards(
"native.uniBadgerWbtc", settSystem["rewards"]["native.uniBadgerWbtc"]
)
print("Strategy: Native Badger")
# badger.deploy_strategy_native_badger()
badger.connect_strategy(
"native.badger",
settSystem["strategies"]["native.badger"],
"StrategyBadgerRewards",
)
print("Strategy: Native RenCrv")
# badger.deploy_strategy_native_rencrv()
badger.connect_strategy(
"native.renCrv",
settSystem["strategies"]["native.renCrv"],
"StrategyCurveGaugeRenBtcCrv",
)
print("Strategy: Native sBtcCrv")
# badger.deploy_strategy_native_sbtccrv()
badger.connect_strategy(
"native.sbtcCrv",
settSystem["strategies"]["native.sbtcCrv"],
"StrategyCurveGaugeSbtcCrv",
)
print("Strategy: Native tBtcCrv")
# badger.deploy_strategy_native_tbtccrv()
badger.connect_strategy(
"native.tbtCcrv",
settSystem["strategies"]["native.tbtcCrv"],
"StrategyCurveGaugeTbtcCrv",
)
print("Strategy: Native uniBadgerWbtc")
# badger.deploy_strategy_native_uniBadgerWbtc()
badger.connect_strategy(
"native.uniBadgerWbtc",
settSystem["strategies"]["native.uniBadgerWbtc"],
"StrategyBadgerLpMetaFarm",
)
print("Deploy harvest Sett vaults")
controller = badger.getController("harvest")
# badger.deploy_sett(
# "harvest.renCrv",
# registry.curve.pools.renCrv.token,
# controller,
# namePrefixOverride=True,
# namePrefix="Badger SuperSett (Harvest) ",
# symbolPrefix="bSuper",
# )
badger.connect_sett("harvest.renCrv", settSystem["vaults"]["harvest.renCrv"])
print("Deploy & configure harvest Sett strategies")
# badger.deploy_strategy_harvest_rencrv()
badger.connect_strategy(
"harvest.renCrv",
settSystem["strategies"]["harvest.renCrv"],
"StrategyHarvestMetaFarm",
)
# print("Deploy reward geysers")
# badger.deploy_geyser(badger.getSett("native.badger"), "native.badger")
# badger.deploy_geyser(badger.getSett("native.renCrv"), "native.renCrv")
# badger.deploy_geyser(badger.getSett("native.sbtcCrv"), "native.sbtcCrv")
# badger.deploy_geyser(badger.getSett("native.tbtcCrv"), "native.tbtcCrv")
# badger.deploy_geyser(badger.getSett("native.uniBadgerWbtc"), "native.uniBadgerWbtc")
# badger.deploy_geyser(badger.getSett("harvest.renCrv"), "harvest.renCrv")
print("Connect reward geysers")
badger.connect_geyser("native.badger", badger_deploy["geysers"]["native.badger"])
badger.connect_geyser("native.renCrv", badger_deploy["geysers"]["native.renCrv"])
badger.connect_geyser("native.sbtcCrv", badger_deploy["geysers"]["native.sbtcCrv"])
badger.connect_geyser("native.tbtcCrv", badger_deploy["geysers"]["native.tbtcCrv"])
badger.connect_geyser(
"native.uniBadgerWbtc", badger_deploy["geysers"]["native.uniBadgerWbtc"]
)
badger.connect_geyser("harvest.renCrv", badger_deploy["geysers"]["harvest.renCrv"])
# Transfer ownership of all sett Rewards contracts to multisig
# Transfer proxyAdmin to multisig
console.log("after deploys", badger.token.balanceOf(deployer) / 1e18)
return badger
def post_deploy_config(badger: BadgerSystem):
deployer = badger.deployer
"""
Set initial conditions on immediate post-deploy Badger
    Transfer tokens to their initial locations
- Rewards Escrow (40%, minus tokens initially distributed via Sett Special StakingRewards)
- Badger Hunt (15%)
- DAO Timelock (35%)
- Founder Rewards (10%)
"""
    # Approve BadgerTree to receive rewards tokens
print(deployer)
print(badger.rewardsEscrow.owner())
# badger.rewardsEscrow.approveRecipient(badger.badgerTree, {"from": deployer})
# badger.rewardsEscrow.approveRecipient(
# badger.getGeyser("native.badger"), {"from": deployer}
# )
# badger.rewardsEscrow.approveRecipient(
# badger.getGeyser("native.uniBadgerWbtc"), {"from": deployer}
# )
# badger.rewardsEscrow.approveRecipient(
# badger.getGeyser("native.renCrv"), {"from": deployer}
# )
# badger.rewardsEscrow.approveRecipient(
# badger.getGeyser("native.sbtcCrv"), {"from": deployer}
# )
# badger.rewardsEscrow.approveRecipient(
# badger.getGeyser("native.tbtcCrv"), {"from": deployer}
# )
# badger.rewardsEscrow.approveRecipient(
# badger.getGeyser("harvest.renCrv"), {"from": deployer}
# )
# console.log("before signal locks", badger.token.balanceOf(deployer) / 1e18)
# # Geyser Signals
# """
# These signals are used to calculate the rewards distributions distributed via BadgerTree. The tokens are actually held in the RewardsEscrow and sent to the BadgerTree as needed.
# The escrow will only send a few days worth of rewards initially at a time to the RewardsTree as another failsafe mechanism.
# renbtcCRV — 76750 $BADGER
# sbtcCRV — 76,750 $BADGER
# tbtcCRV — 76,750 $BADGER
# Badger — 90,000 $BADGER / 2
# - 45000 in Sett StakingRewards
# - 45000 in Geyser
# Badger <>wBTC Uniswap LP — 130,000 $BADGER / 2
# - 65000 in Sett StakingRewards
# - 65000 in Geyser
# Super Sett
# Pickle renbtcCRV — 76,750 $BADGER
# Harvest renbtc CRV — 76,750 $BADGER
# """
# badger.signal_token_lock(
# "native.badger", badger_config.geyserParams.unlockSchedules.badger[0]
# )
# badger.signal_token_lock(
# "native.uniBadgerWbtc",
# badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[0],
# )
# badger.signal_token_lock(
# "native.renCrv", badger_config.geyserParams.unlockSchedules.bRenCrv[0]
# )
# badger.signal_token_lock(
# "native.sbtcCrv", badger_config.geyserParams.unlockSchedules.bSbtcCrv[0]
# )
# badger.signal_token_lock(
# "native.tbtcCrv", badger_config.geyserParams.unlockSchedules.bTbtcCrv[0]
# )
# badger.signal_token_lock(
# "harvest.renCrv",
# badger_config.geyserParams.unlockSchedules.bSuperRenCrvHarvest[0],
# )
# console.log(
# "before initial token distribution", badger.token.balanceOf(deployer) / 1e18
# )
# ===== Initial Token Distribution =====
# == Native Badger ==
rewards = badger.getSettRewards("native.badger")
strategy = badger.getStrategy("native.badger")
badger.distribute_staking_rewards(
"native.badger",
badger_config.geyserParams.unlockSchedules.badger[0].amount,
notify=False,
)
rewards.grantRole(APPROVED_STAKER_ROLE, strategy, {"from": deployer})
# == Uni LP ==
rewards = badger.getSettRewards("native.uniBadgerWbtc")
strategy = badger.getStrategy("native.uniBadgerWbtc")
badger.distribute_staking_rewards(
"native.uniBadgerWbtc",
badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[0].amount,
notify=False,
)
rewards.grantRole(APPROVED_STAKER_ROLE, strategy, {"from": deployer})
console.log(
"remaining after special pool distributions",
badger.token.balanceOf(deployer) / 1e18,
)
distributedToPools = (
badger_config.geyserParams.unlockSchedules.badger[0].amount
+ badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[0].amount
)
# print(
# badger_config.geyserParams.unlockSchedules.badger[0],
# badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[0],
# badger_config.geyserParams.unlockSchedules.bRenCrv[0],
# badger_config.geyserParams.unlockSchedules.bSbtcCrv[0],
# badger_config.geyserParams.unlockSchedules.bTbtcCrv[0],
# badger_config.geyserParams.unlockSchedules.bSuperRenCrvHarvest[0]
# )
supplyForWeek1Rewards = (
badger_config.geyserParams.unlockSchedules.badger[0].amount
+ badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[0].amount
+ badger_config.geyserParams.unlockSchedules.bRenCrv[0].amount
+ badger_config.geyserParams.unlockSchedules.bSbtcCrv[0].amount
+ badger_config.geyserParams.unlockSchedules.bTbtcCrv[0].amount
+ badger_config.geyserParams.unlockSchedules.bSuperRenCrvHarvest[0].amount
)
toEscrow = (
badger_config.rewardsEscrowBadgerAmount
- distributedToPools
- supplyForWeek1Rewards
)
badger.initialRewardsEscrowBalance = toEscrow
badger.initialBadgerTreeBalance = supplyForWeek1Rewards
console.log(locals(), badger_config.rewardsEscrowBadgerAmount)
# == Badger Hunt ==
badger.token.transfer(
badger.badgerHunt, badger_config.huntParams.badgerAmount, {"from": deployer},
)
# == Badger Tree ==
badger.token.transfer(
badger.badgerTree, supplyForWeek1Rewards, {"from": deployer},
)
# == Rewards Escrow ==
badger.token.transfer(
badger.rewardsEscrow, toEscrow, {"from": deployer},
)
# == DAO Timelock ==
badger.token.transfer(
badger.daoBadgerTimelock,
badger_config.tokenLockParams.badgerLockAmount,
{"from": deployer},
)
console.log("after daoBadgerTimelock", badger.token.balanceOf(deployer) / 1e18)
# == Team Vesting ==
badger.token.transfer(
badger.teamVesting, badger_config.founderRewardsAmount, {"from": deployer}
)
tokenDistributionTargets = {
"deployer": 0,
"rewardsEscrow": toEscrow,
"native.badger Pool": badger_config.geyserParams.unlockSchedules.badger[
0
].amount,
"native.uniBadgerWbtc Pool": badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[
0
].amount,
"native.badger geyser": badger_config.geyserParams.unlockSchedules.badger[
0
].amount,
"native.uniBadgerWbtc geyser": badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[
0
].amount,
"native.renCrv geyser": badger_config.geyserParams.unlockSchedules.bRenCrv[
0
].amount,
"native.sbtcCrv geyser": badger_config.geyserParams.unlockSchedules.bSbtcCrv[
0
].amount,
"native.tbtcCrv geyser": badger_config.geyserParams.unlockSchedules.bTbtcCrv[
0
].amount,
"harvest.renCrv geyser": badger_config.geyserParams.unlockSchedules.bSuperRenCrvHarvest[
0
].amount,
"daoBadgerTimelock": badger_config.tokenLockParams.badgerLockAmount,
"teamVesting": badger_config.founderRewardsAmount,
"badgerHunt": badger_config.huntParams.badgerAmount,
"badgerTree": 0,
}
tokenDistributionBalances = {
"deployer": badger.token.balanceOf(deployer),
"rewardsEscrow": badger.token.balanceOf(badger.rewardsEscrow),
"native.badger Pool": badger.token.balanceOf(
badger.getSettRewards("native.badger")
),
"native.uniBadgerWbtc Pool": badger.token.balanceOf(
badger.getSettRewards("native.uniBadgerWbtc")
),
"native.badger geyser": badger.token.balanceOf(
badger.getGeyser("native.badger")
),
"native.uniBadgerWbtc geyser": badger.token.balanceOf(
badger.getGeyser("native.uniBadgerWbtc")
),
"native.renCrv geyser": badger.token.balanceOf(
badger.getGeyser("native.renCrv")
),
"native.sbtcCrv geyser": badger.token.balanceOf(
badger.getGeyser("native.sbtcCrv")
),
"native.tbtcCrv geyser": badger.token.balanceOf(
badger.getGeyser("native.tbtcCrv")
),
"harvest.renCrv geyser": badger.token.balanceOf(
badger.getGeyser("harvest.renCrv")
),
"daoBadgerTimelock": badger.token.balanceOf(badger.daoBadgerTimelock),
"teamVesting": badger.token.balanceOf(badger.teamVesting),
"badgerHunt": badger.token.balanceOf(badger.badgerHunt),
"badgerTree": badger.initialBadgerTreeBalance,
}
if not badger.test:
expectedSum = 0
actualSum = 0
for key, value in tokenDistributionTargets.items():
print("expected", key, value / 1e18)
print("actual", key, tokenDistributionBalances[key] / 1e18)
expectedSum += value
actualSum += tokenDistributionBalances[key]
print("expectedSum", expectedSum / 1e18)
print("actualSum", actualSum / 1e18)
# assert expectedSum == badger_total_supply
# assert actualSum == badger_total_supply
console.log("after teamVesting", badger.token.balanceOf(deployer) / 1e18)
"""
===== Transfer Ownership =====
- proxyAdmin should be owned by multisig
- All contract owner / admin rights should be given to multisig
"""
badger.rewardsEscrow.transferOwnership(badger.devMultisig, {"from": deployer})
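    # Hypothetical sketch (not implemented in this script): the proxyAdmin handoff
    # mentioned in the docstring above would look roughly like the line below;
    # `badger.proxyAdmin` is an assumed attribute and is not confirmed here.
    # badger.proxyAdmin.transferOwnership(badger.devMultisig, {"from": deployer})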
def start_staking_rewards(badger: BadgerSystem):
"""
StakingRewards contracts start immediately when the first tokens are locked
"""
# == Badger ==
deployer = badger.deployer
rewards = badger.getSettRewards("native.badger")
assert (
badger.token.balanceOf(rewards)
>= badger_config.geyserParams.unlockSchedules.badger[0].amount
)
assert rewards.stakingToken() == badger.token
assert rewards.rewardsToken() == badger.token
rewards.notifyRewardAmount(
badger_config.geyserParams.unlockSchedules.badger[0].amount, {"from": deployer}
)
# == Uni LP ==
rewards = badger.getSettRewards("native.uniBadgerWbtc")
assert (
badger.token.balanceOf(rewards)
>= badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[0].amount
)
assert rewards.stakingToken() == badger.pair
assert rewards.rewardsToken() == badger.token
rewards.notifyRewardAmount(
badger_config.geyserParams.unlockSchedules.uniBadgerWbtc[0].amount,
{"from": deployer},
)
def deploy_flow(test=False, outputToFile=True, uniswap=False):
badger = test_deploy(test=test, uniswap=uniswap)
print("Test: Badger System Deployed")
if outputToFile:
fileName = "deploy-" + str(chain.id) + "final" + ".json"
print("Printing contract addresses to ", fileName)
print_to_file(badger, fileName)
if not test:
post_deploy_config(badger)
start_staking_rewards(badger)
print("Test: Badger System Setup Complete")
return badger
def main():
return deploy_flow(test=True, outputToFile=False)
| 35.398176
| 187
| 0.675897
|
24255c3e007c7c3896f7418e8236a8238d23de87
| 1,420
|
py
|
Python
|
RNAStructure_processing_example.py
|
wenjingk/MapToCleave
|
53bb73525eabf8d1f7f11781864813d8c7f800f6
|
[
"MIT"
] | null | null | null |
RNAStructure_processing_example.py
|
wenjingk/MapToCleave
|
53bb73525eabf8d1f7f11781864813d8c7f800f6
|
[
"MIT"
] | null | null | null |
RNAStructure_processing_example.py
|
wenjingk/MapToCleave
|
53bb73525eabf8d1f7f11781864813d8c7f800f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
from scripts.RNAStructure import RNAStructure
# input
vienna=sys.argv[1]
rna = RNAStructure(vienna)
# ### MapToCleave hairpin information
# In[2]:
print('# MapToCleave hairpin identity')
print(rna.name)
# In[3]:
print('# MapToCleave hairpin sequence (118nt)')
print(rna.sequence_with_U)
# In[4]:
print('# MapToCleave hairpin secondary structure in dotbracket format')
print(rna.dotbracket)
# In[5]:
print('# MapToCleave hairpin secondary structure in text format')
for i in rna.structure:
print(i)
# In[6]:
print('# reference hairpin, mature and star information based on miRBase v21')
print(rna.refhairpin)
print(rna.refmirna)
# In[7]:
print('# start site of 5p and/or 3p arm identified based on the reads pooled from HEK-293T, NIH-3T3 and MEF transfection samples')
print('# the R means reference start site; D means dominant start site; A means alternative start site. The D and A sites are generated based on the mapping profile')
print('# the text in bracket means sample;position;read abundance')
print(rna.refhairpin)
print(rna.refmirna)
for i in rna.cutposition:
print(i)
# In[8]:
for i in rna.mappingprofile_print:
print(i)
# In[9]:
print('# information obtained from the mapping profile')
print('# D means mature arm and M means start arm identified based on the mapping profile')
for i in rna.mappingprofile_statistics:
print(i)
| 20
| 166
| 0.734507
|
9326ac6e5ab9423ac6e813d971f295fe5745181a
| 14,918
|
py
|
Python
|
superset/security.py
|
zuxqoj/incubator-superset
|
de5972610998d8faf1dfe2036aee07a2ffbc4509
|
[
"Apache-2.0"
] | null | null | null |
superset/security.py
|
zuxqoj/incubator-superset
|
de5972610998d8faf1dfe2036aee07a2ffbc4509
|
[
"Apache-2.0"
] | null | null | null |
superset/security.py
|
zuxqoj/incubator-superset
|
de5972610998d8faf1dfe2036aee07a2ffbc4509
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=C,R,W
"""A set of constants and methods to manage permissions and security"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from flask import g
from flask_appbuilder.security.sqla import models as ab_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from sqlalchemy import or_
from superset import sql_parse
from superset.connectors.connector_registry import ConnectorRegistry
READ_ONLY_MODEL_VIEWS = {
'DatabaseAsync',
'DatabaseView',
'DruidClusterModelView',
}
GAMMA_READ_ONLY_MODEL_VIEWS = {
'SqlMetricInlineView',
'TableColumnInlineView',
'TableModelView',
'DruidColumnInlineView',
'DruidDatasourceModelView',
'DruidMetricInlineView',
} | READ_ONLY_MODEL_VIEWS
ADMIN_ONLY_VIEW_MENUS = {
'AccessRequestsModelView',
'Manage',
'SQL Lab',
'Queries',
'Refresh Druid Metadata',
'ResetPasswordView',
'RoleModelView',
'Security',
'UserDBModelView',
'UserLDAPModelView',
'UserOAuthModelView',
'UserOIDModelView',
'UserRemoteUserModelView',
}
ALPHA_ONLY_VIEW_MENUS = {
'Upload a CSV',
}
ADMIN_ONLY_PERMISSIONS = {
'all_database_access',
'can_sql_json', # TODO: move can_sql_json to sql_lab role
'can_override_role_permissions',
'can_sync_druid_source',
'can_override_role_permissions',
'can_approve',
'can_update_role',
}
READ_ONLY_PERMISSION = {
'can_show',
'can_list',
}
ALPHA_ONLY_PERMISSIONS = set([
'muldelete',
'all_datasource_access',
])
OBJECT_SPEC_PERMISSIONS = set([
'database_access',
'schema_access',
'datasource_access',
'metric_access',
])
class SupersetSecurityManager(SecurityManager):
def get_schema_perm(self, database, schema):
if schema:
return '[{}].[{}]'.format(database, schema)
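    # Example with illustrative names: get_schema_perm('examples_db', 'public')
    # yields the view-menu name '[examples_db].[public]'; when no schema is given
    # the method falls through and returns None.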
def can_access(self, permission_name, view_name, user=None):
"""Protecting from has_access failing from missing perms/view"""
if not user:
user = g.user
if user.is_anonymous():
return self.is_item_public(permission_name, view_name)
return self._has_view_access(user, permission_name, view_name)
def all_datasource_access(self, user=None):
return self.can_access(
'all_datasource_access', 'all_datasource_access', user=user)
def database_access(self, database, user=None):
return (
self.can_access(
'all_database_access', 'all_database_access', user=user) or
self.can_access('database_access', database.perm, user=user)
)
def schema_access(self, datasource, user=None):
return (
self.database_access(datasource.database, user=user) or
self.all_datasource_access(user=user) or
self.can_access('schema_access', datasource.schema_perm, user=user)
)
def datasource_access(self, datasource, user=None):
return (
self.schema_access(datasource, user=user) or
self.can_access('datasource_access', datasource.perm, user=user)
)
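    # Taken together, the three checks above cascade from broad to narrow grants:
    #   all_database_access / database_access(database.perm)
    #     -> all_datasource_access / schema_access(schema_perm)
    #       -> datasource_access(datasource.perm)
    # so holding any one level that covers the datasource is sufficient.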
def get_datasource_access_error_msg(self, datasource):
return """This endpoint requires the datasource {}, database or
`all_datasource_access` permission""".format(datasource.name)
def get_datasource_access_link(self, datasource):
from superset import conf
return conf.get('PERMISSION_INSTRUCTIONS_LINK')
def get_table_access_error_msg(self, table_name):
return """You need access to the following tables: {}, all database access or
`all_datasource_access` permission""".format(table_name)
def get_table_access_link(self, tables):
from superset import conf
return conf.get('PERMISSION_INSTRUCTIONS_LINK')
def datasource_access_by_name(
self, database, datasource_name, schema=None):
from superset import db
if self.database_access(database) or self.all_datasource_access():
return True
schema_perm = self.get_schema_perm(database, schema)
if schema and self.can_access('schema_access', schema_perm):
return True
datasources = ConnectorRegistry.query_datasources_by_name(
db.session, database, datasource_name, schema=schema)
for datasource in datasources:
if self.can_access('datasource_access', datasource.perm):
return True
return False
def get_schema_and_table(self, table_in_query, schema):
table_name_pieces = table_in_query.split('.')
if len(table_name_pieces) == 2:
table_schema = table_name_pieces[0]
table_name = table_name_pieces[1]
else:
table_schema = schema
table_name = table_name_pieces[0]
return (table_schema, table_name)
def datasource_access_by_fullname(
self, database, table_in_query, schema):
table_schema, table_name = self.get_schema_and_table(table_in_query, schema)
return self.datasource_access_by_name(
database, table_name, schema=table_schema)
def rejected_datasources(self, sql, database, schema):
superset_query = sql_parse.SupersetQuery(sql)
return [
t for t in superset_query.tables if not
self.datasource_access_by_fullname(database, t, schema)]
def user_datasource_perms(self):
datasource_perms = set()
for r in g.user.roles:
for perm in r.permissions:
if (
perm.permission and
'datasource_access' == perm.permission.name):
datasource_perms.add(perm.view_menu.name)
return datasource_perms
def schemas_accessible_by_user(self, database, schemas):
from superset import db
from superset.connectors.sqla.models import SqlaTable
if self.database_access(database) or self.all_datasource_access():
return schemas
subset = set()
for schema in schemas:
schema_perm = self.get_schema_perm(database, schema)
if self.can_access('schema_access', schema_perm):
subset.add(schema)
perms = self.user_datasource_perms()
if perms:
tables = (
db.session.query(SqlaTable)
.filter(
SqlaTable.perm.in_(perms),
SqlaTable.database_id == database.id,
)
.all()
)
for t in tables:
if t.schema:
subset.add(t.schema)
return sorted(list(subset))
def accessible_by_user(self, database, datasource_names, schema=None):
from superset import db
if self.database_access(database) or self.all_datasource_access():
return datasource_names
if schema:
schema_perm = self.get_schema_perm(database, schema)
if self.can_access('schema_access', schema_perm):
return datasource_names
user_perms = self.user_datasource_perms()
user_datasources = ConnectorRegistry.query_datasources_by_permissions(
db.session, database, user_perms)
if schema:
names = {
d.table_name
for d in user_datasources if d.schema == schema}
return [d for d in datasource_names if d in names]
else:
full_names = {d.full_name for d in user_datasources}
return [d for d in datasource_names if d in full_names]
def merge_perm(self, permission_name, view_menu_name):
# Implementation copied from sm.find_permission_view_menu.
# TODO: use sm.find_permission_view_menu once issue
# https://github.com/airbnb/superset/issues/1944 is resolved.
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
if permission and view_menu:
pv = self.get_session.query(self.permissionview_model).filter_by(
permission=permission, view_menu=view_menu).first()
if not pv and permission_name and view_menu_name:
self.add_permission_view_menu(permission_name, view_menu_name)
def is_user_defined_permission(self, perm):
return perm.permission.name in OBJECT_SPEC_PERMISSIONS
def create_custom_permissions(self):
# Global perms
self.merge_perm('all_datasource_access', 'all_datasource_access')
self.merge_perm('all_database_access', 'all_database_access')
def create_missing_perms(self):
"""Creates missing perms for datasources, schemas and metrics"""
from superset import db
from superset.models import core as models
logging.info(
'Fetching a set of all perms to lookup which ones are missing')
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
def merge_pv(view_menu, perm):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self.merge_perm(view_menu, perm)
logging.info('Creating missing datasource permissions.')
datasources = ConnectorRegistry.get_all_datasources(db.session)
for datasource in datasources:
merge_pv('datasource_access', datasource.get_perm())
merge_pv('schema_access', datasource.schema_perm)
logging.info('Creating missing database permissions.')
databases = db.session.query(models.Database).all()
for database in databases:
merge_pv('database_access', database.perm)
logging.info('Creating missing metrics permissions')
metrics = []
for datasource_class in ConnectorRegistry.sources.values():
metrics += list(db.session.query(datasource_class.metric_class).all())
for metric in metrics:
if metric.is_restricted:
merge_pv('metric_access', metric.perm)
def clean_perms(self):
"""FAB leaves faulty permissions that need to be cleaned up"""
logging.info('Cleaning faulty perms')
sesh = self.get_session
pvms = (
sesh.query(ab_models.PermissionView)
.filter(or_(
ab_models.PermissionView.permission == None, # NOQA
ab_models.PermissionView.view_menu == None, # NOQA
))
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
logging.info('Deleted {} faulty permissions'.format(deleted_count))
def sync_role_definitions(self):
"""Inits the Superset application with security roles and such"""
from superset import conf
logging.info('Syncing role definition')
self.create_custom_permissions()
# Creating default roles
self.set_role('Admin', self.is_admin_pvm)
self.set_role('Alpha', self.is_alpha_pvm)
self.set_role('Gamma', self.is_gamma_pvm)
self.set_role('granter', self.is_granter_pvm)
self.set_role('sql_lab', self.is_sql_lab_pvm)
self.set_role('Dashboard_Viewer', self.is_dashboard_viewer_pvm)
if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
self.set_role('Public', self.is_gamma_pvm)
self.create_missing_perms()
# commit role and view menu updates
self.get_session.commit()
self.clean_perms()
def set_role(self, role_name, pvm_check):
logging.info('Syncing {} perms'.format(role_name))
sesh = self.get_session
pvms = sesh.query(ab_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.add_role(role_name)
role_pvms = [p for p in pvms if pvm_check(p)]
role.permissions = role_pvms
sesh.merge(role)
sesh.commit()
def is_admin_only(self, pvm):
        # Non-read-only operations on read-only model views are allowed only for admins
if (pvm.view_menu.name in READ_ONLY_MODEL_VIEWS and
pvm.permission.name not in READ_ONLY_PERMISSION):
return True
return (
pvm.view_menu.name in ADMIN_ONLY_VIEW_MENUS or
pvm.permission.name in ADMIN_ONLY_PERMISSIONS
)
def is_alpha_only(self, pvm):
if (pvm.view_menu.name in GAMMA_READ_ONLY_MODEL_VIEWS and
pvm.permission.name not in READ_ONLY_PERMISSION):
return True
return (
pvm.view_menu.name in ALPHA_ONLY_VIEW_MENUS or
pvm.permission.name in ALPHA_ONLY_PERMISSIONS
)
def is_admin_pvm(self, pvm):
return not self.is_user_defined_permission(pvm)
def is_alpha_pvm(self, pvm):
return not (self.is_user_defined_permission(pvm) or self.is_admin_only(pvm))
def is_gamma_pvm(self, pvm):
return not (self.is_user_defined_permission(pvm) or self.is_admin_only(pvm) or
self.is_alpha_only(pvm))
def is_sql_lab_pvm(self, pvm):
return (
pvm.view_menu.name in {
'SQL Lab', 'SQL Editor', 'Query Search', 'Saved Queries',
} or
pvm.permission.name in {
'can_sql_json', 'can_csv', 'can_search_queries', 'can_sqllab_viz',
'can_sqllab',
})
def is_dashboard_viewer_pvm(self, pvm):
        return (
            self.is_base_view_pvm(pvm) or
            self.is_base_security_pvm(pvm) or
            pvm.permission.name in {
                'can_dashboard', 'can_explore_json',
            } or (
                pvm.permission.name in {'can_list'} and
                pvm.view_menu.name in {
                    'CssTemplateAsyncModelView', 'DashboardModelViewAsync',
                }
            )
        )
def is_base_view_pvm(self, pvm):
return (
pvm.permission.name in {
'can_fave_slices', 'can_fave_dashboards', 'can_recent_activity',
})
def is_base_security_pvm(self, pvm):
        return (
            pvm.permission.name in {
                'can_userinfo', 'resetmypassword', 'can_this_form_get', 'can_this_form_post',
            } and pvm.view_menu.name in {'UserDBModelView', 'ResetMyPasswordView'}
            # The check above can match a few options that are not actual PVMs,
            # but that is acceptable here because those permissions are never
            # added to the role.
        )
def is_granter_pvm(self, pvm):
return pvm.permission.name in {
'can_override_role_permissions', 'can_approve',
}
| 36.474328
| 138
| 0.648545
|
2a84e2a4ceadb8570c7abe4b43cfbd7b699b7f9c
| 4,436
|
py
|
Python
|
cold_posterior_bnn/core/frn.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-01-19T23:35:59.000Z
|
2022-01-19T23:35:59.000Z
|
cold_posterior_bnn/core/frn.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
cold_posterior_bnn/core/frn.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Filter Response Normalization (FRN) layer for Keras.
Filter Response Normalization is a recently published method that serves as a
drop-in replacement for batch normalization in convolutional neural networks.
The paper is due to Singh and Krishnan, https://arxiv.org/pdf/1911.09737.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow.keras.regularizers as regularizers # pylint: disable=g-explicit-tensorflow-version-import
# FRN from https://arxiv.org/pdf/1911.09737.pdf
class FRN(tf.keras.layers.Layer):
"""Filter Response Normalization (FRN) layer."""
def __init__(self,
reg_epsilon=1.0e-6,
tau_regularizer=None,
beta_regularizer=None,
gamma_regularizer=None,
**kwargs):
"""Initialize the FRN layer.
Args:
reg_epsilon: float, the regularization parameter preventing a division by
zero.
tau_regularizer: tf.keras.regularizer for tau.
beta_regularizer: tf.keras.regularizer for beta.
gamma_regularizer: tf.keras.regularizer for gamma.
**kwargs: keyword arguments passed to the Keras layer base class.
"""
self.reg_epsilon = reg_epsilon
self.tau_regularizer = regularizers.get(tau_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
super(FRN, self).__init__(**kwargs)
def build(self, input_shape):
par_shape = (1, 1, 1, input_shape[-1]) # [1,1,1,C]
self.tau = self.add_weight('tau', shape=par_shape, initializer='zeros',
regularizer=self.tau_regularizer,
trainable=True)
self.beta = self.add_weight('beta', shape=par_shape, initializer='zeros',
regularizer=self.beta_regularizer,
trainable=True)
self.gamma = self.add_weight('gamma', shape=par_shape, initializer='ones',
regularizer=self.gamma_regularizer,
trainable=True)
def call(self, x):
nu2 = tf.reduce_mean(tf.math.square(x), axis=[1, 2], keepdims=True)
x = x * tf.math.rsqrt(nu2 + self.reg_epsilon)
y = self.gamma*x + self.beta
z = tf.maximum(y, self.tau)
return z
def get_config(self):
config = super(FRN, self).get_config()
config.update({
'reg_epsilon': self.reg_epsilon,
'tau_regularizer': regularizers.serialize(self.tau_regularizer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
})
return config
class TLU(tf.keras.layers.Layer):
"""Thresholded linear unit (TLU) layer."""
def __init__(self,
tau_regularizer=None,
**kwargs):
"""Initialize the TLU layer.
Args:
tau_regularizer: tf.keras.regularizer for tau.
**kwargs: keyword arguments passed to the Keras layer base class.
"""
self.tau_regularizer = regularizers.get(tau_regularizer)
super(TLU, self).__init__(**kwargs)
def build(self, input_shape):
par_shape = (1, 1, 1, input_shape[-1]) # [1,1,1,C]
self.tau = self.add_weight('tau', shape=par_shape, initializer='zeros',
regularizer=self.tau_regularizer,
trainable=True)
def call(self, x):
return tf.maximum(x, self.tau)
def get_config(self):
config = super(TLU, self).get_config()
config.update({
'tau_regularizer': regularizers.serialize(self.tau_regularizer),
})
return config
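# Usage sketch (not part of the original module): FRN already folds the TLU
# thresholding into its `tau` weight, so a Conv + FRN pair can stand in for the
# usual Conv + BatchNorm + ReLU block. Shapes and sizes below are illustrative.
def _example_frn_block(inputs, filters=16):
  x = tf.keras.layers.Conv2D(filters, 3, padding='same', use_bias=False)(inputs)
  return FRN()(x)
if __name__ == '__main__':
  example_inputs = tf.keras.Input(shape=(32, 32, 3))
  example_model = tf.keras.Model(example_inputs, _example_frn_block(example_inputs))
  example_model.summary()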
| 36.966667
| 109
| 0.669973
|
67162283d0c1692fd44b60011c31d3121559f7d8
| 899
|
py
|
Python
|
setup.py
|
DasIch/argvard
|
2603e323a995e0915ce41fcf49e2a82519556195
|
[
"Apache-2.0"
] | 3
|
2015-02-09T02:28:30.000Z
|
2020-04-26T00:31:40.000Z
|
setup.py
|
DasIch/argvard
|
2603e323a995e0915ce41fcf49e2a82519556195
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
DasIch/argvard
|
2603e323a995e0915ce41fcf49e2a82519556195
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import os
from setuptools import setup
from argvard import __version__
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
setup(
name='Argvard',
version=__version__,
url='https://github.com/DasIch/argvard',
author='Daniel Neuhäuser',
author_email='ich@danielneuhaeuser.de',
license='Apache License, Version 2.0',
description='Framework for command line applications',
long_description=open(os.path.join(PROJECT_DIR, 'README.rst')).read(),
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=['argvard']
)
| 29
| 74
| 0.665184
|
dfe6bab08ec9d355967edcf055241e10da8379f1
| 2,580
|
py
|
Python
|
urlfetch/urlfetch.py
|
sourcery-ai-bot/aikaterna-cogs
|
75c84f53d5a3a1aa91564fef87cc8d5e67887707
|
[
"Apache-2.0"
] | null | null | null |
urlfetch/urlfetch.py
|
sourcery-ai-bot/aikaterna-cogs
|
75c84f53d5a3a1aa91564fef87cc8d5e67887707
|
[
"Apache-2.0"
] | null | null | null |
urlfetch/urlfetch.py
|
sourcery-ai-bot/aikaterna-cogs
|
75c84f53d5a3a1aa91564fef87cc8d5e67887707
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import aiohttp
import logging
from urllib.parse import urlparse
from redbot.core import checks, commands
from redbot.core.utils.chat_formatting import box, pagify
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
log = logging.getLogger("red.aikaterna.urlfetch")
__version__ = "1.1.0"
class UrlFetch(commands.Cog):
"""Grab stuff from a text API."""
def __init__(self, bot):
self.bot = bot
self._headers = {'User-Agent': 'Python/3.8'}
@commands.command()
async def urlfetch(self, ctx, url: str):
"""
Input a URL to read.
"""
async with ctx.typing():
valid_url = await self._valid_url(ctx, url)
if not valid_url:
return
text = await self._get_url_content(url)
if text:
page_list = []
for page in pagify(text, delims=["\n"], page_length=1800):
page_list.append(box(page))
                if len(page_list) == 1:
                    await ctx.send(page_list[0])
else:
await menu(ctx, page_list, DEFAULT_CONTROLS)
async def _get_url_content(self, url: str):
try:
timeout = aiohttp.ClientTimeout(total=20)
async with aiohttp.ClientSession(headers=self._headers, timeout=timeout) as session:
async with session.get(url) as resp:
text = await resp.text()
return text
except aiohttp.client_exceptions.ClientConnectorError:
log.error(f"aiohttp failure accessing site at url:\n\t{url}", exc_info=True)
return None
except asyncio.exceptions.TimeoutError:
log.error(f"asyncio timeout while accessing feed at url:\n\t{url}")
return None
except Exception:
log.error(f"General failure accessing site at url:\n\t{url}", exc_info=True)
return None
async def _valid_url(self, ctx, url: str):
try:
result = urlparse(url)
except Exception as e:
log.exception(e, exc_info=e)
await ctx.send("There was an issue trying to fetch that site. Please check your console for the error.")
return None
if all([result.scheme, result.netloc]):
text = await self._get_url_content(url)
if text:
return text
await ctx.send("No text present at the given url.")
else:
await ctx.send(f"That url seems to be incomplete.")
return None
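# Standalone sketch of the validation idea used in `_valid_url` above: a URL is
# treated as well formed only when urlparse() yields both a scheme and a netloc.
def _looks_like_url(url: str) -> bool:
    parts = urlparse(url)
    return all([parts.scheme, parts.netloc])
# _looks_like_url("https://example.com/api") -> True
# _looks_like_url("example.com/api") -> False (scheme is missing)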
| 33.076923
| 116
| 0.587597
|
f58b16638322bc508a91c9ba75736ca783520dc7
| 2,712
|
py
|
Python
|
parkingmotel.ie/parkingmotel/parkingmotel/settings.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | null | null | null |
parkingmotel.ie/parkingmotel/parkingmotel/settings.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | 1
|
2015-11-08T11:49:35.000Z
|
2015-11-08T11:49:43.000Z
|
parkingmotel.ie/parkingmotel/parkingmotel/settings.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | null | null | null |
"""
Django settings for parkingmotel project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yj1h$xsbl96t9pm$d+^y28kr!x6_+75omttl@0bmf_0&$+-%82'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'parkingmotel.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'parkingmotel.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
try:
from local_settings import *
except:
pass
| 25.111111
| 71
| 0.702434
|
9bcfdb9a82d86b08c32ffff33162f99b3052ba6b
| 6,139
|
py
|
Python
|
restservice/migrations/0001_initial.py
|
Ecotrust/formhub
|
05033bb5aa152cc2cbcd7382c2c999d82b2c3276
|
[
"BSD-2-Clause"
] | 123
|
2015-01-08T09:21:05.000Z
|
2021-11-14T19:45:23.000Z
|
restservice/migrations/0001_initial.py
|
Ecotrust/formhub
|
05033bb5aa152cc2cbcd7382c2c999d82b2c3276
|
[
"BSD-2-Clause"
] | 16
|
2015-02-13T16:56:42.000Z
|
2021-02-20T23:58:43.000Z
|
restservice/migrations/0001_initial.py
|
Ecotrust/formhub
|
05033bb5aa152cc2cbcd7382c2c999d82b2c3276
|
[
"BSD-2-Clause"
] | 110
|
2015-01-19T14:34:06.000Z
|
2021-02-01T14:55:11.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'RestService'
db.create_table('restservice_restservice', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('service_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('xform', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['odk_logger.XForm'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('restservice', ['RestService'])
def backwards(self, orm):
# Deleting model 'RestService'
db.delete_table('restservice_restservice')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'restservice.restservice': {
'Meta': {'object_name': 'RestService'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'service_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.XForm']"})
}
}
complete_apps = ['restservice']
| 66.728261
| 182
| 0.566705
|
e501ecffb31bd049350296fc92c96929e1767b9c
| 9,307
|
py
|
Python
|
configs/ruby/DSI.py
|
Aayush-Ankit/ece666-parallelArch-gem5
|
3c20f5852a4b4f7ae59b69a46f483cfee06ad20f
|
[
"BSD-3-Clause"
] | 1
|
2021-04-29T03:26:29.000Z
|
2021-04-29T03:26:29.000Z
|
configs/ruby/DSI.py
|
Aayush-Ankit/ece666-parallelArch-gem5
|
3c20f5852a4b4f7ae59b69a46f483cfee06ad20f
|
[
"BSD-3-Clause"
] | null | null | null |
configs/ruby/DSI.py
|
Aayush-Ankit/ece666-parallelArch-gem5
|
3c20f5852a4b4f7ae59b69a46f483cfee06ad20f
|
[
"BSD-3-Clause"
] | 1
|
2019-11-13T00:40:44.000Z
|
2019-11-13T00:40:44.000Z
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology, create_directories
from Ruby import send_evicts
#
# Declare caches used by the protocol
#
class L1Cache(RubyCache): pass
def define_options(parser):
return
def create_system(options, full_system, system, dma_ports, bootmem,
ruby_system):
if buildEnv['PROTOCOL'] != 'DSI':
panic("This script requires the DSI protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
l1_cntrl_nodes = []
dma_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
block_size_bits = int(math.log(options.cacheline_size, 2))
for i in range(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
# Only one cache exists for this protocol, so by default use the L1D
# config parameters.
#
cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits,
dsiFIFO_size = options.l1d_dsiFIFO_size)
# the ruby random tester reuses num_cpus to specify the
# number of cpu ports connected to the tester object, which
# is stored in system.cpu. because there is only ever one
# tester object, num_cpus is not necessarily equal to the
# size of system.cpu; therefore if len(system.cpu) == 1
# we use system.cpu[0] to set the clk_domain, thereby ensuring
# we don't index off the end of the cpu list.
if len(system.cpu) == 1:
clk_domain = system.cpu[0].clk_domain
else:
clk_domain = system.cpu[i].clk_domain
# Only one unified L1 cache exists. Can cache instructions and data.
l1_cntrl = L1Cache_Controller(version=i, cacheMemory=cache,
send_evictions=send_evicts(options),
transitions_per_cycle=options.ports,
clk_domain=clk_domain,
ruby_system=ruby_system)
cpu_seq = RubySequencer(version=i, icache=cache, dcache=cache,
clk_domain=clk_domain, ruby_system=ruby_system)
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
# Add controllers and sequencers to the appropriate lists
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.mandatoryQueue = MessageBuffer()
l1_cntrl.requestFromCache = MessageBuffer(ordered = True)
l1_cntrl.requestFromCache.master = ruby_system.network.slave
l1_cntrl.responseFromCache = MessageBuffer(ordered = True)
l1_cntrl.responseFromCache.master = ruby_system.network.slave
l1_cntrl.forwardToCache = MessageBuffer(ordered = True)
l1_cntrl.forwardToCache.slave = ruby_system.network.master
l1_cntrl.responseToCache = MessageBuffer(ordered = True)
l1_cntrl.responseToCache.slave = ruby_system.network.master
# If Running in DSI_MODE
        l1_cntrl.DSI_MODE = bool(options.DSI_MODE)
        print("DSI_MODE: ")
        print(l1_cntrl.DSI_MODE)
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system.
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
mem_dir_cntrl_nodes, rom_dir_cntrl_node = create_directories(
options, bootmem, ruby_system, system)
dir_cntrl_nodes = mem_dir_cntrl_nodes[:]
if rom_dir_cntrl_node is not None:
dir_cntrl_nodes.append(rom_dir_cntrl_node)
for dir_cntrl in dir_cntrl_nodes:
# Connect the directory controllers and the network
dir_cntrl.requestToDir = MessageBuffer(ordered = True)
dir_cntrl.requestToDir.slave = ruby_system.network.master
dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True)
dir_cntrl.dmaRequestToDir.slave = ruby_system.network.master
dir_cntrl.responseToDir = MessageBuffer(ordered = True)
dir_cntrl.responseToDir.slave = ruby_system.network.master
dir_cntrl.responseFromDir = MessageBuffer()
dir_cntrl.responseFromDir.master = ruby_system.network.slave
dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True)
dir_cntrl.dmaResponseFromDir.master = ruby_system.network.slave
dir_cntrl.forwardFromDir = MessageBuffer()
dir_cntrl.forwardFromDir.master = ruby_system.network.slave
dir_cntrl.responseFromMemory = MessageBuffer()
dir_cntrl.directory.version_bits = options.dsi_version_bits
for i, dma_port in enumerate(dma_ports):
# Added -check that DMA ports aren't created
assert 1==0, "Aayush: DMA port is created"
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
# Connect the directory controllers and the network
dma_cntrl.mandatoryQueue = MessageBuffer()
dma_cntrl.requestToDir = MessageBuffer()
dma_cntrl.requestToDir.master = ruby_system.network.slave
dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
dma_cntrl.responseFromDir.slave = ruby_system.network.master
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
# Create the io controller and the sequencer
if full_system:
io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
ruby_system._io_port = io_seq
io_controller = DMA_Controller(version = len(dma_ports),
dma_sequencer = io_seq,
ruby_system = ruby_system)
ruby_system.io_controller = io_controller
# Connect the dma controller to the network
io_controller.mandatoryQueue = MessageBuffer()
io_controller.requestToDir = MessageBuffer()
io_controller.requestToDir.master = ruby_system.network.slave
io_controller.responseFromDir = MessageBuffer(ordered = True)
io_controller.responseFromDir.slave = ruby_system.network.master
all_cntrls = all_cntrls + [io_controller]
ruby_system.network.number_of_virtual_networks = 5
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, mem_dir_cntrl_nodes, topology)
| 44.5311
| 82
| 0.686365
|
41eaefb54fa6103f6dbbd40ded4b35c8d5896ff7
| 6,171
|
py
|
Python
|
4-Object_Detection/RPN/demo.py
|
Jack19960208/TensorFlow2.0-Examples
|
f0ec3fbd27af90f0d040cd70ec1fd3ad408c3a6d
|
[
"MIT"
] | 1
|
2019-10-21T11:39:03.000Z
|
2019-10-21T11:39:03.000Z
|
4-Object_Detection/RPN/demo.py
|
Jack19960208/TensorFlow2.0-Examples
|
f0ec3fbd27af90f0d040cd70ec1fd3ad408c3a6d
|
[
"MIT"
] | null | null | null |
4-Object_Detection/RPN/demo.py
|
Jack19960208/TensorFlow2.0-Examples
|
f0ec3fbd27af90f0d040cd70ec1fd3ad408c3a6d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : demo.py
# Author : YunYang1994
# Created date: 2019-10-20 15:06:46
# Description :
#
#================================================================
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image
from rpn import RPNplus
from utils import compute_iou, plot_boxes_on_image, wandhG, load_gt_boxes, compute_regression, decode_output
pos_thresh = 0.5
neg_thresh = 0.1
iou_thresh = 0.5
grid_width = 16
grid_height = 16
image_height = 720
image_width = 960
wandhG = np.array(wandhG, dtype=np.float32)
image_path = "/home/yang/dataset/synthetic_dataset/image/1.jpg"
gt_boxes = load_gt_boxes("/home/yang/dataset/synthetic_dataset/imageAno/1.txt")
raw_image = cv2.imread(image_path)
image_with_gt_boxes = np.copy(raw_image)
plot_boxes_on_image(image_with_gt_boxes, gt_boxes)
Image.fromarray(image_with_gt_boxes).show()
encoded_image = np.copy(raw_image)
target_scores = np.zeros(shape=[45, 60, 9, 2]) # 0: background, 1: foreground, ,
target_bboxes = np.zeros(shape=[45, 60, 9, 4]) # t_x, t_y, t_w, t_h
target_masks = np.zeros(shape=[45, 60, 9]) # negative_samples: -1, positive_samples: 1
################################### ENCODE INPUT #################################
for i in range(45):
for j in range(60):
for k in range(9):
center_x = j * grid_width + grid_width * 0.5
center_y = i * grid_height + grid_height * 0.5
xmin = center_x - wandhG[k][0] * 0.5
ymin = center_y - wandhG[k][1] * 0.5
xmax = center_x + wandhG[k][0] * 0.5
ymax = center_y + wandhG[k][1] * 0.5
# ignore cross-boundary anchors
if (xmin > -5) & (ymin > -5) & (xmax < (image_width+5)) & (ymax < (image_height+5)):
anchor_boxes = np.array([xmin, ymin, xmax, ymax])
anchor_boxes = np.expand_dims(anchor_boxes, axis=0)
# compute iou between this anchor and all ground-truth boxes in image.
ious = compute_iou(anchor_boxes, gt_boxes)
positive_masks = ious > pos_thresh
negative_masks = ious < neg_thresh
if np.any(positive_masks):
plot_boxes_on_image(encoded_image, anchor_boxes, thickness=1)
print("=> Encoding positive sample: %d, %d, %d" %(i, j, k))
cv2.circle(encoded_image, center=(int(0.5*(xmin+xmax)), int(0.5*(ymin+ymax))),
radius=1, color=[255,0,0], thickness=4)
target_scores[i, j, k, 1] = 1.
target_masks[i, j, k] = 1 # labeled as a positive sample
# find out which ground-truth box matches this anchor
max_iou_idx = np.argmax(ious)
selected_gt_boxes = gt_boxes[max_iou_idx]
target_bboxes[i, j, k] = compute_regression(selected_gt_boxes, anchor_boxes[0])
if np.all(negative_masks):
target_scores[i, j, k, 0] = 1.
target_masks[i, j, k] = -1 # labeled as a negative sample
cv2.circle(encoded_image, center=(int(0.5*(xmin+xmax)), int(0.5*(ymin+ymax))),
radius=1, color=[0,0,0], thickness=4)
Image.fromarray(encoded_image).show()
################################### DECODE OUTPUT #################################
decode_image = np.copy(raw_image)
pred_boxes = []
pred_score = []
for i in range(45):
for j in range(60):
for k in range(9):
            # coordinates of the predicted boxes
center_x = j * grid_width + 0.5 * grid_width
center_y = i * grid_height + 0.5 * grid_height
anchor_xmin = center_x - 0.5 * wandhG[k, 0]
anchor_ymin = center_y - 0.5 * wandhG[k, 1]
xmin = target_bboxes[i, j, k, 0] * wandhG[k, 0] + anchor_xmin
ymin = target_bboxes[i, j, k, 1] * wandhG[k, 1] + anchor_ymin
xmax = tf.exp(target_bboxes[i, j, k, 2]) * wandhG[k, 0] + xmin
ymax = tf.exp(target_bboxes[i, j, k, 3]) * wandhG[k, 1] + ymin
if target_scores[i, j, k, 1] > 0: # it is a positive sample
print("=> Decoding positive sample: %d, %d, %d" %(i, j, k))
cv2.circle(decode_image, center=(int(0.5*(xmin+xmax)), int(0.5*(ymin+ymax))),
radius=1, color=[255,0,0], thickness=4)
pred_boxes.append(np.array([xmin, ymin, xmax, ymax]))
pred_score.append(target_scores[i, j, k, 1])
pred_boxes = np.array(pred_boxes)
plot_boxes_on_image(decode_image, pred_boxes, color=[0, 255, 0])
Image.fromarray(np.uint8(decode_image)).show()
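# Sketch of the inverse of the decoding above (the real implementation is
# utils.compute_regression; this version only spells out the parameterization
# and assumes [xmin, ymin, xmax, ymax] boxes):
def _encode_box(gt_box, anchor_xmin, anchor_ymin, anchor_w, anchor_h):
    t_x = (gt_box[0] - anchor_xmin) / anchor_w
    t_y = (gt_box[1] - anchor_ymin) / anchor_h
    t_w = np.log((gt_box[2] - gt_box[0]) / anchor_w)
    t_h = np.log((gt_box[3] - gt_box[1]) / anchor_h)
    return np.array([t_x, t_y, t_w, t_h])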
############################## FASTER DECODE OUTPUT ###############################
faster_decode_image = np.copy(raw_image)
# pred_bboxes = target_bboxes
# pred_scores = target_scores.astype(np.float32)
pred_bboxes = np.expand_dims(target_bboxes, 0).astype(np.float32)
pred_scores = np.expand_dims(target_scores, 0).astype(np.float32)
pred_scores, pred_bboxes = decode_output(pred_bboxes, pred_scores)
plot_boxes_on_image(faster_decode_image, pred_bboxes, color=[255, 0, 0]) # red boundig box
Image.fromarray(np.uint8(faster_decode_image)).show()
## bboxes
# grid_x, grid_y = tf.range(60, dtype=tf.int32), tf.range(45, dtype=tf.int32)
# grid_x, grid_y = tf.meshgrid(grid_x, grid_y)
# grid_x, grid_y = tf.expand_dims(grid_x, -1), tf.expand_dims(grid_y, -1)
# grid_xy = tf.stack([grid_x, grid_y], axis=-1)
# center_xy = grid_xy * 16 + 8
# center_xy = tf.cast(center_xy, tf.float32)
# anchor_xymin = center_xy - 0.5 * wandhG
# xy_min = pred_bboxes[..., 0:2] * wandhG[:, 0:2] + anchor_xymin
# xy_max = tf.exp(pred_bboxes[..., 2:4]) * wandhG[:, 0:2] + xy_min
# pred_bboxes = tf.concat([xy_min, xy_max], axis=-1)
# score_mask = pred_scores > 0.
# pred_bboxes = tf.reshape(pred_bboxes[score_mask], shape=[-1,4]).numpy()
# pred_scores = tf.reshape(pred_scores[score_mask], shape=[-1,]).numpy()
| 41.695946
| 108
| 0.583536
|
c0d9d58c31a819cf0e4a15f14530756be98c5d6e
| 1,081
|
py
|
Python
|
J_linked_list/problems/I_pairwise_swap_nodes_of_linked_list.py
|
Princeton21/DSA
|
0f2321b284fc54f4ddf73733cc1a8d05e549aeea
|
[
"MIT"
] | 58
|
2021-01-06T10:05:51.000Z
|
2022-02-10T05:15:19.000Z
|
J_linked_list/problems/I_pairwise_swap_nodes_of_linked_list.py
|
Princeton21/DSA
|
0f2321b284fc54f4ddf73733cc1a8d05e549aeea
|
[
"MIT"
] | 5
|
2021-02-22T04:14:24.000Z
|
2021-12-26T09:19:17.000Z
|
J_linked_list/problems/I_pairwise_swap_nodes_of_linked_list.py
|
Princeton21/DSA
|
0f2321b284fc54f4ddf73733cc1a8d05e549aeea
|
[
"MIT"
] | 27
|
2021-02-09T13:58:33.000Z
|
2022-03-06T03:48:08.000Z
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def pairwiseSwap(self):
temp = self.head
if temp is None:
return
while temp is not None and temp.next is not None:
if temp.data == temp.next.data:
temp = temp.next.next
else:
temp.data, temp.next.data = temp.next.data, temp.data
temp = temp.next.next
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printList(self):
temp = self.head
while temp:
print(temp.data)
temp = temp.next
if __name__ == "__main__":
"""
from timeit import timeit
llist = LinkedList()
llist.push(5)
llist.push(4)
llist.push(3)
llist.push(2)
llist.push(1)
llist.pairwiseSwap()
print(timeit(lambda: llist.printList(), number=10000)) # 0.27164168400122435
"""
| 22.520833
| 81
| 0.553191
|
9fd22edfa5eceb96f9723497e0ac8f9561721dae
| 2,058
|
py
|
Python
|
src/rejection-sampling.py
|
waidotto/ridgelet-transform-numerial-experiment
|
2af1dc969347031c64e168cf071eb13d9ac1824c
|
[
"MIT"
] | 4
|
2018-09-01T11:13:27.000Z
|
2020-10-12T02:37:44.000Z
|
src/rejection-sampling.py
|
waidotto/ridgelet-transform-numerial-experiment
|
2af1dc969347031c64e168cf071eb13d9ac1824c
|
[
"MIT"
] | null | null | null |
src/rejection-sampling.py
|
waidotto/ridgelet-transform-numerial-experiment
|
2af1dc969347031c64e168cf071eb13d9ac1824c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Generate weights from the oracle distribution by rejection sampling
import numpy as np
import numpy.matlib
import random
# Activation function
def eta(x):
    return np.exp(-np.square(x) / 2.0)  # Gaussian function (normal distribution)
with open('./temp/setting.py') as f:
exec(f.read())
# Read the input file
dist = np.loadtxt('./output/numerical-ridgelet.txt', usecols = range(J))
Max = np.max(np.abs(dist))
Min = np.min(np.abs(dist))
j = 0
tries = 0
a = np.zeros(number_of_hidden_nodes) #a_j
b = np.zeros(number_of_hidden_nodes) #b_j
c = np.zeros(number_of_hidden_nodes) #c_j
# Generate (a_j, b_j) by rejection sampling
print('oracle sampling...')
while(True):
tries += 1
ra = random.randint(0, I - 1)
rb = random.randint(0, J - 1)
r = random.uniform(Min, Max)
if(r <= abs(dist[ra][rb])):
a[j] = -30 + ra * Delta_a
b[j] = -30 + rb * Delta_b
j += 1
if(j >= number_of_hidden_nodes):
print("done. ({0} times tried)".format(tries))
break
# Solve for c_j by least squares
x = np.linspace(-1, 1, N, dtype = np.float32)
xs3d = np.kron(np.ones([number_of_hidden_nodes, number_of_hidden_nodes, 1]), x) #(x_s)_ijs
ai3d = np.kron(np.ones([1, number_of_hidden_nodes, N]), a.reshape(number_of_hidden_nodes, 1, 1)) #(a_i)_ijs
bi3d = np.kron(np.ones([1, number_of_hidden_nodes, N]), b.reshape(number_of_hidden_nodes, 1, 1)) #(b_i)_ijs
aj3d = np.kron(np.ones([number_of_hidden_nodes, 1, N]), a.reshape(1, number_of_hidden_nodes, 1)) #(a_j)_ijs
bj3d = np.kron(np.ones([number_of_hidden_nodes, 1, N]), b.reshape(1, number_of_hidden_nodes, 1)) #(b_j)_ijs
A = np.sum(eta(ai3d * xs3d - bi3d) * eta(aj3d * xs3d - bj3d), axis = 2) # sum over the s axis
xs2d = np.matlib.repmat(x, number_of_hidden_nodes, 1) #(x_s)_is
ai2d = np.matlib.repmat(a, N, 1).transpose() #(a_i)_is
bi2d = np.matlib.repmat(b, N, 1).transpose() #(b_i)_is
vec_b = np.sum(f(xs2d) * eta(ai2d * xs2d - bi2d), axis = 1) # sum over the s axis
c = np.linalg.inv(A).dot(vec_b) # solve the linear system
print('writing to ./output/oracle-sampled-weight.txt')
with open("./output/oracle-sampled-weight.txt", mode = 'w') as f:
for j in range(number_of_hidden_nodes):
f.write("{0:.6f} {1:.6f} {2:.6f}\n".format(a[j], b[j], c[j]))
print('done.')
| 31.181818
| 107
| 0.675413
|
c838d8900a25a261d655879ddfcf5e52b6ce5867
| 12,196
|
py
|
Python
|
DB-CNN/Mars_experiments/model.py
|
bitzj2015/DB-CNN
|
36a173daa37d22448c1d4f3a76b7953927b7d75e
|
[
"MIT"
] | 5
|
2019-01-08T11:35:38.000Z
|
2021-12-01T05:30:10.000Z
|
DB-CNN/Mars_experiments/model.py
|
bitzj2015/DB-CNN
|
36a173daa37d22448c1d4f3a76b7953927b7d75e
|
[
"MIT"
] | null | null | null |
DB-CNN/Mars_experiments/model.py
|
bitzj2015/DB-CNN
|
36a173daa37d22448c1d4f3a76b7953927b7d75e
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from utils import conv2d_flipkernel, conv2d_flipkernel_d
def VIN(X, S1, S2, config, length):
keep_drop = 0.5
k = config.k # Number of value iterations performed
ch_i = config.ch_i # Channels in input layer
ch_h = config.ch_h # Channels in initial hidden layer
ch_q = config.ch_q # Channels in q layer (~actions)
state_batch_size = length # k+1 state inputs for each channel
w11 = tf.Variable(np.random.randn(5, 5, 2, 6) * 0.01, dtype=tf.float32)
w22 = tf.Variable(np.random.randn(4, 4, 6, 12) * 0.01, dtype=tf.float32)
bias = tf.Variable(np.random.randn(1, 1, 1, ch_h) * 0.01, dtype=tf.float32)
w0 = tf.Variable(np.random.randn(3, 3, 13, ch_h) * 0.01, dtype=tf.float32)
w1 = tf.Variable(np.random.randn(1, 1, ch_h, 1) * 0.01, dtype=tf.float32)
w = tf.Variable(np.random.randn(3, 3, 1, ch_q) * 0.01, dtype=tf.float32)
w_fb = tf.Variable(np.random.randn(3, 3, 1, ch_q) * 0.01, dtype=tf.float32)
w_o = tf.Variable(np.random.randn(ch_q, 8) * 0.01, dtype=tf.float32)
h1 = conv2d_flipkernel(X[:, :, :, 0:2], w11, name="h1")
pool0 = tf.nn.max_pool(h1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
h2 = conv2d_flipkernel(pool0, w22, name="h2")
pool1 = tf.nn.max_pool(h2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
pool00 = tf.nn.max_pool(X[:, :, :, 2:3], [1, 4, 4, 1], [
1, 4, 4, 1], padding='SAME')
r0 = tf.concat([pool1, pool00], axis=-1)
h = conv2d_flipkernel(r0, w0, name="h0") + bias
r = conv2d_flipkernel(h, w1, name="r")
q = conv2d_flipkernel(r, w, name="q")
v = tf.reduce_max(q, axis=3, keep_dims=True, name="v")
for i in range(0, k - 1):
rv = tf.concat([r, v], 3) # connect two matrix
wwfb = tf.concat([w, w_fb], 2)
q = conv2d_flipkernel(rv, wwfb, name="q")
v = tf.reduce_max(q, axis=3, keep_dims=True, name="v")
# do one last convolution
q = conv2d_flipkernel(tf.concat([r, v], 3),
tf.concat([w, w_fb], 2), name="q")
# CHANGE TO THEANO ORDERING
# Since we are selecting over channels, it becomes easier to work with
# the tensor when it is in NCHW format vs NHWC
v = tf.reduce_max(q, axis=3, keep_dims=True, name="v")
q = tf.transpose(q, perm=[0, 3, 1, 2])
# Select the conv-net channels at the state position (S1,S2).
# This intuitively corresponds to each channel representing an action, and the convnet the Q function.
# The tricky thing is we want to select the same (S1,S2) position *for each* channel and for each sample
# TODO: performance can be improved here by substituting expensive
# transpose calls with better indexing for gather_nd
bs = tf.shape(q)[0]
rprn = tf.reshape(
tf.tile(tf.reshape(tf.range(bs), [-1, 1]), [1, state_batch_size]), [-1])
ins1 = tf.cast(tf.reshape(S1, [-1]), tf.int32)
ins2 = tf.cast(tf.reshape(S2, [-1]), tf.int32)
idx_in = tf.transpose(tf.stack([ins1, ins2, rprn]), [1, 0])
q_out = tf.gather_nd(tf.transpose(q, [2, 3, 0, 1]), idx_in, name="q_out")
logits = tf.matmul(q_out, w_o)
output = tf.nn.softmax(logits, name="output")
return logits, output, v, v
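# Illustrative shape walk-through for the gather above (a sketch; concrete sizes
# depend on config): with q of shape [batch, ch_q, H, W], transposing to
# [H, W, batch, ch_q] and gathering rows (S1, S2, batch_index) yields q_out of
# shape [batch * state_batch_size, ch_q] -- one ch_q-vector per queried state --
# which the final matmul with w_o maps to the 8 action logits.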
def DB_CNN(X, S1, S2, config, keep_drop, length):
k = config.k # Number of value iterations performed
ch_i = config.ch_i # Channels in input layer
ch_h = config.ch_h # Channels in initial hidden layer
ch_q = config.ch_q # Channels in q layer (~actions)
state_batch_size = length # k+1 state inputs for each channel
N = 20
M = 20
bias = tf.Variable(tf.truncated_normal(
[1, 1, 1, N], dtype=tf.float32, stddev=0.001))
w11 = tf.Variable(np.random.randn(5, 5, 2, 6) * 0.001, dtype=tf.float32)
w22 = tf.Variable(np.random.randn(4, 4, 6, 12) * 0.001, dtype=tf.float32)
w0 = tf.Variable(tf.truncated_normal(
[3, 3, 13, N], dtype=tf.float32, stddev=0.001))
w1 = tf.Variable(tf.truncated_normal(
[3, 3, N, N], dtype=tf.float32, stddev=0.001))
w2 = tf.Variable(tf.truncated_normal(
[3, 3, N, N], dtype=tf.float32, stddev=0.001))
w3 = tf.Variable(tf.truncated_normal(
[3, 3, N, N], dtype=tf.float32, stddev=0.001))
w4 = tf.Variable(tf.truncated_normal(
[3, 3, N, N], dtype=tf.float32, stddev=0.001))
w5 = tf.Variable(tf.truncated_normal(
[3, 3, N, N], dtype=tf.float32, stddev=0.001))
w6 = tf.Variable(tf.truncated_normal(
[3, 3, N, N], dtype=tf.float32, stddev=0.001))
w = tf.Variable(tf.truncated_normal(
[3, 3, N, ch_q], dtype=tf.float32, stddev=0.001))
w_o = tf.Variable(tf.truncated_normal(
[ch_q * 1, 8], dtype=tf.float32, stddev=0.001))
h1 = tf.nn.relu(conv2d_flipkernel(X[:, :, :, 0:2] * 100, w11, name="h1"))
pool0 = tf.nn.max_pool(h1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
h2 = tf.nn.relu(conv2d_flipkernel(pool0, w22, name="h2"))
pool1 = tf.nn.max_pool(h2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
#h3 = conv2d_flipkernel(pool1, w33, name="h2")
pool00 = tf.nn.max_pool(X[:, :, :, 2:3] * 10,
[1, 4, 4, 1], [1, 4, 4, 1], padding='SAME')
r0 = tf.concat([pool1, pool00], axis=-1)
h = tf.nn.relu(conv2d_flipkernel(r0, w0, name="h") + bias)
r = tf.nn.relu(conv2d_flipkernel(h, w1, name="r"))
r1 = tf.nn.relu(conv2d_flipkernel(r, w1, name="r1"))
r2 = tf.nn.relu(conv2d_flipkernel(r1, w2, name="r2"))
r3 = tf.nn.relu(conv2d_flipkernel(r2, w2, name="r3"))
r4 = tf.nn.relu(conv2d_flipkernel(r3, w3, name="r4"))
r5 = tf.nn.relu(conv2d_flipkernel(r4, w3, name="r5"))
r6 = tf.nn.relu(conv2d_flipkernel(r5, w4, name="r6"))
r7 = tf.nn.relu(conv2d_flipkernel(r6, w4, name="r7"))
r8 = tf.nn.relu(conv2d_flipkernel(r7, w5, name="r6"))
r9 = tf.nn.relu(conv2d_flipkernel(r8, w5, name="r7"))
q = conv2d_flipkernel(r9, w, name="q")
bs = tf.shape(q)[0]
rprn = tf.reshape(
tf.tile(tf.reshape(tf.range(bs), [-1, 1]), [1, state_batch_size]), [-1])
ins1 = tf.cast(tf.reshape(S1, [-1]), tf.int32)
ins2 = tf.cast(tf.reshape(S2, [-1]), tf.int32)
idx_in = tf.transpose(tf.stack([ins1, ins2, rprn]), [1, 0])
q_out = tf.gather_nd(tf.transpose(q, [1, 2, 0, 3]), idx_in, name="q_out")
w20 = tf.Variable(tf.truncated_normal(
[5, 5, 13, M], dtype=tf.float32, stddev=0.001))
kernel20 = tf.nn.relu(tf.nn.conv2d(r0, w20, [1, 1, 1, 1], padding='SAME'))
bias20 = tf.Variable(tf.constant(0.0, shape=[M], dtype=tf.float32))
conv20 = tf.nn.relu(tf.nn.bias_add(kernel20, bias20))
pool20 = tf.nn.max_pool(conv20, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME')
w21 = tf.Variable(tf.truncated_normal(
[3, 3, M, M], dtype=tf.float32, stddev=0.001))
k211 = tf.nn.relu(conv2d_flipkernel(pool20, w21, name="k211"))
#k212 = tf.nn.relu(conv2d_flipkernel(k211, w21, name="k212"))
k213 = tf.nn.relu(conv2d_flipkernel(k211, w21, name="k213") + pool20)
pool21 = tf.nn.max_pool(k213, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME')
w22 = tf.Variable(tf.truncated_normal(
[3, 3, M, M], dtype=tf.float32, stddev=0.001))
k221 = tf.nn.relu(conv2d_flipkernel(pool21, w22, name="k221"))
#k222 = tf.nn.relu(conv2d_flipkernel(k221, w22, name="k222"))
k223 = tf.nn.relu(conv2d_flipkernel(k221, w22, name="k222") + pool21)
pool22 = tf.nn.max_pool(k223, [1, 3, 3, 1], [1, 1, 1, 1], padding='SAME')
w23 = tf.Variable(tf.truncated_normal(
[3, 3, M, M], dtype=tf.float32, stddev=0.01))
k231 = tf.nn.relu(conv2d_flipkernel(pool22, w23, name="k231"))
k232 = tf.nn.relu(conv2d_flipkernel(k231, w23, name="k232") + pool22)
pool23 = tf.nn.max_pool(k232, [1, 3, 3, 1], [1, 1, 1, 1], padding='SAME')
a, b, c, d = np.shape(pool23)
reshape = tf.reshape(pool23, [config.batchsize, b * c * d])
reshape_drop = tf.nn.dropout(reshape, keep_drop)
dim = reshape.get_shape()[1].value
w24 = tf.Variable(tf.truncated_normal([dim, 192], stddev=0.001))
bias24 = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32))
local24 = tf.nn.relu(tf.matmul(reshape_drop, w24) + bias24)
local24_drop = tf.nn.dropout(local24, keep_drop)
w25 = tf.Variable(tf.truncated_normal([192, ch_q], stddev=0.001))
bias25 = tf.Variable(tf.constant(0.0, shape=[ch_q], dtype=tf.float32))
local25 = tf.nn.relu(tf.matmul(local24_drop, w25) + bias25)
local = tf.tile(local25, [1, state_batch_size])
local = tf.reshape(local, [config.batchsize * state_batch_size, ch_q])
local_a = tf.tile(local25, [1, 32 * 32])
local_a = tf.reshape(local_a, [bs, 32, 32, ch_q])
wo = tf.tile(tf.reshape(w_o, [1, 1, ch_q * 1, 8]), [bs, 32, 1, 1])
Q = tf.nn.relu(tf.matmul(tf.concat([q, local_a], axis=3), wo))
v = tf.reduce_max(Q, axis=3, keep_dims=True, name="v") * 255
V = tf.reduce_max(q, axis=3, keep_dims=True, name="v") * 255
logits = tf.nn.relu(tf.matmul(q_out, w_o))
output = tf.nn.softmax(logits, name="output")
    # NOTE: the tail of this sum was truncated in the source; closing it with w6
    # (defined above but otherwise unused) is an assumption made to keep the
    # function syntactically valid.
    l2_loss = tf.nn.l2_loss(w0) + tf.nn.l2_loss(w1) + tf.nn.l2_loss(w2) + tf.nn.l2_loss(w3) + \
        tf.nn.l2_loss(w4) + tf.nn.l2_loss(w5) + tf.nn.l2_loss(w6)
return logits, output, l2_loss, v, V
def DCNN(X, S1, S2, config, keep_drop, length):
keep_drop = 0.5
k = config.k # Number of value iterations performed
ch_i = config.ch_i # Channels in input layer
ch_h = config.ch_h # Channels in initial hidden layer
ch_q = config.ch_q # Channels in q layer (~actions)
state_batch_size = length # k+1 state inputs for each channel
w11 = tf.Variable(np.random.randn(5, 5, 2, 6) * 0.01, dtype=tf.float32)
w22 = tf.Variable(np.random.randn(4, 4, 6, 12) * 0.01, dtype=tf.float32)
bias = tf.Variable(np.random.randn(1, 1, 1, ch_h) * 0.01, dtype=tf.float32)
# weights from inputs to q layer (~reward in Bellman equation)
w0 = tf.Variable(np.random.randn(5, 5, 13, ch_h) * 0.01, dtype=tf.float32)
w1 = tf.Variable(np.random.randn(3, 3, ch_h, 100) * 0.01, dtype=tf.float32)
w2 = tf.Variable(np.random.randn(3, 3, 100, 100) * 0.01, dtype=tf.float32)
w3 = tf.Variable(np.random.randn(3, 3, 100, 100) * 0.01, dtype=tf.float32)
w4 = tf.Variable(np.random.randn(3, 3, 100, 50) * 0.01, dtype=tf.float32)
w5 = tf.Variable(np.random.randn(3, 3, 50, 50) * 0.01, dtype=tf.float32)
w6 = tf.Variable(np.random.randn(3, 3, 50, 50) * 0.01, dtype=tf.float32)
w = tf.Variable(np.random.randn(3, 3, 50, ch_q) * 0.01, dtype=tf.float32)
w_o = tf.Variable(np.random.randn(ch_q, 8) * 0.01, dtype=tf.float32)
# initial conv layer over image+reward prior
h1 = tf.nn.relu(conv2d_flipkernel(X[:, :, :, 0:2], w11, name="h1"))
pool0 = tf.nn.max_pool(h1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
h2 = tf.nn.relu(conv2d_flipkernel(pool0, w22, name="h2"))
pool1 = tf.nn.max_pool(h2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
pool00 = tf.nn.max_pool(X[:, :, :, 2:3], [1, 4, 4, 1], [
1, 4, 4, 1], padding='SAME')
r0 = tf.concat([pool1, pool00], axis=-1)
h = tf.nn.relu(conv2d_flipkernel(r0, w0, name="h0") + bias)
r = tf.nn.relu(conv2d_flipkernel(h, w1, name="r"))
r1 = tf.nn.relu(conv2d_flipkernel(r, w2, name="r1"))
r2 = tf.nn.relu(conv2d_flipkernel(r1, w3, name="r2"))
r3 = tf.nn.relu(conv2d_flipkernel(r2, w4, name="r3"))
r4 = tf.nn.relu(conv2d_flipkernel(r3, w5, name="r4"))
r5 = tf.nn.relu(conv2d_flipkernel(r4, w6, name="r5"))
#r6 = tf.nn.relu(conv2d_flipkernel(r5, w7, name="r6"))
#r7 = tf.nn.relu(conv2d_flipkernel(r6, w8, name="r7"))
q = tf.nn.relu(conv2d_flipkernel(r5, w, name="q"))
q = tf.transpose(q, perm=[0, 3, 1, 2])
bs = tf.shape(q)[0]
rprn = tf.reshape(
tf.tile(tf.reshape(tf.range(bs), [-1, 1]), [1, state_batch_size]), [-1])
ins1 = tf.cast(tf.reshape(S1, [-1]), tf.int32)
ins2 = tf.cast(tf.reshape(S2, [-1]), tf.int32)
idx_in = tf.transpose(tf.stack([ins1, ins2, rprn]), [1, 0])
q_out = tf.gather_nd(tf.transpose(q, [2, 3, 0, 1]), idx_in, name="q_out")
logits = tf.matmul(q_out, w_o)
output = tf.nn.softmax(logits, name="output")
return logits, output
| 49.577236
| 148
| 0.613726
|
094e4b7e0a3406fe3c96e42e237dfb23512df2aa
| 7,084
|
py
|
Python
|
neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py
|
kklimonda/neutron
|
ccdddad358a4bf802d59b3fbbfe88a1e9881c96c
|
[
"Apache-2.0"
] | null | null | null |
neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py
|
kklimonda/neutron
|
ccdddad358a4bf802d59b3fbbfe88a1e9881c96c
|
[
"Apache-2.0"
] | null | null | null |
neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py
|
kklimonda/neutron
|
ccdddad358a4bf802d59b3fbbfe88a1e9881c96c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.plugins.ml2 import api
from oslo_log import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers import mech_agent
from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \
import exceptions as exc
from neutron.services.qos.drivers.sriov import driver as sriov_qos_driver
LOG = log.getLogger(__name__)
FLAT_VLAN = 0
class SriovNicSwitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
"""Mechanism Driver for SR-IOV capable NIC based switching.
The SriovNicSwitchMechanismDriver integrates the ml2 plugin with the
sriovNicSwitch L2 agent depending on configuration option.
Port binding with this driver may require the sriovNicSwitch agent
to be running on the port's host, and that agent to have connectivity
to at least one segment of the port's network.
    The L2 agent is not essential for port binding; binding is handled by the
    VIF driver via the libvirt domain XML.
    The L2 agent is present in order to manage port update events.
"""
def __init__(self,
agent_type=constants.AGENT_TYPE_NIC_SWITCH,
vif_details={portbindings.CAP_PORT_FILTER: False},
supported_vnic_types=[portbindings.VNIC_DIRECT,
portbindings.VNIC_MACVTAP,
portbindings.VNIC_DIRECT_PHYSICAL]):
"""Initialize base class for SriovNicSwitch L2 agent type.
:param agent_type: Constant identifying agent type in agents_db
:param vif_details: Dictionary with details for VIF driver when bound
:param supported_vnic_types: The binding:vnic_type values we can bind
"""
self.agent_type = agent_type
self.supported_vnic_types = supported_vnic_types
# NOTE(ndipanov): PF passthrough requires a different vif type
self.vnic_type_for_vif_type = (
{vtype: portbindings.VIF_TYPE_HOSTDEV_PHY
if vtype == portbindings.VNIC_DIRECT_PHYSICAL
else portbindings.VIF_TYPE_HW_VEB
for vtype in self.supported_vnic_types})
self.vif_details = vif_details
sriov_qos_driver.register()
def get_allowed_network_types(self, agent):
return (p_const.TYPE_FLAT, p_const.TYPE_VLAN)
def get_mappings(self, agent):
return agent['configurations'].get('device_mappings', {})
def bind_port(self, context):
LOG.debug("Attempting to bind port %(port)s on "
"network %(network)s",
{'port': context.current['id'],
'network': context.network.current['id']})
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
if vnic_type not in self.supported_vnic_types:
LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
vnic_type)
return
if vnic_type == portbindings.VNIC_DIRECT_PHYSICAL:
# Physical functions don't support things like QoS properties,
# spoof checking, etc. so we might as well side-step the agent
# for now. The agent also doesn't currently recognize non-VF
# PCI devices so we won't get port status change updates
# either. This should be changed in the future so physical
# functions can use device mapping checks and the plugin can
# get port status updates.
for segment in context.segments_to_bind:
if self.try_to_bind_segment_for_agent(context, segment,
agent=None):
break
return
for agent in context.host_agents(self.agent_type):
LOG.debug("Checking agent: %s", agent)
if agent['alive']:
for segment in context.segments_to_bind:
if self.try_to_bind_segment_for_agent(context, segment,
agent):
return
else:
LOG.warning("Attempting to bind with dead agent: %s", agent)
def try_to_bind_segment_for_agent(self, context, segment, agent):
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_DIRECT)
vif_type = self.vnic_type_for_vif_type.get(
vnic_type, portbindings.VIF_TYPE_HW_VEB)
if not self.check_segment_for_agent(segment, agent):
return False
port_status = (constants.PORT_STATUS_ACTIVE if agent is None
else constants.PORT_STATUS_DOWN)
context.set_binding(segment[api.ID],
vif_type,
self._get_vif_details(segment),
port_status)
LOG.debug("Bound using segment: %s", segment)
return True
def check_segment_for_agent(self, segment, agent=None):
"""Check if segment can be bound.
:param segment: segment dictionary describing segment to bind
:param agent: agents_db entry describing agent to bind or None
:returns: True if segment can be bound for agent
"""
network_type = segment[api.NETWORK_TYPE]
if network_type in self.get_allowed_network_types(agent):
if agent:
mappings = self.get_mappings(agent)
LOG.debug("Checking segment: %(segment)s "
"for mappings: %(mappings)s ",
{'segment': segment, 'mappings': mappings})
return segment[api.PHYSICAL_NETWORK] in mappings
return True
return False
def check_vlan_transparency(self, context):
"""SR-IOV driver vlan transparency support."""
return True
def _get_vif_details(self, segment):
network_type = segment[api.NETWORK_TYPE]
if network_type == p_const.TYPE_FLAT:
vlan_id = FLAT_VLAN
elif network_type == p_const.TYPE_VLAN:
vlan_id = segment[api.SEGMENTATION_ID]
else:
raise exc.SriovUnsupportedNetworkType(net_type=network_type)
vif_details = self.vif_details.copy()
vif_details[portbindings.VIF_DETAILS_VLAN] = str(vlan_id)
return vif_details
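# Illustrative outcome (a sketch, not part of the driver): for a VLAN segment
# such as {api.NETWORK_TYPE: 'vlan', api.SEGMENTATION_ID: 42}, _get_vif_details()
# returns a copy of self.vif_details with portbindings.VIF_DETAILS_VLAN set to
# '42'; for a flat segment the FLAT_VLAN sentinel is used instead, i.e. '0'.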
| 44.275
| 79
| 0.643139
|
f60cb9d1817a748eca8fa4b430e8ac5f69448329
| 1,047
|
py
|
Python
|
pestudio_extractor.py
|
team-missing-one/malware-predictor
|
5dbe6a04f57469527818e06dc3c03ff92cd982bb
|
[
"MIT"
] | null | null | null |
pestudio_extractor.py
|
team-missing-one/malware-predictor
|
5dbe6a04f57469527818e06dc3c03ff92cd982bb
|
[
"MIT"
] | 10
|
2021-12-11T14:58:45.000Z
|
2021-12-16T14:59:14.000Z
|
pestudio_extractor.py
|
team-missing-one/malware-predictor
|
5dbe6a04f57469527818e06dc3c03ff92cd982bb
|
[
"MIT"
] | null | null | null |
import numpy as np
import glob
import copy
import data_processor
from scipy import stats
def guess_imputation_vector():
ret = []
pathes = [
data_processor.get_train_path("PESTUDIO"),
data_processor.get_valid_path("PESTUDIO")
]
for i in pathes:
for fname in glob.glob(i + '*'):
key = fname[:-5].split('/')[-1]
# print(key)
ret.append(copy.deepcopy(data_processor.PestudioParser(fname).process_report()))
ret = np.array(ret)
# return np.mean(ret, axis=0)
return stats.mode(ret)[0].tolist()[0]
def write_imputation_vector(val: list):
f = open("util_pestudio/imputation.txt", 'w')
for i in val:
f.write(str(i) + '\n')
f.close()
def read_imputation_vector():
ret = []
f = open("util_pestudio/imputation.txt", 'r')
txt = f.readlines()
for line in txt:
ret.append(float(line.rstrip()))
return ret
if __name__ == "__main__":
res = guess_imputation_vector()
print(res)
write_imputation_vector(res)
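# Illustrative behaviour (a sketch relying on the older SciPy mode() layout this
# script assumes): for ret = np.array([[0, 1], [0, 2], [1, 2]]),
# stats.mode(ret)[0].tolist()[0] yields the per-column modes [0, 2], which is the
# imputation vector that write_imputation_vector() then writes one value per line.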
| 22.76087
| 92
| 0.620821
|
560b0af5a907f6052d2fb9aa63be7d7efd66dd4e
| 702
|
py
|
Python
|
tests/test_precount.py
|
akiomik/precountify
|
521aac23bb0a92b0de961c46d9cc2a4d0a99b24e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_precount.py
|
akiomik/precountify
|
521aac23bb0a92b0de961c46d9cc2a4d0a99b24e
|
[
"Apache-2.0"
] | 8
|
2020-03-24T18:09:15.000Z
|
2020-12-17T02:36:28.000Z
|
tests/test_precount.py
|
akiomik/precountify
|
521aac23bb0a92b0de961c46d9cc2a4d0a99b24e
|
[
"Apache-2.0"
] | null | null | null |
import os
from precountify.click import Click
from precountify.precount import Precount
def test_prepend():
# TODO
pass
def test_from_click():
filename = os.path.join(os.path.dirname(__file__), 'fixtures/click.wav')
click = Click(filename, 44100, 120, False)
precount = Precount.from_click(click, 4, 2, 0)
expected_shape = (
click.audio.data.shape[0],
click.audio.data.shape[1] * Precount.n_beats(4, 2, 0)
)
assert precount.audio.data.shape == expected_shape
def test_n_beats():
assert Precount.n_beats(4, 2, 0) == 8
assert Precount.n_beats(3, 2, 0) == 6
assert Precount.n_beats(4, 1, 0) == 4
assert Precount.n_beats(4, 2, 1) == 7
| 25.071429
| 76
| 0.663818
|
b615a82a80cd41e6f0872638a603951f768e41ca
| 6,962
|
py
|
Python
|
ignite/engine/__init__.py
|
nzare/ignite
|
002b595daa8a8345286c5e096c33e278948686a7
|
[
"BSD-3-Clause"
] | 1
|
2020-08-29T16:49:36.000Z
|
2020-08-29T16:49:36.000Z
|
ignite/engine/__init__.py
|
alxlampe/ignite
|
b53c6aeef87754b3cd3638c91172b386dc73af12
|
[
"BSD-3-Clause"
] | 5
|
2020-08-29T16:49:48.000Z
|
2020-08-29T17:05:54.000Z
|
ignite/engine/__init__.py
|
alxlampe/ignite
|
b53c6aeef87754b3cd3638c91172b386dc73af12
|
[
"BSD-3-Clause"
] | 1
|
2020-10-15T06:21:01.000Z
|
2020-10-15T06:21:01.000Z
|
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.engine.deterministic import DeterministicEngine
from ignite.engine.engine import Engine
from ignite.engine.events import CallableEventWithFilter, EventEnum, Events, State
from ignite.metrics import Metric
from ignite.utils import convert_tensor
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
__all__ = [
"State",
"create_supervised_trainer",
"create_supervised_evaluator",
"Engine",
"DeterministicEngine",
"Events",
"EventEnum",
"CallableEventWithFilter",
]
def _prepare_batch(
batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False
):
"""Prepare batch for training: pass to a device with options.
"""
x, y = batch
return (
convert_tensor(x, device=device, non_blocking=non_blocking),
convert_tensor(y, device=device, non_blocking=non_blocking),
)
def create_supervised_trainer(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),
deterministic: bool = False,
) -> Engine:
"""Factory function for creating a trainer for supervised models.
Args:
model (`torch.nn.Module`): the model to train.
optimizer (`torch.optim.Optimizer`): the optimizer to use.
loss_fn (torch.nn loss function): the loss function to use.
device (str, optional): device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU or TPU.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
deterministic (bool, optional): if True, returns deterministic engine of type
:class:`~ignite.engine.deterministic.DeterministicEngine`, otherwise :class:`~ignite.engine.engine.Engine`
(default: False).
Note:
`engine.state.output` for this engine is defined by `output_transform` parameter and is the loss
of the processed batch by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
- `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
- `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
Returns:
Engine: a trainer engine with supervised update function.
"""
device_type = device.type if isinstance(device, torch.device) else device
on_tpu = "xla" in device_type if device_type is not None else False
if on_tpu and not idist.has_xla_support:
raise RuntimeError("In order to run on TPU, please install PyTorch XLA")
def _update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.train()
optimizer.zero_grad()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y)
loss.backward()
if on_tpu:
xm.optimizer_step(optimizer, barrier=True)
else:
optimizer.step()
return output_transform(x, y, y_pred, loss)
trainer = Engine(_update) if not deterministic else DeterministicEngine(_update)
return trainer
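# Illustrative usage (a minimal sketch, not part of this module; `model`,
# `optimizer`, `loss_fn` and `train_loader` are assumed to be defined by the
# caller, with the model already moved to the target device):
#
#     trainer = create_supervised_trainer(model, optimizer, loss_fn, device="cuda")
#     trainer.run(train_loader, max_epochs=10)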
def create_supervised_evaluator(
model: torch.nn.Module,
metrics: Optional[Dict[str, Metric]] = None,
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred: (y_pred, y),
) -> Engine:
"""
Factory function for creating an evaluator for supervised models.
Args:
model (`torch.nn.Module`): the model to train.
metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
device (str, optional): device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
Note:
        `engine.state.output` for this engine is defined by the `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
- `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
- `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
Returns:
Engine: an evaluator engine with supervised inference function.
"""
metrics = metrics or {}
def _inference(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
return output_transform(x, y, y_pred)
evaluator = Engine(_inference)
for name, metric in metrics.items():
metric.attach(evaluator, name)
return evaluator
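# Illustrative usage (a sketch; `model` and `val_loader` are assumed, with
# Accuracy imported from ignite.metrics):
#
#     evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()})
#     state = evaluator.run(val_loader)
#     print(state.metrics["accuracy"])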
| 40.242775
| 120
| 0.683424
|
b1299386bbce2a6b69206a6576a63e6cff3a39dc
| 4,385
|
py
|
Python
|
aalh_iit_celebrations_001/populate-format-extent-columns.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_celebrations_001/populate-format-extent-columns.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_celebrations_001/populate-format-extent-columns.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
from openpyxl import load_workbook
filename = 'aalh_iit_celebrations_001.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 47
maximumcol = 47
minimumrow = 7
maximumrow = 206
iterationrow = 7
targetcol = 47
formatcol = 33
extentcol = 34
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
formatvar = ws.cell(row=iterationrow, column=targetcol).value
print(formatvar)
if formatvar == None:
print('FIELD = NONE')
elif formatvar.find('Negative') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Negatives'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('negative') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Negatives'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('Slide') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Slides'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('slide') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Slides'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('Post') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Postcard'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('post') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Postcard'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('Draw') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Drawings'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('draw') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Drawings'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('Black') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Black and white photograph'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('black') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Black and white photograph'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('b&w') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Black and white photograph'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('B&W') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Black and white photograph'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('Color') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Color photograph'
print(ws.cell(row=iterationrow, column=formatcol).value)
elif formatvar.find('color') != -1:
ws.cell(row=iterationrow, column=formatcol).value = 'Picture; Color photograph'
print(ws.cell(row=iterationrow, column=formatcol).value)
else :
ws.cell(row=iterationrow, column=formatcol).value = 'Picture'
print(ws.cell(row=iterationrow, column=formatcol).value)
for cell in row:
extentvar = ws.cell(row=iterationrow, column=targetcol).value
if extentvar == None:
print('FIELD = NONE')
elif extentvar != None:
extentvar1 = extentvar.split(';')
for item in extentvar1:
if item.find('Ex') != -1:
extentvar2 = item
extentvar3 = extentvar2.replace('Extent: ','')
extentvar4 = extentvar3.strip()
ws.cell(row=iterationrow, column=extentcol).value = extentvar4
print(extentvar4)
else :
print('EXTENT = CONFUSING')
iterationrow = iterationrow + 1
wb.save('aalh_iit_celebrations_001.xlsx')
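# Illustrative parse (a sketch with an assumed source value): for
# 'Black and white photograph; Extent: 1 photograph' in column 47, the format
# column is set to 'Picture; Black and white photograph', and the extent pass
# splits on ';', strips the 'Extent: ' prefix, and writes '1 photograph' to
# column 34.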
| 51.588235
| 105
| 0.619384
|
5b068ced582d0aac2ed2517fce300a21bc904140
| 93,430
|
py
|
Python
|
qa/rpc-tests/p2p-segwit.py
|
litecoineco/litecoineco
|
a0b3c09215ef798abdd0738571a3a36dbfbb6d2d
|
[
"MIT"
] | 1
|
2021-03-12T16:25:16.000Z
|
2021-03-12T16:25:16.000Z
|
qa/rpc-tests/p2p-segwit.py
|
litecoineco/litecoineco
|
a0b3c09215ef798abdd0738571a3a36dbfbb6d2d
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/p2p-segwit.py
|
litecoineco/litecoineco
|
a0b3c09215ef798abdd0738571a3a36dbfbb6d2d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
'''
SegWit p2p test.
'''
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
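# Worked example (illustrative numbers): with a 1,000-byte base serialization and
# a 1,400-byte witness-bearing serialization, vsize = int((3*1000 + 1400 + 3)/4)
# = 1100, i.e. the 400 witness bytes are discounted to 100 virtual bytes on top
# of the base size.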
# Note: we can reduce code by using SingleNodeConnCB (in master, not 0.12)
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.sleep_time = 0.05
self.getdataset = set()
self.last_reject = None
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_pong(self, conn, message):
self.last_pong = message
def on_reject(self, conn, message):
self.last_reject = message
#print (message)
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_getdata != None
self.sync(test_function, timeout)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
self.sync(test_function, timeout)
def wait_for_inv(self, expected_inv, timeout=60):
test_function = lambda: self.last_inv != expected_inv
self.sync(test_function, timeout)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_getdata = None
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
return
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_getdata = None
self.last_getheaders = None
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
return
def announce_block(self, block, use_header):
with mininode_lock:
self.last_getdata = None
if use_header:
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_block = None
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_reject.reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1"]))
# Start a node for testing IsStandard rules.
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1", "-acceptnonstdtxn=0"]))
connect_nodes(self.nodes[0], 1)
# Disable segwit's bip9 parameter to simulate upgrading after activation.
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]))
connect_nodes(self.nodes[0], 2)
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=VB_TOP_BITS):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
print("\tVerifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
print("\tTesting non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
print("\tTesting behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_reject.reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
print("\tTesting witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
print("\tTesting witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
print("\tTesting witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
print("\tTesting extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-200000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
print("\tTesting maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Witness outputs can commit to arbitrarily long programs, but a witness
# program larger than 10k bytes cannot be successfully spent.
print("\tTesting maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 maximum-size pushes (9937 bytes including push overhead), followed by 64 more opcode bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
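# Sketch of the size arithmetic above (assumption: each 520-byte push
# serializes as OP_PUSHDATA2 + 2 length bytes + 520 data bytes = 523 bytes):
# 19 * 523 = 9937 push bytes, plus 63 OP_DROPs and 1 OP_TRUE = 64 opcode
# bytes, for a total of 10001 = MAX_PROGRAM_LENGTH + 1.
assert 19 * (1 + 2 + 520) + 63 + 1 == 10001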
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that a transaction's vin length matches its vtxinwit length '''
print("\tTesting witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
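# --- Illustrative sketch (not part of the original test) ---
# The point of BrokenCTransaction above: the regular serializer normalizes
# vtxinwit so its length always matches vin before writing the witness
# section, so a mismatched witness count can only be produced with a
# hand-rolled serializer. A minimal stand-alone model of that normalization
# step (the helper name is hypothetical):
def _normalize_witness_stacks(vin_count, witness_stacks):
    stacks = list(witness_stacks)[:vin_count]                # drop extras
    stacks += [[] for _ in range(vin_count - len(stacks))]   # pad with empty witnesses
    return stacks
assert _normalize_witness_stacks(3, [[b'a']]) == [[b'a'], [], []]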
def test_witness_tx_relay_before_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_getdata.inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
print("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
# Delivering this transaction with a witness should fail (no matter who
# it's from).
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
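# --- Illustrative sketch (not part of the original test) ---
# The getdata type check above (inv[0].type == 1) relies on the witness flag
# being OR'd into the base inv type. Assuming the framework's
# MSG_WITNESS_FLAG is the usual 1 << 30 bit, a plain tx request is type 1
# and a witness-tx request from a NODE_WITNESS peer is 1 | MSG_WITNESS_FLAG.
_MSG_TX, _MSG_WITNESS_FLAG = 1, 1 << 30   # assumed constants
assert _MSG_TX | _MSG_WITNESS_FLAG == 0x40000001
assert (_MSG_TX | _MSG_WITNESS_FLAG) & ~_MSG_WITNESS_FLAG == _MSG_TX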
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard, and check that the tx does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# The node must not be blinded to the transaction: a tx-size rejection should
# not enter the reject filter, so a second announcement still triggers a getdata.
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-100000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv(CInv(1, tx2.sha256)) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv(CInv(1, tx3.sha256))
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
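# Sketch of the virtual-size arithmetic used above (assumption: BIP141
# defines weight = 3*stripped_size + total_size and vsize = ceil(weight/4);
# the "+ 3" before dividing by 4 implements the ceiling). Quick check with
# hypothetical sizes:
_stripped, _total = 200, 260              # made-up byte counts
_weight = 3 * _stripped + _total          # 860
assert (_total + 3 * _stripped + 3) // 4 == (_weight + 3) // 4 == 215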
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
print("\tTesting block relay")
blocktype = 2|MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
# LitecoinEco: Blocks with nVersion < VB_TOP_BITS are rejected
# block2 = self.build_next_block(nVersion=4)
# block2.solve()
# self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
# assert(self.test_node.last_getdata.inv[0].type == blocktype)
# self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not segwit_activated:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
# LitecoinEco: Blocks with nVersion < VB_TOP_BITS are rejected
block4 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of the fact that
# transaction getdatas are processed after block getdatas; so we announce a
# transaction as well, and then check whether that particular getdata has been received.
self.old_node.announce_block(block4, use_header=False)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
print("\tTesting standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-100000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000000, scriptPubKey)]
tx.vout.append(CTxOut(800000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
print("\tTesting standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 400000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-100000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-100000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 100000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_reject.reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
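# --- Illustrative sketch (not part of the original test) ---
# The "versions" exercised above are just the leading opcode of the
# scriptPubKey: OP_0 (0x00) is witness version 0, and OP_1..OP_16
# (0x51..0x60) stand for versions 1..16. A hypothetical decoder for that
# mapping, assuming standard script opcode values:
def _decode_witness_version(opcode):
    return 0 if opcode == 0x00 else opcode - 0x50
assert _decode_witness_version(0x00) == 0
assert [_decode_witness_version(op) for op in range(0x51, 0x61)] == list(range(1, 17))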
def test_premature_coinbase_witness_spend(self):
print("\tTesting premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
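# Maturity arithmetic behind the generate(98) / generate(1) dance above
# (assumption: COINBASE_MATURITY is 100, so a coinbase output is spendable
# only once the spending block is at least 100 blocks above it):
_COINBASE_MATURITY = 100
_coinbase_height = 0                          # hypothetical height of the witness coinbase
_first_attempt = _coinbase_height + 98 + 1    # block built after generate(98): depth 99
_second_attempt = _first_attempt + 1          # after one more generate(1): depth 100
assert _first_attempt - _coinbase_height < _COINBASE_MATURITY      # rejected
assert _second_attempt - _coinbase_height >= _COINBASE_MATURITY    # accepted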
def test_signature_version_1(self):
print("\tTesting segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 100000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
print("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxos
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
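# --- Illustrative sketch (not part of the original test) ---
# Hashtype byte composition used throughout this test, assuming the
# standard values SIGHASH_ALL=1, SIGHASH_NONE=2, SIGHASH_SINGLE=3 and
# SIGHASH_ANYONECANPAY=0x80 OR'd on top:
_ALL, _NONE, _SINGLE, _ACP = 1, 2, 3, 0x80
_combos = sorted({base | flag for base in (_ALL, _NONE, _SINGLE) for flag in (0, _ACP)})
assert _combos == [0x01, 0x02, 0x03, 0x81, 0x82, 0x83]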
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
print("\tTesting P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
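# --- Illustrative sketch (not part of the original test) ---
# How the P2SH-P2WSH nesting above fits together, modeled with plain hashlib.
# Assumed opcode values: OP_DROP=0x75, OP_TRUE=0x51, OP_0=0x00. The redeem
# script is the 34-byte witness program 0x00 0x20 <sha256(witness script)>,
# pushed as the sole scriptSig element, and the outer scriptPubKey is
# OP_HASH160 <hash160(redeem script)> OP_EQUAL.
import hashlib
_sketch_witness_script = b'\x75\x51'   # OP_DROP OP_TRUE
_sketch_redeem_script = b'\x00\x20' + hashlib.sha256(_sketch_witness_script).digest()
assert len(_sketch_redeem_script) == 34 and _sketch_redeem_script[:2] == b'\x00\x20'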
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node, node_id):
print("\tTesting software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
stop_node(node, node_id)
self.nodes[node_id] = start_node(node_id, self.options.tmpdir, ["-debug"])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(node, 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = node.getblockcount()
while height >= 0:
block_hash = node.getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
print("\tTesting sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
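# Sigop arithmetic behind this test (assumptions: each bare OP_CHECKMULTISIG
# counts as 20 sigops, each OP_CHECKSIG as 1, and MAX_SIGOP_COST is the
# BIP141 limit of 80000):
_MAX_SIGOP_COST = 80000
_per_script = 20 * 5 + 193 * 1                   # 293, matching sigops_per_script above
_full_scripts = _MAX_SIGOP_COST // _per_script   # 273 outputs spendable at the limit
_extra = _MAX_SIGOP_COST % _per_script           # 11 sigops of headroom
assert (_full_scripts, _extra) == (273, 11)
assert _full_scripts * _per_script + _extra == _MAX_SIGOP_COST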
def test_getblocktemplate_before_lockin(self):
print("\tTesting getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# Even when the getblocktemplate caller doesn't indicate segwit support,
# a segwit-aware node will still be signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# TODO: this duplicates some code from blocktools.py, would be nice
# to refactor.
# Recompute the expected commitment and check that default_witness_commitment matches.
block = CBlock()
witness_root = block.get_merkle_root([ser_uint256(0), ser_uint256(txid)])
check_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(0)))
from test_framework.blocktools import WITNESS_COMMITMENT_HEADER
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(check_commitment)
script = CScript([OP_RETURN, output_data])
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
print("\tTesting uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-100000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-100000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
print("\tTesting detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 100000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
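# Size arithmetic for scripts[2] / scripts[3] above (assumption: a 59-byte
# push serializes as 1 length byte + 59 data bytes): 59 pushes * 60 bytes
# + 60 OP_DROPs = 3600 bytes (standard under the assumed 3600-byte
# witnessScript policy limit), and one extra OP_DROP makes 3601 (non-standard).
assert 59 * (1 + 59) + 60 == 3600 and 59 * (1 + 59) + 61 == 3601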
def test_reject_blocks(self):
print ("\tTesting rejection of block.nVersion < BIP9_TOP_BITS blocks")
block = self.build_next_block(nVersion=4)
block.solve()
resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(resp, 'bad-version(0x00000004)')
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
print("\nStarting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
print("\nTesting behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
print("\nTesting behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_reject_blocks()
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(self.nodes[2], 2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
| 45.642404 | 143 | 0.658001 |
13621de3879a3964706c178c2e3c444f97abdac2 | 628 | py | Python | server/users/backends.py | Sonatrix/reango | 5533bb126a5972f7f4124c6e9a26c6207596ff80 | ["MIT"] | 60 | 2016-12-12T19:46:41.000Z | 2019-04-29T05:09:50.000Z | server/users/backends.py | Sonatrix/reango | 5533bb126a5972f7f4124c6e9a26c6207596ff80 | ["MIT"] | 28 | 2016-11-06T19:25:38.000Z | 2018-06-11T22:43:01.000Z | server/users/backends.py | ncrmro/ango | 15bca070ed01ec8fa885a224305d1ac67d458b47 | ["MIT"] | 14 | 2016-11-30T22:01:12.000Z | 2019-03-07T22:45:09.000Z |
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import check_password
class UserModelEmailBackend(ModelBackend):
def authenticate(self, username="", password="", **kwargs):
try:
user = get_user_model().objects.get(email__iexact=username)
if check_password(password, user.password):
return user
else:
return None
except get_user_model().DoesNotExist:
# No user was found, return None - triggers default login failed
return None
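# --- Illustrative usage sketch (not part of the original module) ---
# Assuming this module is importable as users.backends, the backend is
# enabled by listing it in settings.AUTHENTICATION_BACKENDS ahead of the
# default ModelBackend, e.g.:
#
#     AUTHENTICATION_BACKENDS = [
#         "users.backends.UserModelEmailBackend",        # email-based login first
#         "django.contrib.auth.backends.ModelBackend",   # fall back to username login
#     ]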
| 34.888889 | 76 | 0.667197 |
4b4834e647e7ffea388f14c735898103d2763386 | 1,933 | py | Python | main.py | ovsartem/json_navigator | 89d41e9ea3c351e74e418062d89f5e8a6e6bb111 | ["MIT"] | null | null | null | main.py | ovsartem/json_navigator | 89d41e9ea3c351e74e418062d89f5e8a6e6bb111 | ["MIT"] | null | null | null | main.py | ovsartem/json_navigator | 89d41e9ea3c351e74e418062d89f5e8a6e6bb111 | ["MIT"] | null | null | null |
import json
def get_info(path):
"""
Reads a json file from the given path and returns the parsed data.
"""
with open(path, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
def dictionary(element):
"""
Function to work with dictionary
"""
all_elements = list(element.keys())
print("This object is a dictionary. Here are all keys.")
for i in range(len(all_elements)):
print(f"{i+1}) {all_elements[i]}")
choice = int(input("Type the humber of the key: "))
if isinstance(element[all_elements[choice-1]], list):
return element[all_elements[choice-1]][0]
return element[all_elements[choice-1]]
def _list(element):
"""
Function to work with list
"""
print("This object is a dictionary. Here are all elements.")
for i in range(len(element)):
print(f"{i+1}) {element[i]}")
choice = int(input("Type the humber of the element: "))
return element[choice-1]
def other_elements(element):
"""
Prints a leaf value (str, int or float) and returns the sentinel 111, which tells the navigation loop to stop.
"""
print(element)
return 111
def main(element):
"""
Main function to organize all types
"""
if isinstance(element, dict):
return dictionary(element)
elif isinstance(element, list):
return _list(element)
elif isinstance(element, tuple):
return _list(element)
else:
return other_elements(element)
def _work():
"""
Cycles the main function. Here you can change the path of the JSON file.
"""
print("This is json file navigator. You can leave whenever you want.")
data = get_info("frienfs_list_Obama.json")
stop = False
while not stop:
data = main(data)
if data == 111:
stop = True
if __name__ == '__main__':
start = True
while start:
_work()
move = input("Do you want to repeat? +/-: ")
if move == "-":
start = False
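# How main() dispatches, sketched on small inputs (interactive prompts aside):
# main({"a": [1, 2]}) lists the keys, and once the user picks "a" it returns 1,
# the first element of the nested list; main(42) prints 42 and returns the
# sentinel 111, which _work() treats as the stop signal.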
| 24.1625
| 74
| 0.602173
|
d867838c31b8010793298ab7616e80742167f681
| 5,469
|
py
|
Python
|
py2app/bundletemplate/lib/site.py
|
MAKOMO/py2app
|
942763aaffc2da553db743a2ebb5b3ae96acdb64
|
[
"MIT"
] | null | null | null |
py2app/bundletemplate/lib/site.py
|
MAKOMO/py2app
|
942763aaffc2da553db743a2ebb5b3ae96acdb64
|
[
"MIT"
] | null | null | null |
py2app/bundletemplate/lib/site.py
|
MAKOMO/py2app
|
942763aaffc2da553db743a2ebb5b3ae96acdb64
|
[
"MIT"
] | null | null | null |
"""
Append module search paths for third-party packages to sys.path.
This is stripped down and customized for use in py2app applications
"""
import sys
# os is actually in the zip, so we need to do this here.
# we can't call it python24.zip because zlib is not a built-in module (!)
_libdir = "/lib/python%d.%d" % sys.version_info[:2]  # sys.version[:3] truncates badly on Python 3.10+
_parent = "/".join(__file__.split("/")[:-1])
if not _parent.endswith(_libdir):
_parent += _libdir
sys.path.append(_parent + "/site-packages.zip")
# Stuffit decompresses recursively by default, that can mess up py2app bundles,
# add the uncompressed site-packages to the path to compensate for that.
sys.path.append(_parent + "/site-packages")
ENABLE_USER_SITE = False
USER_SITE = None
USER_BASE = None
def _import_os():
global os
import os # noqa: E402
_import_os()
try:
basestring
except NameError:
basestring = str
def makepath(*paths):
dir = os.path.abspath(os.path.join(*paths))
return dir, os.path.normcase(dir)
for m in sys.modules.values():
f = getattr(m, "__file__", None)
if isinstance(f, basestring) and os.path.exists(f):
m.__file__ = os.path.abspath(m.__file__)
del m
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
_dirs_in_sys_path = {}
dir = dircase = None # sys.path may be empty at this point
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if dircase not in _dirs_in_sys_path:
L.append(dir)
_dirs_in_sys_path[dircase] = 1
sys.path[:] = L
del dir, dircase, L
_dirs_in_sys_path = None
def _init_pathinfo():
global _dirs_in_sys_path
_dirs_in_sys_path = d = {}
for dir in sys.path:
if dir and not os.path.isdir(dir):
continue
dir, dircase = makepath(dir)
d[dircase] = 1
def addsitedir(sitedir):
global _dirs_in_sys_path
if _dirs_in_sys_path is None:
_init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if sitedircase not in _dirs_in_sys_path:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
names.sort()
for name in names:
if name[-4:] == os.extsep + "pth":
addpackage(sitedir, name)
if reset:
_dirs_in_sys_path = None
def addpackage(sitedir, name):
global _dirs_in_sys_path
if _dirs_in_sys_path is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
with open(fullname) as f:
while 1:
dir = f.readline()
if not dir:
break
if dir[0] == "#":
continue
if dir.startswith("import"):
exec(dir)
continue
if dir[-1] == "\n":
dir = dir[:-1]
dir, dircase = makepath(sitedir, dir)
if dircase not in _dirs_in_sys_path and os.path.exists(dir):
sys.path.append(dir)
_dirs_in_sys_path[dircase] = 1
except IOError:
return
if reset:
_dirs_in_sys_path = None
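# Hypothetical contents of a .pth file handled by addpackage() above: plain
# lines are resolved against the site dir and appended to sys.path when they
# exist, while lines beginning with "import" are exec'd verbatim.
#
#   ../shared-packages
#   import sys; sys.setrecursionlimit(5000)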
def _get_path(userbase):
version = sys.version_info
if sys.platform == "darwin" and getattr(sys, "_framework", None):
return "%s/lib/python/site-packages" % (userbase,)
return "%s/lib/python%d.%d/site-packages" % (userbase, version[0], version[1])
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
if env_base:
return env_base
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
if getattr(sys, "_framework", None):
return joinuser("~", "Library", sys._framework, "%d.%d" % sys.version_info[:2])
return joinuser("~", ".local")
def getuserbase():
"""Returns the `user base` directory path.
The `user base` directory can be used to store data. If the global
variable ``USER_BASE`` is not initialized yet, this function will also set
it.
"""
global USER_BASE
if USER_BASE is None:
USER_BASE = _getuserbase()
return USER_BASE
def getusersitepackages():
"""Returns the user-specific site-packages directory path.
If the global variable ``USER_SITE`` is not initialized yet, this
function will also set it.
"""
global USER_SITE
userbase = getuserbase() # this will also set USER_BASE
if USER_SITE is None:
USER_SITE = _get_path(userbase)
return USER_SITE
#
# Run custom site specific code, if available.
#
try:
import sitecustomize # noqa: F401
except ImportError:
pass
#
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
#
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
import builtins # noqa: E402
import _sitebuiltins # noqa: E402
builtins.help = _sitebuiltins._Helper()
builtins.quit = _sitebuiltins.Quitter('quit', 'Ctrl-D (i.e. EOF)')
builtins.exit = _sitebuiltins.Quitter('exit', 'Ctrl-D (i.e. EOF)')
| 26.548544
| 87
| 0.640519
|
b6c0dee51be70674d12dd64c650c880877c6882d
| 2,356
|
py
|
Python
|
mason/engines/metastore/models/table/table.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 4
|
2021-04-12T17:49:34.000Z
|
2022-01-23T19:54:29.000Z
|
mason/engines/metastore/models/table/table.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 24
|
2021-04-30T18:40:25.000Z
|
2021-05-12T20:52:06.000Z
|
mason/engines/metastore/models/table/table.py
|
kyprifog/mason
|
bf45672124ef841bc16216c293034f4ccc506621
|
[
"Apache-2.0"
] | 3
|
2021-04-12T19:40:43.000Z
|
2021-09-07T21:56:36.000Z
|
from datetime import datetime
from typing import Optional, List, Dict, Union
import pandas as pd
from mason.engines.metastore.models.schemas.text import TextSchema
from mason.clients.responsable import Responsable
from mason.clients.response import Response
from mason.engines.metastore.models.schemas.schema import Schema
from mason.engines.storage.models.path import Path
from dask.dataframe.core import DataFrame as DDataFrame
from pandas import DataFrame as PDataFrame
class Table(Responsable):
def __init__(self, name: str, schema: Schema, created_at: Optional[datetime] = None, created_by: Optional[str] = None, database_name: Optional[str] = None, paths: Optional[List[Path]] = None, source_path: Optional[Path] = None):
self.name = name
self.database_name = database_name
self.schema = schema
self.created_at = created_at
self.created_by = created_by
self.paths = paths if paths is not None else []  # avoid a shared mutable default
self.source_path = source_path
lt: Optional[str]
if (isinstance(schema, TextSchema)):
lt = schema.line_terminator
else:
lt = None
self.line_terminator: Optional[str] = lt
def as_df(self) -> PDataFrame:
pd_dict: Dict[str, str] = self.schema.to_pd_dict()
column_names: List[str] = list(pd_dict.keys())
return pd.DataFrame(columns=column_names).astype(pd_dict)
def as_dask_df(self) -> DDataFrame:
import dask.dataframe as dd
return dd.from_pandas(self.as_df(), npartitions=8)
def to_dict(self) -> Dict[str, Union[str, datetime, dict]]:
return {
"Name": self.name,
"CreatedAt": self.created_at or "",
"CreatedBy": self.created_by or "",
"Schema": self.schema.to_dict()
}
def to_response(self, response: Response):
response.add_data(self.to_dict())
return response
def column_names(self) -> List[str]:
return self.schema.column_names()
class TableList(Responsable):
def __init__(self, tables: List[Table]):
self.tables = tables
def to_dict(self) -> dict:
return {'Tables': list(map(lambda t: t.to_dict(), self.tables))}
def to_response(self, response: Response):
data = self.to_dict()
response.add_data(data)
return response
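# A hedged usage sketch; `schema` stands for any concrete Schema instance and
# the field values are placeholders:
#
# table = Table(name="events", schema=schema, created_by="etl")
# df = table.as_df()         # empty pandas frame typed from the schema
# ddf = table.as_dask_df()   # same frame partitioned for dask
# payload = TableList([table]).to_dict()   # {"Tables": [{"Name": "events", ...}]}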
| 33.183099
| 220
| 0.654499
|
2a6768d18d00eac1aa5b0854f121267cd9b7dbeb
| 5,747
|
py
|
Python
|
game_seq_embedder/game_seq_embedder/transformers/modeling_tf_camembert.py
|
fuxiAIlab/PMTC
|
bea55d821bc8adf64044194a0b72d8ce913a6213
|
[
"Artistic-1.0-Perl"
] | 4
|
2021-02-28T11:58:18.000Z
|
2022-02-03T03:26:45.000Z
|
game_seq_embedder/game_seq_embedder/transformers/modeling_tf_camembert.py
|
fuxiAIlab/PMTC
|
bea55d821bc8adf64044194a0b72d8ce913a6213
|
[
"Artistic-1.0-Perl"
] | 2
|
2022-03-16T13:57:37.000Z
|
2022-03-16T14:00:51.000Z
|
game_seq_embedder/game_seq_embedder/transformers/modeling_tf_camembert.py
|
fuxiAIlab/PMTC
|
bea55d821bc8adf64044194a0b72d8ce913a6213
|
[
"Artistic-1.0-Perl"
] | 2
|
2020-07-16T16:20:59.000Z
|
2021-08-08T20:22:47.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 CamemBERT model. """
from .configuration_camembert import CamembertConfig
from .file_utils import add_start_docstrings
from .modeling_tf_roberta import (
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaModel,
)
from .utils import logging
logger = logging.get_logger(__name__)
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
# See all CamemBERT models at https://huggingface.co/models?filter=camembert
]
CAMEMBERT_START_DOCSTRING = r"""
.. note::
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.CamembertConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
@add_start_docstrings(
"The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.",
CAMEMBERT_START_DOCSTRING,
)
class TFCamembertModel(TFRobertaModel):
"""
This class overrides :class:`~transformers.TFRobertaModel`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
@add_start_docstrings(
"""CamemBERT Model with a `language modeling` head on top. """,
CAMEMBERT_START_DOCSTRING,
)
class TFCamembertForMaskedLM(TFRobertaForMaskedLM):
"""
This class overrides :class:`~transformers.TFRobertaForMaskedLM`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
@add_start_docstrings(
"""CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer
on top of the pooled output) e.g. for GLUE tasks. """,
CAMEMBERT_START_DOCSTRING,
)
class TFCamembertForSequenceClassification(TFRobertaForSequenceClassification):
"""
This class overrides :class:`~transformers.TFRobertaForSequenceClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
@add_start_docstrings(
"""CamemBERT Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
CAMEMBERT_START_DOCSTRING,
)
class TFCamembertForTokenClassification(TFRobertaForTokenClassification):
"""
This class overrides :class:`~transformers.TFRobertaForTokenClassification`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
@add_start_docstrings(
"""CamemBERT Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
CAMEMBERT_START_DOCSTRING,
)
class TFCamembertForMultipleChoice(TFRobertaForMultipleChoice):
"""
This class overrides :class:`~transformers.TFRobertaForMultipleChoice`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
@add_start_docstrings(
"""CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
CAMEMBERT_START_DOCSTRING,
)
class TFCamembertForQuestionAnswering(TFRobertaForQuestionAnswering):
"""
This class overrides :class:`~transformers.TFRobertaForQuestionAnswering`. Please check the
superclass for the appropriate documentation alongside usage examples.
"""
config_class = CamembertConfig
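# A hedged usage sketch: like the other TF wrappers, these classes are loaded
# via from_pretrained(); the "camembert-base" checkpoint and the tokenizer
# call style are assumptions about the surrounding transformers version.
#
# from transformers import CamembertTokenizer
# tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
# model = TFCamembertForMaskedLM.from_pretrained("camembert-base")
# outputs = model(tokenizer("Le camembert est <mask>.", return_tensors="tf"))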
| 39.634483
| 224
| 0.753437
|
2338c143f1a4ff42cfb5f8d8230ffe0d86677e3a
| 4,316
|
py
|
Python
|
Preference2/src/main/java/voidblue/preference/demo/Preference/Preference.py
|
voidblue/PreferenceWebPage
|
7b36a6b0df09b84aa46fb63f9b191c84b46509d6
|
[
"Apache-2.0"
] | null | null | null |
Preference2/src/main/java/voidblue/preference/demo/Preference/Preference.py
|
voidblue/PreferenceWebPage
|
7b36a6b0df09b84aa46fb63f9b191c84b46509d6
|
[
"Apache-2.0"
] | null | null | null |
Preference2/src/main/java/voidblue/preference/demo/Preference/Preference.py
|
voidblue/PreferenceWebPage
|
7b36a6b0df09b84aa46fb63f9b191c84b46509d6
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import openpyxl
import tensorflow as tf
import numpy.core._methods
import numpy.lib.format
from Softmax import Softmax
import os
import sys
if getattr(sys, 'frozen', False):
# frozen
dir_ = os.path.dirname(sys.executable)
else:
# unfrozen
dir_ = os.path.dirname(os.path.realpath(__file__))
class Pref(Softmax):
def process_ranking(self, target_data=None):
xlist = []
first = 0
second = 0
third = 0
# print(target_data)
for y in target_data:
first = 0
second = 0
third = 0
forth = 0
fifth = 0
for item in y:
# if xxx > third:
# third = xxx
# if third > second:
# temp = second
# second = third
# third = temp
# if second > first:
# temp = first
# first = second
# second = temp
#
# if item > first:
# first = item
# if third > second:
# temp = second
# second = third
# third = temp
# if second > first:
# temp = first
# first = second
# second = temp
if item > fifth:
fifth = item
if fifth > forth:
temp = forth
forth = fifth
fifth = temp
if forth > third:
temp = third
third = forth
forth = temp
if third > second:
temp = second
second = third
third = temp
if second > first:
temp = first
first = second
second = temp
if len(list(np.where(y == first))) >= 1:
xlist.append(list(np.where(y == first)[0]))
else:
xlist.append([0])
if len(list(np.where(y == second))) >= 1:
xlist.append(list(np.where(y == second)[0]))
else:
xlist.append([0])
if len(list(np.where(y == third))) >= 1:
xlist.append(list(np.where(y == third)[0]))
else:
xlist.append([0])
xlist.append(list(np.where(y == forth)[0]))
xlist.append(list(np.where(y == fifth)[0]))
return list(xlist)
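# Note on the return layout: for every row of target_data the loop appends five
# index lists (positions of the 1st..5th largest values), so the flat result has
# 5 * len(target_data) entries and is read back in that order by
# return_rank_accuracy below.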
def target_ranking(self, x_data):
temp = x_data
for i, w in enumerate(self.weights):
temp = tf.nn.softmax(tf.add(tf.matmul(temp,-w),self.bias[i]))
return self.sess.run(temp)
def return_rank_accuracy(self, y_data ,data):
try:
wb = openpyxl.load_workbook("테스트결과" + '.xlsx')  # "테스트결과" = "test results"
except:
wb = openpyxl.Workbook()
wb.save("테스트결과" + ".xlsx")
ws = wb.active
accuracy = 0
sametime = [0, 0, 0, 0, 0]
for i in range(len(y_data)):
acc = 0
ws.cell(row=i +2, column= 14).value = 1
if y_data[i][data[5*i]] == 1:
accuracy += 1
sametime[0]+=1
acc += 1
elif y_data[i][data[(5 * i) + 1]] == 1:
accuracy += 1
sametime[1] += 1
acc += 1
elif y_data[i][data[(5 * i) + 2]] == 1:
accuracy += 1
sametime[2] += 1
acc += 1
elif y_data[i][data[(5 * i) + 3]] == 1:
accuracy += 1
sametime[3] += 1
acc += 1
elif y_data[i][data[(5 * i) + 4]] == 1:
accuracy += 1
sametime[4] += 1
acc += 1
else:
ws.cell(row=i + 2, column= 14).value = 0
# if(acc == 3):
# print(i, "th data item: all ranks hit (counting from 0)")
# break
# print(sametime)
wb.save("테스트결과" + '.xlsx')
wb.close()
accuracy /= len(y_data)
print('accuracy = {}'.format(accuracy))
| 30.609929
| 73
| 0.401993
|
8d756e2fb9607ed1a1ee3a35144f1674d20b1170
| 10,192
|
py
|
Python
|
flashfocus/display_protocols/x11.py
|
ardevd/flashfocus
|
694bd97ba30d565c82235c976c7133e28778f0fc
|
[
"MIT"
] | null | null | null |
flashfocus/display_protocols/x11.py
|
ardevd/flashfocus
|
694bd97ba30d565c82235c976c7133e28778f0fc
|
[
"MIT"
] | null | null | null |
flashfocus/display_protocols/x11.py
|
ardevd/flashfocus
|
694bd97ba30d565c82235c976c7133e28778f0fc
|
[
"MIT"
] | null | null | null |
"""Xorg utility code.
All submodules in flashfocus.display_protocols are expected to contain a minimal set of
functions/classes for abstracting across various display protocols. See list in flashfocus.compat
"""
import functools
import logging
from queue import Queue
import struct
from threading import Thread
from typing import Any, Callable, Dict, List, Optional, Union
from xcffib.xproto import (
CreateNotifyEvent,
CW,
EventMask,
PropertyNotifyEvent,
WindowClass,
WindowError,
)
from xpybutil import conn, root
from xpybutil.ewmh import (
get_active_window,
get_client_list,
get_current_desktop,
get_wm_desktop,
get_wm_state,
get_wm_window_opacity,
set_wm_window_opacity_checked,
)
from xpybutil.icccm import get_wm_class, set_wm_class_checked, set_wm_name_checked
import xpybutil.window
from xpybutil.util import get_atom_name, PropertyCookieSingle
from flashfocus.display import WMEvent, WMEventType
from flashfocus.errors import WMError
from flashfocus.util import match_regex
Event = Union[CreateNotifyEvent, PropertyNotifyEvent]
def ignore_window_error(function: Callable) -> Callable:
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except WindowError:
pass
return wrapper
class Window:
def __init__(self, window_id: int) -> None:
"""Represents an Xorg window.
Parameters
----------
window_id
The XORG window ID
Attributes
----------
id
The XORG window ID
"""
if window_id is None:
raise WMError("Undefined window")
self.id = window_id
self._properties: Dict = dict()
def __eq__(self, other) -> bool:
if type(self) != type(other):
raise TypeError("Arguments must be of the same type")
return self.id == other.id
def __ne__(self, other) -> bool:
if type(self) != type(other):
raise TypeError("Arguments must be of the same type")
return self.id != other.id
@property
def properties(self) -> Dict:
"""Get a dictionary with the window class and instance."""
# Properties are cached after the first call to this function and so might not necessarily
# be correct if the properties are changed between calls. This is acceptable for our
# purposes because Windows are short-lived objects.
if not self._properties:
try:
reply = get_wm_class(self.id).reply()
except (struct.error, WindowError) as e:
raise WMError("Invalid window: %s" % self.id) from e
try:
self._properties = {"window_id": reply[0], "window_class": reply[1]}
except TypeError:
pass
return self._properties
def match(self, criteria: Dict) -> bool:
"""Determine whether the window matches a set of criteria.
Parameters
----------
criteria
Dictionary of regexes of the form {PROPERTY: REGEX} e.g {"window_id": r"termite"}
"""
if not criteria.get("window_id") and not criteria.get("window_class"):
return True
for prop in ["window_id", "window_class"]:
# https://github.com/fennerm/flashfocus/issues/43
# I'm not 100 % sure but this issue seems to indicate that in some WMs a window might
# have an ID but not a class.
if (
criteria.get(prop)
and self.properties.get(prop)
and not match_regex(criteria[prop], self.properties[prop])
):
return False
return True
@property
def opacity(self) -> float:
return get_wm_window_opacity(self.id).reply()
@ignore_window_error
def set_opacity(self, opacity: Optional[float]) -> None:
# If opacity is None just silently ignore the request
if opacity is not None:
cookie = set_wm_window_opacity_checked(self.id, opacity)
cookie.check()
@ignore_window_error
def set_class(self, title: str, class_: str) -> None:
set_wm_class_checked(self.id, title, class_).check()
@ignore_window_error
def set_name(self, name: str) -> None:
set_wm_name_checked(self.id, name).check()
@ignore_window_error
def destroy(self) -> None:
try:
conn.core.DestroyWindow(self.id, True).check()
except WindowError as e:
raise WMError from e
@ignore_window_error
def is_fullscreen(self) -> bool:
wm_states = get_wm_state(self.id).reply()
# wm_states might be null in some WMs - #29
if wm_states:
for wm_state in wm_states:
if get_atom_name(wm_state) == "_NET_WM_STATE_FULLSCREEN":
return True
return False
def _create_message_window() -> Window:
"""Create a hidden window for sending X client-messages.
The window's properties can be used to send messages between threads.
Returns
-------
int
An X-window id.
"""
setup = conn.get_setup()
window_id = conn.generate_id()
conn.core.CreateWindow(
depth=setup.roots[0].root_depth,
wid=window_id,
parent=root,
x=0,
y=0,
width=1,
height=1,
border_width=0,
_class=WindowClass.InputOutput,
visual=setup.roots[0].root_visual,
value_mask=CW.EventMask,
value_list=[EventMask.PropertyChange],
is_checked=True,
).check()
return Window(window_id)
class DisplayHandler(Thread):
"""Parse events from the X-server and pass them on to FlashServer"""
def __init__(self, queue: Queue) -> None:
# This is set to True when initialization of the thread is complete and its ready to begin
# the event loop
self.ready = False
super(DisplayHandler, self).__init__()
# Queue of messages to be handled by the flash server
self.queue: Queue = queue
# This property is set by the server during shutdown and signals that the display handler
# should disconnect from XCB
self.keep_going: bool = True
# In order to interrupt the event loop we need to map a special message-passing window.
# When it comes time to exit we set the name of the window to 'KILL'. This is then
# picked up as an event in the event loop. See https://xcb.freedesktop.org/tutorial/events/
self.message_window: Window = _create_message_window()
def run(self) -> None:
# PropertyChange is for detecting changes in focus
# SubstructureNotify is for detecting new mapped windows
xpybutil.window.listen(xpybutil.root, "PropertyChange", "SubstructureNotify")
# Also listen to property changes in the message window
xpybutil.window.listen(self.message_window.id, "PropertyChange")
self.ready = True
while self.keep_going:
event = conn.wait_for_event()
if isinstance(event, PropertyNotifyEvent):
self._handle_property_change(event)
elif isinstance(event, CreateNotifyEvent):
self._handle_new_mapped_window(event)
def stop(self) -> None:
set_wm_name_checked(self.message_window.id, "KILL").check()
self.keep_going = False
self.join()
self.message_window.destroy()
def queue_window(self, window: Window, event_type: WMEventType) -> None:
self.queue.put(WMEvent(window=window, event_type=event_type))
def _handle_new_mapped_window(self, event: Event) -> None:
logging.info(f"Window {event.window} mapped...")
if event.window is not None:
window = Window(event.window)
# Check that window is visible so that we don't accidentally set
# opacity of windows which are not for display. Without this step
# window opacity can become frozen and stop responding to flashes.
if window in list_mapped_windows():
self.queue_window(window, WMEventType.NEW_WINDOW)
else:
logging.info(f"Window {window.id} is not visible, ignoring...")
def _handle_property_change(self, event: Event) -> None:
"""Handle a property change on a watched window."""
atom_name = get_atom_name(event.atom)
if atom_name == "_NET_ACTIVE_WINDOW":
focused_window = get_focused_window()
if focused_window is not None:
logging.info(f"Focus shifted to {focused_window.id}")
self.queue_window(focused_window, WMEventType.FOCUS_SHIFT)
elif atom_name == "WM_NAME" and event.window == self.message_window.id:
# Received kill signal from server -> terminate the thread
self.keep_going = False
@ignore_window_error
def get_focused_window() -> Optional[Window]:
window_id = get_active_window().reply()
if window_id is not None:
return Window(window_id)
else:
return None
def _try_unwrap(cookie: PropertyCookieSingle) -> Optional[Any]:
"""Try reading a reply from the X server, ignoring any errors encountered."""
try:
return cookie.reply()
except WindowError:
return None
@ignore_window_error
def list_mapped_windows(workspace: Optional[int] = None) -> List[Window]:
mapped_window_ids = get_client_list().reply()
if mapped_window_ids is None:
mapped_window_ids = list()
mapped_windows = [Window(wid) for wid in mapped_window_ids if wid is not None]
if workspace is not None:
cookies = [get_wm_desktop(wid) for wid in mapped_window_ids]
workspaces = [_try_unwrap(cookie) for cookie in cookies]
mapped_windows = [win for win, ws in zip(mapped_windows, workspaces) if ws == workspace]
return mapped_windows
@ignore_window_error
def get_focused_workspace() -> int:
return get_current_desktop().reply()
@ignore_window_error
def disconnect_display_conn() -> None:
conn.disconnect()
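# A hedged usage sketch of the helpers above; the window id is made up and the
# regex semantics come from flashfocus.util.match_regex:
#
# win = Window(0x400021)
# win.match({"window_class": r"(?i)firefox"})   # True when WM_CLASS matches
# win.match({})                                 # no criteria -> always True
# ids = [w.id for w in list_mapped_windows(workspace=get_focused_workspace())]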
| 33.636964
| 99
| 0.645899
|
995c3165d08ccedec632bb58689f90632fd7cf13
| 4,481
|
py
|
Python
|
test/test_acquirer.py
|
coleygroup/molpal
|
7b3dd1488af6e734caea527fe2738ff7f6ebfc49
|
[
"MIT"
] | 81
|
2020-12-15T14:28:57.000Z
|
2022-03-14T12:26:00.000Z
|
test/test_acquirer.py
|
ashuein/molpal
|
1e17a0c406516ceaeaf273a6983d06206bcfe76f
|
[
"MIT"
] | 13
|
2021-01-07T17:09:00.000Z
|
2022-03-01T12:45:22.000Z
|
test/test_acquirer.py
|
ashuein/molpal
|
1e17a0c406516ceaeaf273a6983d06206bcfe76f
|
[
"MIT"
] | 23
|
2020-12-15T16:09:48.000Z
|
2022-03-13T11:29:40.000Z
|
import string
import unittest
import numpy as np
from molpal.acquirer import Acquirer
class TestAcquirer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.xs = string.ascii_lowercase
cls.y_means = np.arange(len(cls.xs), dtype=np.double)[::-1]
cls.y_vars = np.zeros(len(cls.xs))
cls.init_size = 10
cls.batch_size = 10
cls.acq = Acquirer(
size=len(cls.xs), init_size=cls.init_size,
batch_size=cls.batch_size, metric='greedy', epsilon=0., seed=0
)
def test_acquire_initial(self):
init_xs = self.acq.acquire_initial(self.xs)
self.assertEqual(len(init_xs), self.init_size)
def test_acquire_initial_set_seed(self):
acq = Acquirer(
size=len(self.xs), init_size=self.init_size,
batch_size=self.batch_size, metric='greedy', epsilon=0., seed=0
)
init_xs_1 = acq.acquire_initial(self.xs)
acq = Acquirer(
size=len(self.xs), init_size=self.init_size,
batch_size=self.batch_size, metric='greedy', epsilon=0., seed=0
)
init_xs_2 = acq.acquire_initial(self.xs)
self.assertEqual(set(init_xs_1), set(init_xs_2))
def test_acquire_initial_None_seed(self):
"""There is a roughly 1-in-2.5E13 chance that the same initial batch
is chosen twice in a row, causing this test to report failure. If this
test fails twice in a row, it is safe to assume that the test is
genuinely failing."""
acq = Acquirer(
size=len(self.xs), init_size=self.init_size,
batch_size=self.batch_size, metric='greedy', epsilon=0., seed=None
)
init_xs_1 = acq.acquire_initial(self.xs)
acq = Acquirer(
size=len(self.xs), init_size=self.init_size,
batch_size=self.batch_size, metric='greedy', epsilon=0., seed=None
)
init_xs_2 = acq.acquire_initial(self.xs)
self.assertNotEqual(set(init_xs_1), set(init_xs_2))
def test_acquire_batch_explored(self):
"""Acquirers should not reacquire old points"""
init_xs = self.acq.acquire_initial(self.xs)
explored = {x: 0. for x in init_xs}
batch_xs = self.acq.acquire_batch(
self.xs, self.y_means, self.y_vars, explored=explored
)
self.assertEqual(len(batch_xs), self.batch_size)
self.assertNotEqual(set(init_xs), set(batch_xs))
def test_acquire_batch_correct_points(self):
"""Acquirers should acquire the top-m points by calculated utility.
This test acquirer uses the greedy metric because the utilities are
equal to the input Y_means. Assuming utility calculation is working
correctly, this test ensures that the acquirer uses these utilities
properly."""
batch_xs = self.acq.acquire_batch(
self.xs, self.y_means, self.y_vars, explored={}
)
self.assertEqual(set(batch_xs),
set(self.xs[:self.batch_size]))
explored = {x: 0. for x in batch_xs}
batch_xs_2 = self.acq.acquire_batch(
self.xs, self.y_means, self.y_vars, explored=explored
)
self.assertEqual(set(batch_xs_2),
set(self.xs[self.batch_size:2*self.batch_size]))
explored_2 = {x: 0. for x in batch_xs_2}
batch_xs_3 = self.acq.acquire_batch(
self.xs, self.y_means, self.y_vars,
explored={**explored, **explored_2}
)
self.assertEqual(set(batch_xs_3),
set(self.xs[2*self.batch_size:]))
def test_acquire_batch_epsilon(self):
"""There is roughly a 1-in-5*10^6 (= nCr(26, 10)) chance that a random
batch is the same as the calculated top-m batch, causing this test to
report a failure. There is a roughly 1-in-2.5E13 chance this test
fails twice in a row. If that happens, it is safe to assume that the
test is genuinely failing."""
acq = Acquirer(
size=len(self.xs), init_size=self.init_size,
batch_size=self.batch_size, metric='greedy', epsilon=1., seed=0
)
explored = {}
batch_xs = acq.acquire_batch(
self.xs, self.y_means, self.y_vars, explored=explored
)
self.assertNotEqual(set(batch_xs),
set(self.xs[:self.batch_size]))
if __name__ == "__main__":
unittest.main()
| 38.299145
| 79
| 0.622852
|
ccf7f4444c9c4febe83ac3db638f182316e29555
| 1,170
|
py
|
Python
|
linearExt.py
|
gcant/pairwise-comparison-BP
|
f995d99440fd37b2850442413672a8e20b9f5b99
|
[
"Apache-2.0"
] | 2
|
2021-10-04T09:48:50.000Z
|
2022-01-13T21:24:43.000Z
|
linearExt.py
|
gcant/pairwise-comparison-BP
|
f995d99440fd37b2850442413672a8e20b9f5b99
|
[
"Apache-2.0"
] | null | null | null |
linearExt.py
|
gcant/pairwise-comparison-BP
|
f995d99440fd37b2850442413672a8e20b9f5b99
|
[
"Apache-2.0"
] | null | null | null |
import ctypes
import numpy as np
import networkx as nx
# C arrays of ints/doubles using numpy
array_int = np.ctypeslib.ndpointer(dtype=ctypes.c_int,ndim=1, flags='CONTIGUOUS')
array_double = np.ctypeslib.ndpointer(dtype=np.double,ndim=1, flags='CONTIGUOUS')
lib = ctypes.cdll.LoadLibrary("./out/LEMP.so")
lib.full_algorithm.argtypes = [
ctypes.c_int,
ctypes.c_int,
array_int,
ctypes.c_int,
ctypes.c_double,
array_double]
lib.full_algorithm.restype = ctypes.c_int
def BP(G, L, tol=10e-10):
M = np.array(list(G.edges())).flatten().astype(ctypes.c_int)
output = np.zeros(L*(G.number_of_nodes()+1) + 4)
s = lib.full_algorithm(
ctypes.c_int(G.number_of_nodes()),
ctypes.c_int(G.number_of_edges()),
M,
ctypes.c_int(L),
ctypes.c_double(tol),
output)
pts = output[:L].copy()
A = output[L:-4].reshape(G.number_of_nodes(),L)
S1 = output[-4]/G.number_of_nodes()
S2 = output[-3]/G.number_of_nodes()
S = output[-2]
logV = output[-1]
density_states = S - G.number_of_nodes()*np.log(2)
return pts, A, S1, S2, density_states, S, logV, s
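# A hedged usage sketch; it assumes the compiled ./out/LEMP.so is present and
# that nodes are labeled 0..n-1, as produced by the generator below:
#
# import networkx as nx
# G = nx.gnp_random_graph(50, 0.2, seed=1, directed=True)
# pts, A, S1, S2, density_states, S, logV, status = BP(G, L=32)
# # A has shape (n_nodes, L), matching the reshape above.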
| 27.857143
| 81
| 0.641026
|
f2f9966821a411eef4fb7167f4a20e8faf90a885
| 5,221
|
py
|
Python
|
python_modules/libraries/dagster-airflow/dagster_airflow_tests/conftest.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-airflow/dagster_airflow_tests/conftest.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-airflow/dagster_airflow_tests/conftest.py
|
hspak/dagster
|
94cff048d5d757d0fe1d83abe236252a1c86bd41
|
[
"Apache-2.0"
] | null | null | null |
'''Test fixtures for dagster-airflow.
These make very heavy use of fixture dependency and scope. If you're unfamiliar with pytest
fixtures, read: https://docs.pytest.org/en/latest/fixture.html.
'''
# pylint doesn't understand the way that pytest constructs fixture dependencies
# pylint: disable=redefined-outer-name, unused-argument
import os
import shutil
import subprocess
import tempfile
import uuid
import docker
import pytest
import six
from dagster import check
from dagster.utils import file_relative_path, load_yaml_from_path, mkdir_p, pushd
@pytest.fixture(scope='session')
def git_repository_root():
return six.ensure_str(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip())
@pytest.fixture(scope='session')
def test_repo_path(git_repository_root): # pylint: disable=redefined-outer-name
return file_relative_path(
__file__,
os.path.join(git_repository_root, '.buildkite', 'images', 'docker', 'test_project'),
)
@pytest.fixture(scope='session')
def environments_path(test_repo_path): # pylint: disable=redefined-outer-name
return os.path.join(test_repo_path, 'test_pipelines', 'environments')
@pytest.fixture(scope='module')
def airflow_home():
'''Check that AIRFLOW_HOME is set, and return it'''
airflow_home_dir = os.getenv('AIRFLOW_HOME')
assert airflow_home_dir, 'No AIRFLOW_HOME set -- is airflow installed?'
airflow_home_dir = os.path.abspath(os.path.expanduser(airflow_home_dir))
return airflow_home_dir
@pytest.fixture(scope='module')
def temp_dir():
'''Context manager for temporary directories.
pytest implicitly wraps in try/except.
'''
dir_path = os.path.join('/tmp', str(uuid.uuid4()))
mkdir_p(dir_path)
yield dir_path
shutil.rmtree(dir_path)
@pytest.fixture(scope='module')
def clean_airflow_home(airflow_home):
'''Ensure that the existing contents of AIRFLOW_HOME do not interfere with test.'''
airflow_dags_path = os.path.join(airflow_home, 'dags')
# Ensure Airflow DAGs folder exists
if not os.path.exists(airflow_dags_path):
os.makedirs(airflow_dags_path)
tempdir_path = tempfile.mkdtemp()
# Move existing DAGs aside for test
dags_files = os.listdir(airflow_dags_path)
for dag_file in dags_files:
shutil.move(os.path.join(airflow_dags_path, dag_file), tempdir_path)
yield
# Clean up DAGs produced by test
shutil.rmtree(airflow_dags_path)
os.makedirs(airflow_dags_path)
# Move original DAGs back
file_paths = os.listdir(tempdir_path)
for file_path in file_paths:
shutil.move(
os.path.join(tempdir_path, file_path), os.path.join(airflow_dags_path, file_path)
)
shutil.rmtree(tempdir_path)
@pytest.fixture(scope='session')
def docker_client():
'''Instantiate a Docker Python client.'''
try:
client = docker.from_env()
client.info()
except docker.errors.APIError:
# pylint: disable=protected-access
check.failed('Couldn\'t find docker at {url} -- is it running?'.format(url=client._url('')))
return client
@pytest.fixture(scope='session')
def dagster_docker_image():
assert (
'DAGSTER_DOCKER_IMAGE' in os.environ
), 'DAGSTER_DOCKER_IMAGE must be set in your environment for these tests'
# Will be set in environment by .buildkite/pipeline.py -> tox.ini to:
# ${AWS_ACCOUNT_ID}.dkr.ecr.us-west-1.amazonaws.com/dagster-docker-buildkite:${BUILDKITE_BUILD_ID}-${TOX_PY_VERSION}
return os.environ['DAGSTER_DOCKER_IMAGE']
@pytest.fixture(scope='session')
def build_docker_image(test_repo_path, docker_client, dagster_docker_image):
with pushd(test_repo_path):
subprocess.check_output(['./build.sh'], shell=True)
return dagster_docker_image
@pytest.fixture(scope='session')
def docker_image(docker_client, build_docker_image):
'''Check that the airflow image exists.'''
try:
docker_client.images.get(build_docker_image)
except docker.errors.ImageNotFound:
check.failed(
'Couldn\'t find docker image {image} required for test: please run the script at '
'{script_path}'.format(
image=build_docker_image,
script_path=file_relative_path(__file__, 'test_project/build.sh'),
)
)
return build_docker_image
@pytest.fixture(scope='module')
def dags_path(airflow_home):
'''Abspath to the magic Airflow DAGs folder.'''
path = os.path.join(airflow_home, 'dags', '')
mkdir_p(os.path.abspath(path))
return path
@pytest.fixture(scope='module')
def plugins_path(airflow_home):
'''Abspath to the magic Airflow plugins folder.'''
path = os.path.join(airflow_home, 'plugins', '')
mkdir_p(os.path.abspath(path))
return path
@pytest.fixture(scope='module')
def environment_dict(s3_bucket, test_repo_path):
env_dict = load_yaml_from_path(os.path.join(test_repo_path, 'env.yaml'))
env_dict['storage'] = {'s3': {'s3_bucket': s3_bucket}}
yield env_dict
@pytest.fixture(scope='session')
def s3_bucket():
yield 'dagster-scratch-80542c2'
@pytest.fixture(scope='session')
def gcs_bucket():
yield 'dagster-scratch-ccdfe1e'
| 30.005747
| 120
| 0.714039
|
9b45df84786d8b4294f3ee937d3f61b66b05730e
| 1,974
|
py
|
Python
|
lib/pybtex-0.19/pybtex/tests/database_test/data.py
|
cabeen/bibfmt
|
a2607506f15249f8e0ee900db103d57afec7dec8
|
[
"MIT"
] | 1
|
2021-02-20T19:53:48.000Z
|
2021-02-20T19:53:48.000Z
|
lib/pybtex-0.19/pybtex/tests/database_test/data.py
|
cabeen/bibfmt
|
a2607506f15249f8e0ee900db103d57afec7dec8
|
[
"MIT"
] | null | null | null |
lib/pybtex-0.19/pybtex/tests/database_test/data.py
|
cabeen/bibfmt
|
a2607506f15249f8e0ee900db103d57afec7dec8
|
[
"MIT"
] | null | null | null |
# vim:fileencoding=utf-8
from pybtex.database import Entry, Person
from pybtex.database import BibliographyData
reference_data = BibliographyData(
entries=[
('ruckenstein-diffusion', Entry('article',
fields={
'language': u'english',
'title': u'Predicting the Diffusion Coefficient in Supercritical Fluids',
'journal': u'Ind. Eng. Chem. Res.',
'volume': u'36',
'year': u'1997',
'pages': u'888-895',
},
persons={'author': [Person(u'Liu, Hongquin'), Person(u'Ruckenstein, Eli')]},
)),
('test-booklet', Entry('booklet',
fields={
'language': u'english',
'title': u'Just a booklet',
'year': u'2006',
'month': u'January',
'address': u'Moscow',
'howpublished': u'Published by Foo',
},
persons={'author': [Person(u'de Last, Jr., First Middle')]}
)),
('test-inbook', Entry('inbook',
fields={
'publisher': u'Some Publisher',
'language': u'english',
'title': u'Some Title',
'series': u'Some series',
'booktitle': u'Some Good Book',
'number': u'3',
'edition': u'Second',
'year': u'1933',
'pages': u'44--59',
},
persons={'author': [Person(u'Jackson, Peter')]}
)),
('viktorov-metodoj', Entry('book',
fields={
'publisher': u'Л.: <<Химия>>',
'year': u'1977',
'language': u'russian',
'title': u'Методы вычисления физико-химических величин и прикладные расчёты',
},
persons={'author': [Person(u'Викторов, Михаил Маркович')]}
)),
],
preamble=['%%% pybtex example file']
)
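# A hedged usage sketch of reading the data back through pybtex.database:
#
# entry = reference_data.entries['ruckenstein-diffusion']
# entry.fields['journal']                      # 'Ind. Eng. Chem. Res.'
# [str(p) for p in entry.persons['author']]    # formatted author names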
| 35.25
| 93
| 0.459473
|
befce7838d0c6d6ae667596c75b03e63f01ad478
| 846
|
py
|
Python
|
main.py
|
hectorrdz98/imageclassification-cisco
|
3f3ab94887e3ccb79853274490426b64b55568dc
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
hectorrdz98/imageclassification-cisco
|
3f3ab94887e3ccb79853274490426b64b55568dc
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
hectorrdz98/imageclassification-cisco
|
3f3ab94887e3ccb79853274490426b64b55568dc
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import flask
import subprocess
# Create an instance of Flask
app = flask.Flask(__name__)
@app.route("/images", methods=['POST'])
def images():
for file in flask.request.files.values():
filename = os.path.join(".", str(int(time.time())) + "_" + file.filename)
file.save(filename)
result = tf_classify(filename)
os.remove(filename)
return flask.jsonify(result)
return flask.jsonify([])
def tf_classify(name):
print("detecting %s..." % name)
p = subprocess.Popen(["python", "./tensorflow-models/tutorials/image/imagenet/classify_image.py", "--model_dir=.", "--image_file=" + name],
stdout=subprocess.PIPE)
out, _ = p.communicate()
out = out.decode("utf-8")
return out.split("\r\n")[1:-1]
if __name__ == "__main__":
app.run(host="0.0.0.0")
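# A hedged usage sketch: with the classify_image.py path hard-coded above in
# place, the endpoint accepts any multipart file field and returns the
# classifier's top predictions as JSON, e.g. (port is Flask's default 5000):
#
#   curl -F "image=@cat.jpg" http://localhost:5000/images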
| 30.214286
| 144
| 0.634752
|
68422639a0ffa078adca4eb341f99e0138ea79fa
| 16,894
|
py
|
Python
|
clearml/backend_interface/task/populate.py
|
glemarivero/clearml
|
28b85028fe4da3ab963b69e8ac0f7feef73cfcf6
|
[
"Apache-2.0"
] | null | null | null |
clearml/backend_interface/task/populate.py
|
glemarivero/clearml
|
28b85028fe4da3ab963b69e8ac0f7feef73cfcf6
|
[
"Apache-2.0"
] | null | null | null |
clearml/backend_interface/task/populate.py
|
glemarivero/clearml
|
28b85028fe4da3ab963b69e8ac0f7feef73cfcf6
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
from functools import reduce
from logging import getLogger
from typing import Optional, Sequence, Union, Tuple
from six.moves.urllib.parse import urlparse
from pathlib2 import Path
from ...task import Task
from .repo import ScriptInfo
class CreateAndPopulate(object):
def __init__(
self,
project_name=None, # Optional[str]
task_name=None, # Optional[str]
task_type=None, # Optional[str]
repo=None, # Optional[str]
branch=None, # Optional[str]
commit=None, # Optional[str]
script=None, # Optional[str]
working_directory=None, # Optional[str]
packages=None, # Optional[Union[bool, Sequence[str]]]
requirements_file=None, # Optional[Union[str, Path]]
docker=None, # Optional[str]
base_task_id=None, # Optional[str]
add_task_init_call=True, # bool
raise_on_missing_entries=False, # bool
verbose=False, # bool
):
# type: (...) -> None
"""
Create a new Task from an existing code base.
If the code does not already contain a call to Task.init, pass add_task_init_call=True,
and the code will be patched in remote execution (i.e. when executed by `clearml-agent`).
:param project_name: Set the project name for the task. Required if base_task_id is None.
:param task_name: Set the name of the remote task. Required if base_task_id is None.
:param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
:param repo: Remote URL for the repository to use, or path to local copy of the git repository
Example: 'https://github.com/allegroai/clearml.git' or '~/project/repo'
:param branch: Select specific repository branch/tag (implies the latest commit from the branch)
:param commit: Select specific commit id to use (default: latest commit,
or when used with local repository matching the local commit id)
:param script: Specify the entry point script for the remote execution. When used in tandem with
remote git repository the script should be a relative path inside the repository,
for example: './source/train.py' . When used with local repository path it supports a
direct path to a file inside the local repository itself, for example: '~/project/source/train.py'
:param working_directory: Working directory to launch the script from. Default: repository root folder.
Relative to repo root or local folder.
:param packages: Manually specify a list of required packages. Example: ["tqdm>=2.1", "scikit-learn"]
or `True` to automatically create requirements
based on locally installed packages (repository must be local).
:param requirements_file: Specify requirements.txt file to install when setting the session.
If not provided, the requirements.txt from the repository will be used.
:param docker: Select the docker image to be executed in by the remote session
:param base_task_id: Use a pre-existing task in the system, instead of a local repo/script.
Essentially clones an existing task and overrides arguments/requirements.
:param add_task_init_call: If True, a 'Task.init()' call is added to the script entry point in remote execution.
:param raise_on_missing_entries: If True raise ValueError on missing entries when populating
:param verbose: If True print verbose logging
"""
if len(urlparse(repo).scheme) <= 1:
folder = repo
repo = None
else:
folder = None
if raise_on_missing_entries and not base_task_id:
if not script:
raise ValueError("Entry point script not provided")
if not repo and not folder and not Path(script).is_file():
raise ValueError("Script file \'{}\' could not be found".format(script))
if raise_on_missing_entries and commit and branch:
raise ValueError(
"Specify either a branch/tag or specific commit id, not both (either --commit or --branch)")
if raise_on_missing_entries and not folder and working_directory and working_directory.startswith('/'):
raise ValueError("Working directory \'{}\' must be relative to repository root".format(working_directory))
if requirements_file and not Path(requirements_file).is_file():
raise ValueError("Requirements file could not be found \'{}\'".format(requirements_file))
self.folder = folder
self.commit = commit
self.branch = branch
self.repo = repo
self.script = script
self.cwd = working_directory
assert not packages or isinstance(packages, (tuple, list, bool))
self.packages = list(packages) if packages is not None and not isinstance(packages, bool) \
else (packages or None)
self.requirements_file = Path(requirements_file) if requirements_file else None
self.base_task_id = base_task_id
self.docker = docker
self.add_task_init_call = add_task_init_call
self.project_name = project_name
self.task_name = task_name
self.task_type = task_type
self.task = None
self.raise_on_missing_entries = raise_on_missing_entries
self.verbose = verbose
def create_task(self):
# type: () -> Task
"""
Create the new populated Task
:return: newly created Task object
"""
local_entry_file = None
repo_info = None
if self.folder or (self.script and Path(self.script).is_file()):
self.folder = os.path.expandvars(os.path.expanduser(self.folder)) if self.folder else None
self.script = os.path.expandvars(os.path.expanduser(self.script)) if self.script else None
self.cwd = os.path.expandvars(os.path.expanduser(self.cwd)) if self.cwd else None
if Path(self.script).is_file():
entry_point = self.script
else:
entry_point = (Path(self.folder) / self.script).as_posix()
entry_point = os.path.abspath(entry_point)
if not os.path.isfile(entry_point):
raise ValueError("Script entrypoint file \'{}\' could not be found".format(entry_point))
local_entry_file = entry_point
repo_info, requirements = ScriptInfo.get(
filepaths=[entry_point],
log=getLogger(),
create_requirements=self.packages is True, uncommitted_from_remote=True,
detect_jupyter_notebook=False)
# check if we have no repository and no requirements raise error
if self.raise_on_missing_entries and (not self.requirements_file and not self.packages) \
and not self.repo and (
not repo_info or not repo_info.script or not repo_info.script.get('repository')):
raise ValueError("Standalone script detected \'{}\', but no requirements provided".format(self.script))
if self.base_task_id:
if self.verbose:
print('Cloning task {}'.format(self.base_task_id))
task = Task.clone(source_task=self.base_task_id, project=Task.get_project_id(self.project_name))
else:
# noinspection PyProtectedMember
task = Task._create(
task_name=self.task_name, project_name=self.project_name,
task_type=self.task_type or Task.TaskTypes.training)
# if there is nothing to populate, return
if not any([
self.folder, self.commit, self.branch, self.repo, self.script, self.cwd,
self.packages, self.requirements_file, self.base_task_id, self.docker
]):
return task
task_state = task.export_task()
if 'script' not in task_state:
task_state['script'] = {}
if repo_info:
task_state['script']['repository'] = repo_info.script['repository']
task_state['script']['version_num'] = repo_info.script['version_num']
task_state['script']['branch'] = repo_info.script['branch']
task_state['script']['diff'] = repo_info.script['diff'] or ''
task_state['script']['working_dir'] = repo_info.script['working_dir']
task_state['script']['entry_point'] = repo_info.script['entry_point']
task_state['script']['binary'] = repo_info.script['binary']
task_state['script']['requirements'] = repo_info.script.get('requirements') or {}
if self.cwd:
cwd = self.cwd if Path(self.cwd).is_dir() else (
Path(repo_info.script['repo_root']) / self.cwd).as_posix()
if not Path(cwd).is_dir():
raise ValueError("Working directory \'{}\' could not be found".format(cwd))
cwd = Path(cwd).relative_to(repo_info.script['repo_root']).as_posix()
entry_point = \
Path(repo_info.script['repo_root']) / repo_info.script['working_dir'] / repo_info.script[
'entry_point']
entry_point = entry_point.relative_to(cwd).as_posix()
task_state['script']['entry_point'] = entry_point
task_state['script']['working_dir'] = cwd
elif self.repo:
# normalize backslashes and remove first one
entry_point = '/'.join([p for p in self.script.split('/') if p and p != '.'])
cwd = '/'.join([p for p in (self.cwd or '.').split('/') if p and p != '.'])
if cwd and entry_point.startswith(cwd + '/'):
entry_point = entry_point[len(cwd) + 1:]
task_state['script']['repository'] = self.repo
task_state['script']['version_num'] = self.commit or None
task_state['script']['branch'] = self.branch or None
task_state['script']['diff'] = ''
task_state['script']['working_dir'] = cwd or '.'
task_state['script']['entry_point'] = entry_point
else:
# standalone task
task_state['script']['entry_point'] = self.script
task_state['script']['working_dir'] = '.'
# update requirements
reqs = []
if self.requirements_file:
with open(self.requirements_file.as_posix(), 'rt') as f:
reqs = [line.strip() for line in f.readlines()]
if self.packages and self.packages is not True:
reqs += self.packages
if reqs:
# make sure we have clearml.
clearml_found = False
for line in reqs:
if line.strip().startswith('#'):
continue
package = reduce(lambda a, b: a.split(b)[0], "#;@=~<>", line).strip()
if package == 'clearml':
clearml_found = True
break
if not clearml_found:
reqs.append('clearml')
task_state['script']['requirements'] = {'pip': '\n'.join(reqs)}
elif not self.repo and repo_info and not repo_info.script.get('requirements'):
# we are in local mode, make sure we have "requirements.txt" it is a must
reqs_txt_file = Path(repo_info.script['repo_root']) / "requirements.txt"
if self.raise_on_missing_entries and not reqs_txt_file.is_file():
raise ValueError(
"requirements.txt not found [{}] "
"Use --requirements or --packages".format(reqs_txt_file.as_posix()))
if self.add_task_init_call:
script_entry = os.path.abspath('/' + task_state['script'].get('working_dir', '.') +
'/' + task_state['script']['entry_point'])
idx_a = 0
# find the right entry for the patch if we have a local file (basically after __future__
if local_entry_file:
with open(local_entry_file, 'rt') as f:
lines = f.readlines()
future_found = -1
for i, line in enumerate(lines):
tokens = [t.strip() for t in line.split(' ') if t.strip()]
if tokens and tokens[0] in ('import', 'from',):
if '__future__' in line:
future_found = i
else:
break
if future_found >= 0:
idx_a = future_found + 1
task_init_patch = ''
if self.repo:
# if we do not have requirements, add clearml to the requirements.txt
if not reqs:
task_init_patch += \
"diff --git a/requirements.txt b/requirements.txt\n" \
"--- a/requirements.txt\n" \
"+++ b/requirements.txt\n" \
"@@ -0,0 +1,1 @@\n" \
"+clearml\n"
# Add Task.init call
task_init_patch += \
"diff --git a{script_entry} b{script_entry}\n" \
"--- a{script_entry}\n" \
"+++ b{script_entry}\n" \
"@@ -{idx_a},0 +{idx_b},3 @@\n" \
"+from clearml import Task\n" \
"+Task.init()\n" \
"+\n".format(
script_entry=script_entry, idx_a=idx_a, idx_b=idx_a + 1)
else:
# Add Task.init call
task_init_patch += \
"from clearml import Task\n" \
"Task.init()\n\n"
task_state['script']['diff'] = task_init_patch + task_state['script'].get('diff', '')
# set base docker image if provided
if self.docker:
task.set_base_docker(self.docker)
if self.verbose:
if task_state['script']['repository']:
repo_details = {k: v for k, v in task_state['script'].items()
if v and k not in ('diff', 'requirements', 'binary')}
print('Repository Detected\n{}'.format(json.dumps(repo_details, indent=2)))
else:
print('Standalone script detected\n Script: {}'.format(self.script))
if task_state['script'].get('requirements') and \
task_state['script']['requirements'].get('pip'):
print('Requirements:{}{}'.format(
'\n Using requirements.txt: {}'.format(
self.requirements_file.as_posix()) if self.requirements_file else '',
'\n {}Packages: {}'.format('Additional ' if self.requirements_file else '', self.packages)
if self.packages else ''
))
if self.docker:
print('Base docker image: {}'.format(self.docker))
# update the Task
task.update_task(task_state)
self.task = task
return task
def update_task_args(self, args=None):
# type: (Optional[Union[Sequence[str], Sequence[Tuple[str, str]]]]) -> ()
"""
Update the newly created Task argparse Arguments
If called before Task created, used for argument verification
:param args: Arguments to pass to the remote execution, list of string pairs (argument, value) or
list of strings '<argument>=<value>'. Example: ['lr=0.003', (batch_size, 64)]
"""
if not args:
return
# check args are in format <key>=<value>
args_list = []
for a in args:
if isinstance(a, (list, tuple)):
assert len(a) == 2
args_list.append(a)
continue
try:
parts = a.split('=', 1)
assert len(parts) == 2
args_list.append(parts)
except Exception:
raise ValueError(
"Failed parsing argument \'{}\', arguments must be in \'<key>=<value>\' format".format(a))
if not self.task:
return
task_params = self.task.get_parameters()
args_list = {'Args/{}'.format(k): v for k, v in args_list}
task_params.update(args_list)
self.task.set_parameters(task_params)
def get_id(self):
# type: () -> Optional[str]
"""
:return: Return the created Task id (str)
"""
return self.task.id if self.task else None
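# A hedged usage sketch; every value below is a placeholder, not a default:
#
# cap = CreateAndPopulate(
#     project_name="examples",
#     task_name="remote run",
#     repo="https://github.com/allegroai/clearml.git",
#     branch="master",
#     script="examples/reporting/scalar_reporting.py",
#     packages=["clearml"],
# )
# task = cap.create_task()
# cap.update_task_args(["iterations=10"])
# print(cap.get_id())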
| 48.82659
| 120
| 0.575352
|
05c81881b5c765f006e11e8b5944c8bec7c0772b
| 31,217
|
py
|
Python
|
packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py
|
souravrhythm/PySyft
|
08b44682959970ef395c529e92651fa77284124a
|
[
"Apache-1.1"
] | 8,428
|
2017-08-10T09:17:49.000Z
|
2022-03-31T08:20:14.000Z
|
packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py
|
bayegaspard/PySyft
|
f92dd6567bb16c6c91f4659e0ba109ad3a0e6cd5
|
[
"Apache-1.1"
] | 4,779
|
2017-08-09T23:19:00.000Z
|
2022-03-29T11:49:36.000Z
|
packages/syft/src/syft/core/tensor/smpc/mpc_tensor.py
|
bayegaspard/PySyft
|
f92dd6567bb16c6c91f4659e0ba109ad3a0e6cd5
|
[
"Apache-1.1"
] | 2,307
|
2017-08-10T08:52:12.000Z
|
2022-03-30T05:36:07.000Z
|
# future
from __future__ import annotations
# stdlib
import functools
import itertools
import secrets
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import numpy as np
import numpy.typing as npt
import torch
# relative
from . import utils
from .... import logger
from ....ast.klass import get_run_class_method
from ....grid import GridURL
from ...smpc.protocol.spdz import spdz
from ..passthrough import PassthroughTensor # type: ignore
from ..passthrough import SupportedChainType # type: ignore
from ..util import implements # type: ignore
from .share_tensor import ShareTensor
METHODS_FORWARD_ALL_SHARES = {
"repeat",
"copy",
"diagonal",
"flatten",
"transpose",
"partition",
"resize",
"ravel",
"compress",
"reshape",
"squeeze",
"swapaxes",
"sum",
"__pos__",
"__neg__",
"take",
"choose",
"cumsum",
"trace",
}
INPLACE_OPS = {
"resize",
}
PARTIES_REGISTER_CACHE: Dict[Any, GridURL] = {}
class MPCTensor(PassthroughTensor):
__slots__ = (
"seed_przs",
"mpc_shape",
"parties",
"parties_info",
"ring_size",
)
def __init__(
self,
parties: List[Any],
secret: Optional[Any] = None,
shares: Optional[List[ShareTensor]] = None,
shape: Optional[Tuple[int, ...]] = None,
seed_przs: Optional[int] = None,
ring_size: Optional[int] = None,
) -> None:
if secret is None and shares is None:
raise ValueError("Secret or shares should be populated!")
if (shares is not None) and (not isinstance(shares, (tuple, list))):
raise ValueError("Shares should be a list or tuple")
if seed_przs is None:
# No seed was provided, so generate a fresh random one.
# (Letting callers supply their own seed is insecure and should be used with caution.)
seed_przs = secrets.randbits(32)
self.seed_przs = seed_przs
self.parties = parties
self.parties_info = MPCTensor.get_parties_info(parties)
if ring_size is not None:
self.ring_size = ring_size
else:
self.ring_size = MPCTensor.get_ring_size_from_secret(secret, shares)
self.mpc_shape = shape
# TODO: We can get this from the secret if the secret is local
# TODO: https://app.clubhouse.io/openmined/story/1128/tech-debt-for-adp-smpc-demo?stories_sort_by\
# =priority&stories_group_by=WORKFLOW_STATE
if shape is None:
raise ValueError("Shape of the secret should be known")
if secret is not None:
shares = MPCTensor._get_shares_from_secret(
secret=secret,
parties=parties,
parties_info=self.parties_info,
shape=shape,
seed_przs=seed_przs,
ring_size=self.ring_size,
)
if shares is None:
raise ValueError("Shares should not be None at this step")
res = list(MPCTensor._mpc_from_shares(shares, parties=parties))
# we need to make sure that when we zip up clients from
# multiple MPC tensors that they are in corresponding order
# so we always sort all of them by the id of the domain
# TODO: store children as lists of dictionaries because eventually
# it's likely that we have multiple shares from the same client
# (For example, if you wanted a domain to have 90% share ownership
# you'd need to produce 10 shares and give 9 of them to the same domain)
# TODO captured: https://app.clubhouse.io/openmined/story/1128/tech-debt-for-adp-smpc-\
# demo?stories_sort_by=priority&stories_group_by=WORKFLOW_STATE
res.sort(key=lambda share: share.client.name + share.client.id.no_dash)
super().__init__(res)
@staticmethod
def get_ring_size_from_secret(
secret: Optional[Any] = None, shares: Optional[List[Any]] = None
) -> int:
if secret is None:
value = shares[0] # type: ignore
else:
value = secret
if utils.ispointer(value):
dtype = getattr(value, "public_dtype", None)
else:
dtype = getattr(value, "dtype", None)
ring_size = utils.TYPE_TO_RING_SIZE.get(dtype, None)
if ring_size is not None:
return ring_size
logger.warning("Ring size was not found! Defaulting to 2**32.")
return 2 ** 32
@staticmethod
def get_parties_info(parties: Iterable[Any]) -> List[GridURL]:
# relative
from ....grid.client import GridHTTPConnection
parties_info: List[GridURL] = []
for party in parties:
connection = party.routes[0].connection
if not isinstance(connection, GridHTTPConnection):
raise TypeError(
f"You tried to pass {type(connection)} for a multiplication-dependent operation. "
+ "Currently Syft works only with hagrid. "
+ "We apologize for the inconvenience. "
+ "We will add support for local python objects very soon."
)
base_url = PARTIES_REGISTER_CACHE.get(party, None)
if base_url is None:
base_url = connection.base_url
PARTIES_REGISTER_CACHE[party] = base_url
try:
pass
# We do not use sy.register, should reenable after fixing.
# sy.register( # nosec
# name="Howard Wolowtiz",
# email="howard@mit.edu",
# password="astronaut",
# url=url,
# port=port,
# verbose=False,
# )
except Exception:
pass  # no-op: registration above is currently disabled
# TODO : should modify to return same client if registered.
# print("Proxy Client already User Register", e)
if base_url is not None:
parties_info.append(base_url)
else:
raise Exception(
f"Failed to get GridURL from {base_url} for party {party}."
)
return parties_info
def publish(self, sigma: float) -> MPCTensor:
new_shares = []
for share in self.child:
share.block
for share in self.child:
new_share = share.publish(sigma=sigma)
new_shares.append(new_share)
return MPCTensor(
parties=self.parties,
shares=new_shares,
shape=self.mpc_shape,
seed_przs=self.seed_przs,
)
@property
def shape(self) -> Optional[Tuple[int, ...]]:
return self.mpc_shape
@staticmethod
def _mpc_from_shares(
shares: List[ShareTensor],
parties: Optional[List[Any]] = None,
) -> List[ShareTensor]:
if not isinstance(shares, (list, tuple)):
raise ValueError("_mpc_from_shares expected a list or tuple of shares")
if utils.ispointer(shares[0]):
# Remote shares
return shares
elif parties is None:
raise ValueError(
"Parties should not be None if shares are not already sent to parties"
)
else:
return MPCTensor._mpc_from_local_shares(shares, parties)
@staticmethod
def _mpc_from_local_shares(
shares: List[ShareTensor], parties: List[Any]
) -> List[ShareTensor]:
# TODO: ShareTensor needs to have serde serializer/deserializer
shares_ptr = [share.send(party) for share, party in zip(shares, parties)]
return shares_ptr
@staticmethod
def _get_shares_from_secret(
secret: Any,
parties: List[Any],
shape: Tuple[int, ...],
seed_przs: int,
parties_info: List[GridURL],
ring_size: int,
) -> List[ShareTensor]:
if utils.ispointer(secret):
if shape is None:
raise ValueError("Shape must be specified when the secret is remote")
return MPCTensor._get_shares_from_remote_secret(
secret=secret,
shape=shape,
parties=parties,
seed_przs=seed_przs,
parties_info=parties_info,
ring_size=ring_size,
)
return MPCTensor._get_shares_from_local_secret(
secret=secret,
seed_przs=seed_przs,
ring_size=ring_size,
shape=shape,
parties_info=parties_info,
)
@staticmethod
def _get_shares_from_remote_secret(
secret: Any,
shape: Tuple[int, ...],
parties: List[Any],
seed_przs: int,
parties_info: List[GridURL],
ring_size: int,
) -> List[ShareTensor]:
shares = []
for i, party in enumerate(parties):
if secret is not None and party == secret.client:
value = secret
else:
value = None
# relative
from ..autodp.single_entity_phi import (
TensorWrappedSingleEntityPhiTensorPointer,
)
if isinstance(secret, TensorWrappedSingleEntityPhiTensorPointer):
share_wrapper = secret.to_local_object_without_private_data_child()
share_wrapper_pointer = share_wrapper.send(party)
remote_share = party.syft.core.tensor.smpc.share_tensor.ShareTensor.generate_przs_on_dp_tensor(
rank=i,
parties_info=parties_info,
value=value,
shape=shape,
seed_przs=seed_przs,
share_wrapper=share_wrapper_pointer,
ring_size=ring_size,
)
else:
remote_share = (
party.syft.core.tensor.smpc.share_tensor.ShareTensor.generate_przs(
rank=i,
parties_info=parties_info,
value=value,
shape=shape,
seed_przs=seed_przs,
ring_size=ring_size,
)
)
shares.append(remote_share)
return shares
@staticmethod
def _get_shares_from_local_secret(
secret: Any,
shape: Tuple[int, ...],
seed_przs: int,
parties_info: List[GridURL],
ring_size: int = 2 ** 32,
) -> List[ShareTensor]:
shares = []
nr_parties = len(parties_info)
for i in range(nr_parties):
if i == nr_parties - 1:
value = secret
else:
value = None
local_share = ShareTensor.generate_przs(
rank=i,
parties_info=parties_info,
value=value,
shape=shape,
seed_przs=seed_przs,
init_clients=False,
ring_size=ring_size,
)
shares.append(local_share)
return shares
def request(
self,
reason: str = "",
block: bool = False,
timeout_secs: Optional[int] = None,
verbose: bool = False,
) -> None:
for child in self.child:
child.request(
reason=reason, block=block, timeout_secs=timeout_secs, verbose=verbose
)
@property
def block(self) -> "MPCTensor":
"""Block until all shares have been created."""
for share in self.child:
share.block
return self
def block_with_timeout(self, secs: int, secs_per_poll: int = 1) -> "MPCTensor":
"""Block until all shares have been created or until timeout expires."""
for share in self.child:
share.block_with_timeout(secs=secs, secs_per_poll=secs_per_poll)
return self
def reconstruct(self) -> np.ndarray:
# TODO: It might be that the resulting shares (if we ran any computation) are
# not yet available at this point. We need to fail gracefully here, with a clear
# description of which node's share could not be retrieved for reconstruction.
# Captured: https://app.clubhouse.io/openmined/story/1128/tech-debt-for-adp-smpc-demo?\
# stories_sort_by=priority&stories_group_by=WORKFLOW_STATE
# for now we need to convert the values coming back to int32
# sometimes they are floats coming from DP
def convert_child_numpy_type(tensor: Any, np_type: type) -> Any:
if isinstance(tensor, np.ndarray):
return np.array(tensor, np_type)
if hasattr(tensor, "child"):
tensor.child = convert_child_numpy_type(
tensor=tensor.child, np_type=np_type
)
return tensor
dtype = utils.RING_SIZE_TO_TYPE.get(self.ring_size, None)
if dtype is None:
raise ValueError(f"Type for ring size {self.ring_size} was not found!")
for share in self.child:
if not share.exists:
raise Exception(
"One of the shares doesn't exist. This probably means the SMPC "
"computation isn't yet complete. Try again in a moment, or call "
".block.reconstruct() instead to block until the SMPC operation "
"that creates this variable is complete."
)
local_shares = []
for share in self.child:
res = share.get()
res = convert_child_numpy_type(res, dtype)
local_shares.append(res)
is_share_tensor = isinstance(local_shares[0], ShareTensor)
if is_share_tensor:
local_shares = [share.child for share in local_shares]
result = local_shares[0]
op = ShareTensor.get_op(self.ring_size, "add")
for share in local_shares[1:]:
result = op(result, share)
if hasattr(result, "child") and isinstance(result.child, ShareTensor):
return result.child.child
return result
get = reconstruct
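# Illustrative sketch (not part of MPCTensor): additive secret sharing over a
# ring of size 2**32, which is the idea behind the shares generated above and
# the modular summation that reconstruct() performs via the ring's "add" op.
import secrets as _secrets
_ring = 2 ** 32
_secret = 42
_shares = [_secrets.randbelow(_ring) for _ in range(2)]
_shares.append((_secret - sum(_shares)) % _ring)   # last share absorbs the secret
assert sum(_shares) % _ring == _secret             # reconstruction = modular sum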
@staticmethod
def hook_method(__self: MPCTensor, method_name: str) -> Callable[..., Any]:
"""Hook a framework method.
Args:
method_name (str): method to hook
Returns:
A hooked method
"""
def method_all_shares(
_self: MPCTensor, *args: List[Any], **kwargs: Dict[Any, Any]
) -> Any:
shares = []
for share in _self.child:
method = getattr(share, method_name)
new_share = method(*args, **kwargs)
shares.append(new_share)
# TODO: generalize type after fixed precision
dummy_res = np.random.randint(
_self.mpc_shape[0], size=_self.mpc_shape, dtype=np.int32 # type: ignore
)
if method_name not in INPLACE_OPS:
dummy_res = getattr(dummy_res, method_name)(*args, **kwargs)
else:
getattr(dummy_res, method_name)(*args, **kwargs)
new_shape = dummy_res.shape
res = MPCTensor(parties=_self.parties, shares=shares, shape=new_shape)
return res
return functools.partial(method_all_shares, __self)
def __getattribute__(self, attr_name: str) -> Any:
if attr_name in METHODS_FORWARD_ALL_SHARES:
return MPCTensor.hook_method(self, attr_name)
return object.__getattribute__(self, attr_name)
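# Illustrative sketch (not part of MPCTensor): hook_method forwards a numpy
# method to every share and infers the result shape from a public dummy array,
# so no secret ever needs to be reconstructed. Plain numpy arrays stand in for
# the remote share pointers here (in-place ops are handled separately above).
import numpy as _np
_shares = [_np.arange(6).reshape(2, 3), _np.ones((2, 3), dtype=_np.int64)]
_new_shares = [s.reshape(3, 2) for s in _shares]    # forward the call to every share
_dummy = _np.zeros((2, 3), dtype=_np.int32)         # public dummy of the secret's shape
_new_shape = _dummy.reshape(3, 2).shape             # shape inference without the secret
assert _new_shape == (3, 2) and all(s.shape == (3, 2) for s in _new_shares)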
@staticmethod
def reshare(mpc_tensor: MPCTensor, parties: Iterable[Any]) -> MPCTensor:
"""Reshare a given secret to a superset of parties.
Args:
mpc_tensor(MPCTensor): input MPCTensor to reshare.
parties(List[Any]): Input parties List.
Returns:
res_mpc(MPCTensor): Reshared MPCTensor.
Raises:
ValueError: If the input parties are the same as the parties of the input MPCTensor.
Note:
We provide an additional layer of abstraction such that, when a computation
is performed on data belonging to different parties, the underlying secrets
are automatically converted into secret shares of their inputs.
Assume there are two parties, P1 and P2:
tensor_1 = data_pointer_1 (party 1 data)
tensor_2 = data_pointer_2 (party 2 data)
result -------> tensor_1 + tensor_1 (local computation, as the data
belongs to the same party)
The interesting scenario is
result -------> tensor_1 + tensor_2
Here the tensors belong to two different parties, so they are
automatically secret shared, without the user knowing that an
MPCTensor is being created underneath.
"""
mpc_parties = set(mpc_tensor.parties)
parties = set(parties)
shape = mpc_tensor.shape
seed_przs = mpc_tensor.seed_przs
client_map = {share.client: share for share in mpc_tensor.child}
if mpc_parties == parties:
raise ValueError(
"Input parties for resharing are same as the input parties."
)
parties_info = MPCTensor.get_parties_info(parties)
shares = [client_map.get(party) for party in parties]
for i, party in enumerate(parties):
shares[
i
] = party.syft.core.tensor.smpc.share_tensor.ShareTensor.generate_przs(
rank=i,
parties_info=parties_info,
value=shares[i],
shape=shape,
seed_przs=seed_przs,
ring_size=mpc_tensor.ring_size,
)
res_mpc = MPCTensor(shares=shares, ring_size=mpc_tensor.ring_size, shape=shape, parties=parties) # type: ignore
return res_mpc
@staticmethod
def sanity_checks(mpc_tensor: MPCTensor, other: Any) -> Tuple[MPCTensor, Any]:
"""Performs sanity checks and reshares data to the whole superset of parties involved.
Args:
mpc_tensor(MPCTensor): input MPCTensor to perform sanity check on.
other (Any): input operand.
Returns:
Tuple[MPCTensor, Any]: Reshared tensor values.
"""
if utils.ispointer(other):
parties = mpc_tensor.parties
client = other.client
public_shape = other.public_shape
if public_shape is None:
# TODO: Should be modified after Trask's Synthetic data PR.
raise ValueError("The input tensor pointer should have public shape.")
if client not in parties:
new_parties = [client]
new_parties += parties
mpc_tensor = MPCTensor.reshare(mpc_tensor, new_parties)
other = MPCTensor(secret=other, parties=new_parties, shape=public_shape)
elif isinstance(other, MPCTensor):
p1 = set(mpc_tensor.parties) # parties in first MPCTensor
p2 = set(other.parties) # parties in second MPCTensor.
if p1 != p2:
parties_union = p1.union(p2)
mpc_tensor = (
MPCTensor.reshare(mpc_tensor, parties_union)
if p1 != parties_union
else mpc_tensor
)
other = (
MPCTensor.reshare(other, parties_union)
if p2 != parties_union
else other
)
return mpc_tensor, other
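# Illustrative sketch (not part of MPCTensor): the party-set logic behind
# sanity_checks/reshare. When the two operands are shared among different
# party sets, each operand missing a party is reshared to the union before
# the operation runs. Plain strings stand in for clients here.
_p1 = {"alice", "bob"}          # parties of the first operand
_p2 = {"bob", "charlie"}        # parties of the second operand
_union = _p1 | _p2
_reshare_first = _p1 != _union  # reshare only the operands missing some parties
_reshare_second = _p2 != _union
assert _union == {"alice", "bob", "charlie"} and _reshare_first and _reshare_second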
def __apply_private_op(
self, other: MPCTensor, op_str: str, **kwargs: Dict[Any, Any]
) -> List[ShareTensor]:
# relative
from ..tensor import TensorPointer
op_method = f"__{op_str}__"
if op_str in {"add", "sub"}:
if len(self.child) != len(other.child):
raise ValueError(
"Zipping two different lengths will drop data. "
+ f"{len(self.child)} vs {len(other.child)}"
)
if not isinstance(self.child[0], TensorPointer):
res_shares = [
getattr(a, op_method)(a, b, **kwargs)
for a, b in zip(self.child, other.child)
]
else:
res_shares = []
attr_path_and_name = f"{self.child[0].path_and_name}.__{op_str}__"
op = get_run_class_method(attr_path_and_name, SMPC=True)
for x, y in zip(self.child, other.child):
res_shares.append(op(x, x, y, **kwargs))
else:
raise ValueError(f"MPCTensor Private {op_str} not supported")
return res_shares
def __apply_public_op(
self, y: Any, op_str: str, **kwargs: Dict[Any, Any]
) -> List[ShareTensor]:
# relative
from ..tensor import TensorPointer
op_method = f"__{op_str}__"
if op_str in {"mul", "matmul", "add", "sub"}:
if not isinstance(self.child[0], TensorPointer):
res_shares = [
getattr(share, op_method)(share, y, **kwargs)
for share in self.child
]
else:
res_shares = []
attr_path_and_name = f"{self.child[0].path_and_name}.__{op_str}__"
op = get_run_class_method(attr_path_and_name, SMPC=True)
for share in self.child:
res_shares.append(op(share, share, y, **kwargs))
else:
raise ValueError(f"MPCTensor Public {op_str} not supported")
return res_shares
def __apply_op(
self,
y: Union[int, float, torch.Tensor, np.ndarray, MPCTensor],
op_str: str,
) -> MPCTensor:
"""Apply an operation between "self" (an MPCTensor) and "y".
This function checks whether "y" is a private or a public value.
Args:
y (Union[int, float, torch.Tensor, np.ndarray, MPCTensor]): operand to apply the operation with.
op_str (str): the operation.
Returns:
MPCTensor: the result of applying the operation "op_str" to "self" and "y".
"""
x, y = MPCTensor.sanity_checks(self, y)
kwargs: Dict[Any, Any] = {"seed_id_locations": secrets.randbits(64)}
if isinstance(y, MPCTensor):
result = x.__apply_private_op(y, op_str, **kwargs)
y_ring_size = y.ring_size
else:
result = x.__apply_public_op(y, op_str, **kwargs)
y_ring_size = MPCTensor.get_ring_size_from_secret(y)
if self.ring_size != y_ring_size:
raise ValueError(
f"Ring size mismatch between self {self.ring_size} and other {y_ring_size}"
)
y_shape = getattr(y, "shape", (1,))
shape = utils.get_shape(op_str, self.shape, y_shape)
ring_size = utils.get_ring_size(self.ring_size, y_ring_size)
result = MPCTensor(
shares=result, shape=shape, ring_size=ring_size, parties=x.parties
)
return result
def add(
self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]
) -> MPCTensor:
"""Apply the "add" operation between "self" and "y".
Args:
y (Union[MPCTensor, torch.Tensor, float, int]): self + y
Returns:
MPCTensor. Result of the operation.
"""
res = self.__apply_op(y, "add")
return res
def sub(self, y: MPCTensor) -> MPCTensor:
res = self.__apply_op(y, "sub")
return res
def rsub(self, y: MPCTensor) -> MPCTensor:
new_self = self * (-1)
res = new_self.__apply_op(y, "add")
return res
def mul(
self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]
) -> MPCTensor:
# relative
from ..tensor import TensorPointer
self, y = MPCTensor.sanity_checks(self, y)
kwargs: Dict[Any, Any] = {"seed_id_locations": secrets.randbits(64)}
op = "__mul__"
res_shares: List[Any] = []
if isinstance(y, MPCTensor):
res_shares = spdz.mul_master(self, y, "mul", **kwargs)
else:
if not isinstance(self.child[0], TensorPointer):
res_shares = [
getattr(a, op)(a, b, **kwargs) for a, b in zip(self.child, itertools.repeat(y)) # type: ignore
]
else:
attr_path_and_name = f"{self.child[0].path_and_name}.__mul__"
tensor_op = get_run_class_method(attr_path_and_name, SMPC=True)
for share in self.child:
res_shares.append(tensor_op(share, share, y, **kwargs))
y_shape = getattr(y, "shape", (1,))
new_shape = utils.get_shape("mul", self.mpc_shape, y_shape)
res = MPCTensor(
parties=self.parties,
shares=res_shares,
shape=new_shape,
ring_size=self.ring_size,
)
return res
def lt(
self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]
) -> MPCTensor:
mpc_res = spdz.lt_master(self, y, "mul") # type: ignore
return mpc_res
def gt(
self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]
) -> MPCTensor:
mpc_res = MPCTensor.lt(y, self) # type: ignore
return mpc_res
def ge(
self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]
) -> MPCTensor:
mpc_res = 1 - MPCTensor.lt(self, y)
return mpc_res # type: ignore
def le(
self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]
) -> MPCTensor:
mpc_res = 1 - MPCTensor.lt(y, self) # type: ignore
return mpc_res # type: ignore
def eq(
self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]
) -> MPCTensor:
# TODO: Should make two comparisons parallel
mpc_res = MPCTensor.le(self, y) - MPCTensor.lt(self, y)
return mpc_res
def ne(
self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]
) -> MPCTensor:
mpc_res = 1 - MPCTensor.eq(self, y)
return mpc_res # type: ignore
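# Illustrative sketch (not part of MPCTensor): the identities the comparison
# methods above derive from a single lt primitive, checked on plain numpy
# arrays instead of secret-shared tensors.
import numpy as _np
_x = _np.array([1, 5, 7])
_y = _np.array([4, 5, 2])
_lt = (_x < _y).astype(_np.int64)      # the only "real" SMPC comparison
_gt = (_y < _x).astype(_np.int64)      # gt(x, y) == lt(y, x)
_ge = 1 - _lt                          # ge(x, y) == 1 - lt(x, y)
_le = 1 - _gt                          # le(x, y) == 1 - lt(y, x)
_eq = _le - _lt                        # eq(x, y) == le(x, y) - lt(x, y)
_ne = 1 - _eq                          # ne(x, y) == 1 - eq(x, y)
assert (_eq == (_x == _y).astype(_np.int64)).all()
assert (_ne == (_x != _y).astype(_np.int64)).all()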
def matmul(
self, y: Union[int, float, np.ndarray, torch.tensor, "MPCTensor"]
) -> MPCTensor:
"""Apply the "matmul" operation between "self" and "y"
Args:
y (Union[int, float, np.ndarray, torch.tensor, "MPCTensor"]): self @ y
Returns:
MPCTensor: Result of the operation.
"""
if isinstance(y, ShareTensor):
raise ValueError("Private matmul not supported yet")
res = self.__apply_op(y, "matmul")
return res
def put(
self,
indices: npt.ArrayLike,
values: npt.ArrayLike,
mode: Optional[str] = "raise",
) -> MPCTensor:
"""Performs Numpy put operation on the underlying ShareTensors.
Args:
indices (npt.ArrayLike): Target indices, interpreted as integers.
values (npt.ArrayLike): Values to place at target indices.
mode (Optional[str]): Specifies how out-of-bounds indices will behave.
Returns:
res (MPCTensor): Result of the operation.
"""
shares = []
shares.append(self.child[0].put(indices, values, mode))
# since the value is public we assign directly to prevent overhead of random share creation.
zero = np.zeros_like(values)
for share in self.child[1::]:
shares.append(share.put(indices, zero.copy(), mode))
res = MPCTensor(shares=shares, parties=self.parties, shape=self.shape)
return res
def sign(self) -> MPCTensor:
"""Calculate sign of given tensor.
Returns:
MPCTensor: with computed sign.
"""
res: MPCTensor = 2 * (self > 0) - 1 # type: ignore
return res
def __abs__(self) -> MPCTensor:
"""Calculates absolute value of MPCTensor.
Returns:
MPCTensor: computed absolute values of underlying tensor.
"""
return self * self.sign()
def __pow__(self, power: int) -> MPCTensor:
"""Compute integer power of a number iteratively using mul.
- If the power is odd, multiply the running result by the base.
- Halve the power and square the base, repeating until the power reaches zero.
Args:
power (int): the integer exponent to raise the tensor to.
Returns:
MPCTensor: Result of the pow operation
Raises:
RuntimeError: if negative power is given
"""
# TODO: Implement after we have reciprocal function.
if power < 0:
raise RuntimeError("Negative integer powers not supported yet.")
base = self
# TODO: should modify for general ring sizes.
result = np.ones(shape=self.shape, dtype=np.int32)
while power > 0:
# If power is odd
if power % 2 == 1:
result = base * result
result.block
# Divide the power by 2
power = power // 2
# Multiply base to itself
base = base * base
base.block
return result
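# Illustrative sketch (not part of MPCTensor): the same square-and-multiply
# loop as __pow__ above, on plain integers, showing why only O(log power)
# multiplications (and .block synchronisations) are needed.
_base, _power, _result = 3, 5, 1
while _power > 0:
    if _power % 2 == 1:        # odd power: fold one factor of the base into the result
        _result = _base * _result
    _power //= 2               # halve the power ...
    _base = _base * _base      # ... and square the base
assert _result == 3 ** 5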
def __str__(self) -> str:
res = "MPCTensor"
for share in self.child:
res = f"{res}\n\t{share}"
return res
@property
def synthetic(self) -> np.ndarray:
# TODO finish. max_vals and min_vals not available at present.
return (
np.random.rand(*list(self.shape)) * (self.max_vals - self.min_vals) # type: ignore
+ self.min_vals
).astype(self.public_dtype)
def __repr__(self) -> str:
# out = self.synthetic.__repr__()
# out += "\n\n (The data printed above is synthetic - it's an imitation of the real data.)"
out = ""
out += "\n\nMPCTensor"
out += ".shape=" + str(self.shape) + "\n"
for i, child in enumerate(self.child):
out += f"\t .child[{i}] = " + child.__repr__() + "\n"
out = out[:-1] + ""
return out
__add__ = add
__radd__ = add
__sub__ = sub
__rsub__ = rsub
__mul__ = mul
__rmul__ = mul
__matmul__ = matmul
__lt__ = lt
__gt__ = gt
__ge__ = ge
__le__ = le
__eq__ = eq
__ne__ = ne
@implements(MPCTensor, np.add)
def add(x: np.ndarray, y: MPCTensor) -> SupportedChainType:
return y.add(x)
@implements(MPCTensor, np.subtract)
def sub(x: np.ndarray, y: MPCTensor) -> SupportedChainType:
return y.rsub(x)
@implements(MPCTensor, np.multiply)
def mul(x: np.ndarray, y: MPCTensor) -> SupportedChainType:
return y.mul(x)
# @implements(MPCTensor, np.greater)
# def mul(x: np.ndarray, y: MPCTensor) -> SupportedChainType:
# return y.gt(x)
| 33.280384
| 120
| 0.572925
|
b46d7aaf79ad7c30dae4a1a8ee7f51facaac5943
| 1,381
|
py
|
Python
|
HTC_lib/VASP/KPOINTS/Write_HSE06_band_str_KPOINTS.py
|
bitsoal/VASP_HTC_framework
|
e947227f238fc3ff4d950c1e5fd0a4995b9bf3eb
|
[
"MIT"
] | 10
|
2018-03-06T01:27:53.000Z
|
2021-12-03T07:27:33.000Z
|
HTC_lib/VASP/KPOINTS/Write_HSE06_band_str_KPOINTS.py
|
bitsoal/VASP_HTC_framework
|
e947227f238fc3ff4d950c1e5fd0a4995b9bf3eb
|
[
"MIT"
] | null | null | null |
HTC_lib/VASP/KPOINTS/Write_HSE06_band_str_KPOINTS.py
|
bitsoal/VASP_HTC_framework
|
e947227f238fc3ff4d950c1e5fd0a4995b9bf3eb
|
[
"MIT"
] | 5
|
2018-03-06T02:08:58.000Z
|
2021-08-18T02:12:40.000Z
|
# coding: utf-8
# In[5]:
import re, os, sys
# In[21]:
def read_band_str_kpoints_from_OUTCAR(cal_loc):
kpoint_list = []
with open(os.path.join(cal_loc, "OUTCAR"), "r") as f:
for line in f:
if "k-point" in line and 'plane waves' in line:
kpoint = line.split(":")[1].split("plane waves")[0]
kpoint_list.append(re.findall(r"-*[\.0-9]+", kpoint))
return kpoint_list
def read_scf_IBZKPT(cal_loc):
with open(os.path.join(cal_loc, "IBZKPT"), "r") as f:
kpoint_list = [line.strip() for line in f if line.strip()]
no_of_kpoints = int(kpoint_list[1])
#import pprint
#pprint.pprint(kpoint_list[:no_of_kpoints+3])
return kpoint_list[:no_of_kpoints+3]
# In[22]:
def write_HSE06_band_str_KPOINTS(IBZKPT, band_str_KPT):
IBZKPT[1] = str(int(IBZKPT[1]) + len(band_str_KPT))
with open("KPOINTS", "w") as f:
for line in IBZKPT:
f.write(line + "\n")
for line in band_str_KPT:
f.write("{} {} {} ".format(*line) + "0\n")
# In[23]:
if __name__ == '__main__':
#IBZKPT = read_scf_IBZKPT("data")
#band_str_KPT = read_band_str_kpoints_from_OUTCAR("data/")
IBZKPT = read_scf_IBZKPT(sys.argv[1])
band_str_KPT = read_band_str_kpoints_from_OUTCAR(sys.argv[2])
write_HSE06_band_str_KPOINTS(IBZKPT, band_str_KPT)
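# Illustrative usage sketch (not part of the original script). The directory
# names are hypothetical: argv[1] must contain the IBZKPT of a preceding SCF
# calculation and argv[2] the OUTCAR of a standard band-structure run, e.g.
#
#     python Write_HSE06_band_str_KPOINTS.py scf_cal_dir band_str_cal_dir
#
# The resulting KPOINTS keeps the SCF k-points with their original weights and
# appends the band-structure k-points with zero weight. A toy in-memory call:
#
#     IBZKPT = ["Automatically generated mesh", "2", "Reciprocal lattice",
#               "0.0 0.0 0.0 1", "0.5 0.0 0.0 1"]
#     band_str_KPT = [["0.0", "0.0", "0.0"], ["0.25", "0.0", "0.0"]]
#     write_HSE06_band_str_KPOINTS(IBZKPT, band_str_KPT)
#     # writes ./KPOINTS with the k-point count bumped from 2 to 4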
| 26.056604
| 68
| 0.615496
|
95949b0241062b4801ccad8a0bbd993c9735e0cb
| 502
|
py
|
Python
|
Python/5KYU/MaximumSubArraySum.py
|
CasperKristiansson/Code-Wars-Challenges
|
5ffe276d2c30c42349dfa1482b2bdf14782ce43e
|
[
"MIT"
] | null | null | null |
Python/5KYU/MaximumSubArraySum.py
|
CasperKristiansson/Code-Wars-Challenges
|
5ffe276d2c30c42349dfa1482b2bdf14782ce43e
|
[
"MIT"
] | null | null | null |
Python/5KYU/MaximumSubArraySum.py
|
CasperKristiansson/Code-Wars-Challenges
|
5ffe276d2c30c42349dfa1482b2bdf14782ce43e
|
[
"MIT"
] | null | null | null |
# https://www.codewars.com/kata/54521e9ec8e60bc4de000d6c/train/python
def max_sequence(arr):
total = 0
for i in range(len(arr)):
for j in range(len(arr), -1, -1):
if(sum(arr[i:j + 1]) > total):
total = sum(arr[i:j + 1])
return total
# Another solution with O(n) time complexity (Kadane's algorithm)
def maxSequence(arr):
maxl = 0
maxg = 0
for n in arr:
maxl = max(0, maxl + n)
maxg = max(maxg, maxl)
# print(maxl, maxg)  # debugging aid; not needed for the solution
return maxg
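# Quick sanity check of both solutions above on a classic input: the best
# contiguous subarray of the list below is [4, -1, 2, 1], which sums to 6,
# and (per the kata) an all-negative input yields 0.
_sample = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
assert max_sequence(_sample) == 6 and maxSequence(_sample) == 6
assert max_sequence([-1, -2, -3]) == 0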
| 25.1
| 69
| 0.557769
|
ebea0040ccf72c6b0fe73cc7514aea5681a38acb
| 2,060
|
py
|
Python
|
setup.py
|
raviraina/pybagit
|
86ac67038f57d350dd823c0878328376554b2838
|
[
"MIT"
] | null | null | null |
setup.py
|
raviraina/pybagit
|
86ac67038f57d350dd823c0878328376554b2838
|
[
"MIT"
] | null | null | null |
setup.py
|
raviraina/pybagit
|
86ac67038f57d350dd823c0878328376554b2838
|
[
"MIT"
] | 1
|
2021-06-07T19:26:01.000Z
|
2021-06-07T19:26:01.000Z
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
longdesc = """
PyBagIt Version 1.5.3
This module helps with creating and managing BagIt-compliant packages. It has
been created to conform to BagIt v0.97.
Documentation is available at http://ahankinson.github.io/pybagit.
Code hosting is available on GitHub at http://github.com/ahankinson/pybagit/.
Requirements
------------
Tested with Python 2.6+ (2.5 or lower will not work.)
No external modules required.
Module has not been tested on Windows.
Running Tests
-------------
There are a number of unit tests written to verify that this module functions
as expected. To run this, simply type
python setup.py test
in the package directory. NOTE: You will need a network connection to verify
the 'fetch' tests. If you don't have a network connection you can skip these
tests by commenting them out in 'bagtest.py'
Setup and Install
-----------------
To install this module, simply run:
python setup.py install
You can also install it with easy_install:
easy_install pybagit
(you may need to run these commands as a privileged user, e.g. 'sudo')
"""
setup(
name="pybagit",
long_description=longdesc,
version="1.5.3",
url="http://ahankinson.github.io/pybagit",
author="Andrew Hankinson",
author_email="andrew.hankinson@mail.mcgill.ca",
license="http://www.opensource.org/licenses/mit-license.php",
packages=find_packages(exclude=["ez_setup"]),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Internet",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Archiving :: Packaging",
],
include_package_data=True,
description="A library for dealing with BagIt packages.",
test_suite="bagtest",
)
| 27.837838
| 77
| 0.707282
|
9d7c00120a66fb09c07e5619c2544cbe96451947
| 135,435
|
py
|
Python
|
src/sage/plot/plot.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 3
|
2019-07-15T13:48:24.000Z
|
2019-11-08T12:31:43.000Z
|
src/sage/plot/plot.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 2
|
2018-10-30T13:40:20.000Z
|
2020-07-23T12:13:30.000Z
|
src/sage/plot/plot.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 1
|
2020-07-23T10:29:58.000Z
|
2020-07-23T10:29:58.000Z
|
r"""
2D Plotting
Sage provides extensive 2D plotting functionality. The underlying
rendering is done using the matplotlib Python library.
The following graphics primitives are supported:
- :func:`~sage.plot.arrow.arrow` - an arrow from a min point to a max point.
- :func:`~sage.plot.circle.circle` - a circle with given radius
- :func:`~sage.plot.ellipse.ellipse` - an ellipse with given radii
and angle
- :func:`~sage.plot.arc.arc` - an arc of a circle or an ellipse
- :func:`~sage.plot.disk.disk` - a filled disk (i.e. a sector or wedge of a circle)
- :func:`~sage.plot.line.line` - a line determined by a sequence of points (this need not
be straight!)
- :func:`~sage.plot.point.point` - a point
- :func:`~sage.plot.text.text` - some text
- :func:`~sage.plot.polygon.polygon` - a filled polygon
The following plotting functions are supported:
- :func:`plot` - plot of a function or other Sage object (e.g., elliptic
curve).
- :func:`parametric_plot`
- :func:`~sage.plot.contour_plot.implicit_plot`
- :func:`polar_plot`
- :func:`~sage.plot.contour_plot.region_plot`
- :func:`list_plot`
- :func:`~sage.plot.scatter_plot.scatter_plot`
- :func:`~sage.plot.bar_chart.bar_chart`
- :func:`~sage.plot.contour_plot.contour_plot`
- :func:`~sage.plot.density_plot.density_plot`
- :func:`~sage.plot.plot_field.plot_vector_field`
- :func:`~sage.plot.plot_field.plot_slope_field`
- :func:`~sage.plot.matrix_plot.matrix_plot`
- :func:`~sage.plot.complex_plot.complex_plot`
- :func:`graphics_array`
- The following log plotting functions:
- :func:`plot_loglog`
- :func:`plot_semilogx` and :func:`plot_semilogy`
- :func:`list_plot_loglog`
- :func:`list_plot_semilogx` and :func:`list_plot_semilogy`
The following miscellaneous Graphics functions are included:
- :func:`Graphics`
- :func:`is_Graphics`
- :func:`~sage.plot.colors.hue`
Type ``?`` after each primitive in Sage for help and examples.
EXAMPLES:
We draw a curve::
sage: plot(x^2, (x,0,5))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2, (x,0,5))
sphinx_plot(g)
We draw a circle and a curve::
sage: circle((1,1), 1) + plot(x^2, (x,0,5))
Graphics object consisting of 2 graphics primitives
.. PLOT::
g = circle((1,1), 1) + plot(x**2, (x,0,5))
sphinx_plot(g)
Notice that the aspect ratio of the above plot makes the plot very tall
because the plot adopts the default aspect ratio of the circle (to make
the circle appear like a circle). We can change the aspect ratio to be
what we normally expect for a plot by explicitly asking for an
'automatic' aspect ratio::
sage: show(circle((1,1), 1) + plot(x^2, (x,0,5)), aspect_ratio='automatic')
The aspect ratio describes the apparent height/width ratio of a unit
square. If you want the vertical units to be twice as big as the
horizontal units, specify an aspect ratio of 2::
sage: show(circle((1,1), 1) + plot(x^2, (x,0,5)), aspect_ratio=2)
The ``figsize`` option adjusts the figure size. The default figsize is
4. To make a figure that is roughly twice as big, use ``figsize=8``::
sage: show(circle((1,1), 1) + plot(x^2, (x,0,5)), figsize=8)
You can also give separate horizontal and vertical dimensions. Both
will be measured in inches::
sage: show(circle((1,1), 1) + plot(x^2, (x,0,5)), figsize=[4,8])
However, do not make the figsize too big (e.g. one dimension greater
than 327 or both in the mid-200s) as this will lead to errors or crashes.
See :meth:`~sage.plot.graphics.Graphics.show` for full details.
Note that the axes will not cross if the data is not on both sides of
both axes, even if it is quite close::
sage: plot(x^3, (x,1,10))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**3, (x,1,10))
sphinx_plot(g)
When the labels have quite different orders of magnitude or are very
large, scientific notation (the `e` notation for powers of ten) is used::
sage: plot(x^2, (x,480,500)) # no scientific notation
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2, (x,480,500))
sphinx_plot(g)
::
sage: plot(x^2, (x,300,500)) # scientific notation on y-axis
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2, (x,300,500))
sphinx_plot(g)
But you can fix your own tick labels, if you know what to expect and
have a preference::
sage: plot(x^2, (x,300,500), ticks=[100,50000])
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2, (x,300,500), ticks=[100,50000])
sphinx_plot(g)
To change the ticks on one axis only, use the following notation::
sage: plot(x^2, (x,300,500), ticks=[None,50000])
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2, (x,300,500), ticks=[None,50000])
sphinx_plot(g)
You can even have custom tick labels along with custom positioning. ::
sage: plot(x^2, (x,0,3), ticks=[[1,2.5],pi/2], tick_formatter=[["$x_1$","$x_2$"],pi]) # long time
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2, (x,0,3), ticks=[[1,2.5],pi/2], tick_formatter=[["$x_1$","$x_2$"],pi])
sphinx_plot(g)
We construct a plot involving several graphics objects::
sage: G = plot(cos(x), (x, -5, 5), thickness=5, color='green', title='A plot')
sage: P = polygon([[1,2], [5,6], [5,0]], color='red')
sage: G + P
Graphics object consisting of 2 graphics primitives
.. PLOT::
G = plot(cos(x), (x, -5, 5), thickness=5, color='green', title='A plot')
P = polygon([[1,2], [5,6], [5,0]], color='red')
sphinx_plot(G + P)
Next we construct the reflection of the above polygon about the
`y`-axis by iterating over the list of first-coordinates of
the first graphic element of ``P`` (which is the actual
Polygon; note that ``P`` is a Graphics object, which consists
of a single polygon)::
sage: Q = polygon([(-x,y) for x,y in P[0]], color='blue')
sage: Q # show it
Graphics object consisting of 1 graphics primitive
.. PLOT::
P = polygon([[1,2], [5,6], [5,0]], color='red')
Q = polygon([(-x,y) for x,y in P[0]], color='blue')
sphinx_plot(Q)
We combine together different graphics objects using "+"::
sage: H = G + P + Q
sage: print(H)
Graphics object consisting of 3 graphics primitives
sage: type(H)
<class 'sage.plot.graphics.Graphics'>
sage: H[1]
Polygon defined by 3 points
sage: list(H[1])
[(1.0, 2.0), (5.0, 6.0), (5.0, 0.0)]
sage: H # show it
Graphics object consisting of 3 graphics primitives
.. PLOT::
G = plot(cos(x), (x, -5, 5), thickness=5, color='green', title='A plot')
P = polygon([[1,2], [5,6], [5,0]], color='red')
Q = polygon([(-x,y) for x,y in P[0]], color='blue')
H = G + P + Q
sphinx_plot(H)
We can put text in a graph::
sage: L = [[cos(pi*i/100)^3,sin(pi*i/100)] for i in range(200)]
sage: p = line(L, rgbcolor=(1/4,1/8,3/4))
sage: t = text('A Bulb', (1.5, 0.25))
sage: x = text('x axis', (1.5,-0.2))
sage: y = text('y axis', (0.4,0.9))
sage: g = p+t+x+y
sage: g.show(xmin=-1.5, xmax=2, ymin=-1, ymax=1)
.. PLOT::
L = [[cos(pi*i/100)**3,sin(pi*i/100)] for i in range(200)]
p = line(L, rgbcolor=(1.0/4.0,1.0/8.0,3.0/4.0))
t = text('A Bulb', (1.5, 0.25))
x = text('x axis', (1.5,-0.2))
y = text('y axis', (0.4,0.9))
g = p+t+x+y
g.xmin(-1.5)
g.xmax(2)
g.ymin(-1)
g.ymax(1)
sphinx_plot(g)
We can add a title to a graph::
sage: x = var('x')
sage: plot(x^2, (x,-2,2), title='A plot of $x^2$')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g=plot(x**2, (x,-2,2), title='A plot of $x^2$')
sphinx_plot(g)
We can set the position of the title::
sage: plot(x^2, (-2,2), title='Plot of $x^2$', title_pos=(0.5,-0.05))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g=plot(x**2, (-2,2), title='Plot of $x^2$', title_pos=(0.5,-0.05))
sphinx_plot(g)
We plot the Riemann zeta function along the critical line and see
the first few zeros::
sage: i = CDF.0 # define i this way for maximum speed.
sage: p1 = plot(lambda t: arg(zeta(0.5+t*i)), 1, 27, rgbcolor=(0.8,0,0))
sage: p2 = plot(lambda t: abs(zeta(0.5+t*i)), 1, 27, color=hue(0.7))
sage: print(p1 + p2)
Graphics object consisting of 2 graphics primitives
sage: p1 + p2 # display it
Graphics object consisting of 2 graphics primitives
.. PLOT::
from sage.rings.complex_double import ComplexDoubleElement
i = ComplexDoubleElement(0,1) # define i this way for maximum speed.
p1 = plot(lambda t: arg(zeta(0.5+t*i)), 1, 27, rgbcolor=(0.8,0,0))
p2 = plot(lambda t: abs(zeta(0.5+t*i)), 1, 27, color=hue(0.7))
g = p1 + p2
sphinx_plot(g)
.. NOTE::
Not all functions in Sage are symbolic. When plotting non-symbolic functions
they should be wrapped in ``lambda``::
sage: plot(lambda x:fibonacci(round(x)), (x,1,10))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g=plot(lambda x:fibonacci(round(x)), (x,1,10))
sphinx_plot(g)
Many concentric circles shrinking toward the origin::
sage: show(sum(circle((i,0), i, hue=sin(i/10)) for i in [10,9.9,..,0])) # long time
.. PLOT::
g = sum(circle((i,0), i, hue=sin(i/10)) for i in srange(0,10,0.1))
sphinx_plot(g)
Here is a pretty graph::
sage: g = Graphics()
sage: for i in range(60):
....: p = polygon([(i*cos(i),i*sin(i)), (0,i), (i,0)],\
....: color=hue(i/40+0.4), alpha=0.2)
....: g = g + p
sage: g.show(dpi=200, axes=False)
.. PLOT::
g=Graphics()
for i in range(60):
# i/40 doesn't convert to real number
p = polygon([(i*cos(i),i*sin(i)), (0,i), (i,0)],\
color=hue(0.025*i+0.4), alpha=0.2)
g = g + p
g.axes(False)
sphinx_plot(g)
Another graph::
sage: x = var('x')
sage: P = plot(sin(x)/x, -4, 4, color='blue') + \
....: plot(x*cos(x), -4, 4, color='red') + \
....: plot(tan(x), -4, 4, color='green')
sage: P.show(ymin=-pi, ymax=pi)
.. PLOT::
g = plot(sin(x)/x, -4, 4, color='blue') + \
plot(x*cos(x), -4, 4, color='red') + \
plot(tan(x), -4, 4, color='green')
g.ymin(-pi)
g.ymax(pi)
sphinx_plot(g)
PYX EXAMPLES: These are some examples of plots similar to some of
the plots in the PyX (http://pyx.sourceforge.net) documentation:
Symbolline::
sage: y(x) = x*sin(x^2)
sage: v = [(x, y(x)) for x in [-3,-2.95,..,3]]
sage: show(points(v, rgbcolor=(0.2,0.6, 0.1), pointsize=30) + plot(spline(v), -3.1, 3))
.. PLOT::
#y(x)=x*sin(x**2) gave SyntaxError: can't assign to function call
def y(x): return x*sin(x**2)
v=list()
for x in srange(-3,3,0.05):
v.append((x, y(x)))
g = points(v, rgbcolor=(0.2,0.6, 0.1), pointsize=30) + plot(spline(v), -3.1, 3)
sphinx_plot(g)
Cycliclink::
sage: x = var('x')
sage: g1 = plot(cos(20*x)*exp(-2*x), 0, 1)
sage: g2 = plot(2*exp(-30*x) - exp(-3*x), 0, 1)
sage: show(graphics_array([g1, g2], 2, 1), xmin=0)
.. PLOT::
g1 = plot(cos(20*x)*exp(-2*x), 0, 1)
g2 = plot(2*exp(-30*x) - exp(-3*x), 0, 1)
g = graphics_array([g1, g2], 2, 1)
sphinx_plot(g)
Pi Axis::
sage: g1 = plot(sin(x), 0, 2*pi)
sage: g2 = plot(cos(x), 0, 2*pi, linestyle="--")
sage: (g1+g2).show(ticks=pi/6, tick_formatter=pi) # long time # show their sum, nicely formatted
.. PLOT::
g1 = plot(sin(x), 0, 2*pi, ticks=pi/6, tick_formatter=pi)
g2 = plot(cos(x), 0, 2*pi, linestyle="--", ticks=pi/6, tick_formatter=pi)
sphinx_plot(g1+g2)
An illustration of integration::
sage: f(x) = (x-3)*(x-5)*(x-7)+40
sage: P = line([(2,0),(2,f(2))], color='black')
sage: P += line([(8,0),(8,f(8))], color='black')
sage: P += polygon([(2,0),(2,f(2))] + [(x, f(x)) for x in [2,2.1,..,8]] + [(8,0),(2,0)], rgbcolor=(0.8,0.8,0.8),aspect_ratio='automatic')
sage: P += text("$\\int_{a}^b f(x) dx$", (5, 20), fontsize=16, color='black')
sage: P += plot(f, (1, 8.5), thickness=3)
sage: P # show the result
Graphics object consisting of 5 graphics primitives
.. PLOT::
#inline f substitution to avoid SyntaxError: can't assign to function call in sphinx_plot
def f(x): return (x-3)*(x-5)*(x-7)+40
P = line([(2,0),(2,f(2))], color='black')
P = P + line([(8,0),(8,f(8))], color='black')
L = list(((2,0), (2,f(2))))
for i in srange(2,8.1,0.1):
L.append((i,f(i)))
L.append((8,0))
L.append((2,0))
P = P + polygon(L, rgbcolor=(0.8,0.8,0.8), aspect_ratio='automatic')
P = P + text("$\\int_{a}^b f(x) dx$", (5, 20), fontsize=16, color='black')
P = P + plot(f, (1, 8.5), thickness=3)
sphinx_plot(P)
NUMERICAL PLOTTING:
Sage includes Matplotlib, which provides 2D plotting with an interface
that is likely very familiar to people doing numerical
computation.
You can use ``plt.clf()`` to clear the current image frame
and ``plt.close()`` to close it.
For example,
::
sage: import pylab as plt
sage: t = plt.arange(0.0, 2.0, 0.01)
sage: s = sin(2*pi*t)
sage: P = plt.plot(t, s, linewidth=1.0)
sage: xl = plt.xlabel('time (s)')
sage: yl = plt.ylabel('voltage (mV)')
sage: t = plt.title('About as simple as it gets, folks')
sage: plt.grid(True)
sage: plt.savefig(os.path.join(SAGE_TMP, 'sage.png'))
sage: plt.clf()
sage: plt.savefig(os.path.join(SAGE_TMP, 'blank.png'))
sage: plt.close()
sage: plt.imshow([[1,2],[0,1]])
<matplotlib.image.AxesImage object at ...>
We test that ``imshow`` works as well, verifying that
:trac:`2900` is fixed (in Matplotlib).
::
sage: plt.imshow([[(0.0,0.0,0.0)]])
<matplotlib.image.AxesImage object at ...>
sage: plt.savefig(os.path.join(SAGE_TMP, 'foo.png'))
Since the above overwrites many Sage plotting functions, we reset
the state of Sage, so that the examples below work!
::
sage: reset()
See http://matplotlib.sourceforge.net for complete documentation
about how to use Matplotlib.
TESTS:
We test dumping and loading a plot.
::
sage: p = plot(sin(x), (x, 0,2*pi))
sage: Q = loads(dumps(p))
Verify that a clean sage startup does *not* import matplotlib::
sage: os.system("sage -c \"if 'matplotlib' in sys.modules: sys.exit(1)\"") # long time
0
AUTHORS:
- Alex Clemesha and William Stein (2006-04-10): initial version
- David Joyner: examples
- Alex Clemesha (2006-05-04) major update
- William Stein (2006-05-29): fine tuning, bug fixes, better server
integration
- William Stein (2006-07-01): misc polish
- Alex Clemesha (2006-09-29): added contour_plot, frame axes, misc
polishing
- Robert Miller (2006-10-30): tuning, NetworkX primitive
- Alex Clemesha (2006-11-25): added plot_vector_field, matrix_plot,
arrow, bar_chart, Axes class usage (see axes.py)
- Bobby Moretti and William Stein (2008-01): Change plot to specify
ranges using the (varname, min, max) notation.
- William Stein (2008-01-19): raised the documentation coverage from a
miserable 12 percent to a 'whopping' 35 percent, and fixed and
clarified numerous small issues.
- Jason Grout (2009-09-05): shifted axes and grid functionality over
to matplotlib; fixed a number of smaller issues.
- Jason Grout (2010-10): rewrote aspect ratio portions of the code
- Jeroen Demeyer (2012-04-19): move parts of this file to graphics.py (:trac:`12857`)
- Aaron Lauve (2016-07-13): reworked handling of 'color' when passed
a list of functions; now more in-line with other CAS's. Added list functionality
to linestyle and legend_label options as well. (:trac:`12962`)
"""
#*****************************************************************************
# Copyright (C) 2006 Alex Clemesha <clemesha@gmail.com>
# Copyright (C) 2006-2008 William Stein <wstein@gmail.com>
# Copyright (C) 2010 Jason Grout
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function, absolute_import
from six.moves import range
from six import iteritems
from functools import reduce
## IMPORTANT: Do *not* import matplotlib at module scope. It takes a
## surprisingly long time to initialize itself. It's better if it is
## imported in functions, so it only gets started if it is actually
## going to be used.
#DEFAULT_FIGSIZE=(6, 3.70820393249937)
EMBEDDED_MODE = False
import sage.misc.misc
from sage.arith.srange import srange
from sage.misc.randstate import current_randstate #for plot adaptive refinement
from math import sin, cos, pi #for polar_plot
from sage.ext.fast_eval import fast_float, is_fast_float
from sage.misc.decorators import options
from .graphics import Graphics, GraphicsArray
from sage.plot.polygon import polygon
# import of line2d below is only for redirection of imports
from sage.plot.line import line, line2d
#Currently not used - see comment immediately above about
#figure.canvas.mpl_connect('draw_event', pad_for_tick_labels)
# TODO - figure out how to use this, add documentation
#def pad_for_tick_labels(event):
# import matplotlib.transforms as mtransforms
# figure=event.canvas.figure
# bboxes = []
# for ax in figure.axes:
# bbox = ax.xaxis.get_label().get_window_extent()
# # the figure transform goes from relative coords->pixels and we
# # want the inverse of that
# bboxi = bbox.inverse_transformed(figure.transFigure)
# bboxes.append(bboxi)
#
# bbox = ax.yaxis.get_label().get_window_extent()
# bboxi = bbox.inverse_transformed(figure.transFigure)
# bboxes.append(bboxi)
# for label in (ax.get_xticklabels()+ax.get_yticklabels() \
# + ax.get_xticklabels(minor=True) \
# +ax.get_yticklabels(minor=True)):
# bbox = label.get_window_extent()
# bboxi = bbox.inverse_transformed(figure.transFigure)
# bboxes.append(bboxi)
#
# # this is the bbox that bounds all the bboxes, again in relative
# # figure coords
# bbox = mtransforms.Bbox.union(bboxes)
# adjusted=adjust_figure_to_contain_bbox(figure,bbox)
#
# if adjusted:
# figure.canvas.draw()
# return False
#
#Currently not used - see comment above about
#figure.canvas.mpl_connect('draw_event', pad_for_tick_labels)
# TODO - figure out how to use this, add documentation
#def adjust_figure_to_contain_bbox(fig, bbox,pad=1.1):
# """
# For each amount we are over (in axes coordinates), we adjust by over*pad
# to give ourselves a bit of padding.
# """
# left=fig.subplotpars.left
# bottom=fig.subplotpars.bottom
# right=fig.subplotpars.right
# top=fig.subplotpars.top
#
# adjusted=False
# if bbox.xmin<0:
# left-=bbox.xmin*pad
# adjusted=True
# if bbox.ymin<0:
# bottom-=bbox.ymin*pad
# adjusted=True
# if bbox.xmax>1:
# right-=(bbox.xmax-1)*pad
# adjusted=True
# if bbox.ymax>1:
# top-=(bbox.ymax-1)*pad
# adjusted=True
#
# if left<right and bottom<top:
# fig.subplots_adjust(left=left, bottom=bottom, right=right, top=top)
# return adjusted
# else:
# return False
_SelectiveFormatterClass = None
def SelectiveFormatter(formatter, skip_values):
"""
This matplotlib formatter selectively omits some tick values and
passes the rest on to a specified formatter.
EXAMPLES:
This example is almost straight from a matplotlib example.
::
sage: from sage.plot.plot import SelectiveFormatter
sage: import matplotlib.pyplot as plt
sage: import numpy
sage: fig=plt.figure()
sage: ax=fig.add_subplot(111)
sage: t = numpy.arange(0.0, 2.0, 0.01)
sage: s = numpy.sin(2*numpy.pi*t)
sage: p = ax.plot(t, s)
sage: formatter=SelectiveFormatter(ax.xaxis.get_major_formatter(),skip_values=[0,1])
sage: ax.xaxis.set_major_formatter(formatter)
sage: fig.savefig(os.path.join(SAGE_TMP, 'test.png'))
"""
global _SelectiveFormatterClass
if _SelectiveFormatterClass is None:
from matplotlib.ticker import Formatter
class _SelectiveFormatterClass(Formatter):
def __init__(self, formatter,skip_values):
"""
Initialize a SelectiveFormatter object.
INPUT:
- formatter -- the formatter object to which we should pass labels
- skip_values -- a list of values that we should skip when
formatting the tick labels
EXAMPLES::
sage: from sage.plot.plot import SelectiveFormatter
sage: import matplotlib.pyplot as plt
sage: import numpy
sage: fig=plt.figure()
sage: ax=fig.add_subplot(111)
sage: t = numpy.arange(0.0, 2.0, 0.01)
sage: s = numpy.sin(2*numpy.pi*t)
sage: line=ax.plot(t, s)
sage: formatter=SelectiveFormatter(ax.xaxis.get_major_formatter(),skip_values=[0,1])
sage: ax.xaxis.set_major_formatter(formatter)
sage: fig.savefig(os.path.join(SAGE_TMP, 'test.png'))
"""
self.formatter=formatter
self.skip_values=skip_values
def set_locs(self, locs):
"""
Set the locations for the ticks that are not skipped.
EXAMPLES::
sage: from sage.plot.plot import SelectiveFormatter
sage: import matplotlib.ticker
sage: formatter=SelectiveFormatter(matplotlib.ticker.Formatter(),skip_values=[0,200])
sage: formatter.set_locs([i*100 for i in range(10)])
"""
self.formatter.set_locs([l for l in locs if l not in self.skip_values])
def __call__(self, x, *args, **kwds):
"""
Return the format for tick val *x* at position *pos*
EXAMPLES::
sage: from sage.plot.plot import SelectiveFormatter
sage: import matplotlib.ticker
sage: formatter=SelectiveFormatter(matplotlib.ticker.FixedFormatter(['a','b']),skip_values=[0,2])
sage: [formatter(i,1) for i in range(10)]
['', 'b', '', 'b', 'b', 'b', 'b', 'b', 'b', 'b']
"""
if x in self.skip_values:
return ''
else:
return self.formatter(x, *args, **kwds)
return _SelectiveFormatterClass(formatter, skip_values)
def xydata_from_point_list(points):
r"""
Returns two lists (xdata, ydata), each coerced to a list of floats,
which correspond to the x-coordinates and the y-coordinates of the
points.
The points parameter can be a list of 2-tuples or some object that
yields a list of one or two numbers.
This function can potentially be very slow for large point sets.
TESTS::
sage: from sage.plot.plot import xydata_from_point_list
sage: xydata_from_point_list([CC(0), CC(1)]) # ticket 8082
([0.0, 1.0], [0.0, 0.0])
This function should work for anything that can be turned into a
list, such as iterators and such (see :trac:`10478`)::
sage: xydata_from_point_list(iter([(0,0), (sqrt(3), 2)]))
([0.0, 1.7320508075688772], [0.0, 2.0])
sage: xydata_from_point_list((x, x^2) for x in range(5))
([0.0, 1.0, 2.0, 3.0, 4.0], [0.0, 1.0, 4.0, 9.0, 16.0])
sage: xydata_from_point_list(enumerate(prime_range(1, 15)))
([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 3.0, 5.0, 7.0, 11.0, 13.0])
sage: from builtins import zip
sage: xydata_from_point_list(list(zip([2,3,5,7], [11, 13, 17, 19])))
([2.0, 3.0, 5.0, 7.0], [11.0, 13.0, 17.0, 19.0])
"""
from sage.rings.complex_number import ComplexNumber
if not isinstance(points, (list, tuple)):
points = list(points)
try:
points = [[float(z) for z in points]]
except TypeError:
pass
elif len(points) == 2 and not isinstance(points[0], (list, tuple,
ComplexNumber)):
try:
points = [[float(z) for z in points]]
except TypeError:
pass
if len(points) and len(list(points[0])) != 2:
raise ValueError("points must have 2 coordinates in a 2d line")
xdata = [float(z[0]) for z in points]
ydata = [float(z[1]) for z in points]
return xdata, ydata
@options(alpha=1, thickness=1, fill=False, fillcolor='automatic', fillalpha=0.5, plot_points=200,
adaptive_tolerance=0.01, adaptive_recursion=5, detect_poles=False, exclude=None, legend_label=None,
__original_opts=True, aspect_ratio='automatic')
def plot(funcs, *args, **kwds):
r"""
Use plot by writing
``plot(X, ...)``
where `X` is a Sage object (or list of Sage objects) that
either is callable and returns numbers that can be coerced to
floats, or has a plot method that returns a
``GraphicPrimitive`` object.
There are many other specialized 2D plot commands available
in Sage, such as ``plot_slope_field``, as well as various
graphics primitives like :class:`~sage.plot.arrow.Arrow`;
type ``sage.plot.plot?`` for a current list.
Type ``plot.options`` for a dictionary of the default
options for plots. You can change this to change the defaults for
all future plots. Use ``plot.reset()`` to reset to the
default options.
PLOT OPTIONS:
- ``plot_points`` - (default: 200) the minimal number of plot points.
- ``adaptive_recursion`` - (default: 5) how many levels of recursion to go
before giving up when doing adaptive refinement. Setting this to 0
disables adaptive refinement.
- ``adaptive_tolerance`` - (default: 0.01) how large a difference should be
before the adaptive refinement code considers it significant. See the
documentation further below for more information, starting at "the
algorithm used to insert".
- ``base`` - (default: 10) the base of the logarithm if
a logarithmic scale is set. This must be greater than 1. The base
can be also given as a list or tuple ``(basex, basey)``.
``basex`` sets the base of the logarithm along the horizontal
axis and ``basey`` sets the base along the vertical axis.
- ``scale`` -- (default: ``"linear"``) string. The scale of the axes.
Possible values are ``"linear"``, ``"loglog"``, ``"semilogx"``,
``"semilogy"``.
The scale can be also be given as single argument that is a list
or tuple ``(scale, base)`` or ``(scale, basex, basey)``.
The ``"loglog"`` scale sets both the horizontal and vertical axes to
logarithmic scale. The ``"semilogx"`` scale sets the horizontal axis
to logarithmic scale. The ``"semilogy"`` scale sets the vertical axis
to logarithmic scale. The ``"linear"`` scale is the default value
when :class:`~sage.plot.graphics.Graphics` is initialized.
- ``xmin`` - starting x value in the rendered figure. This parameter is
passed directly to the ``show`` procedure and it could be overwritten.
- ``xmax`` - ending x value in the rendered figure. This parameter is passed
directly to the ``show`` procedure and it could be overwritten.
- ``ymin`` - starting y value in the rendered figure. This parameter is
passed directly to the ``show`` procedure and it could be overwritten.
- ``ymax`` - ending y value in the rendered figure. This parameter is passed
directly to the ``show`` procedure and it could be overwritten.
- ``detect_poles`` - (Default: False) If set to True poles are detected.
If set to "show" vertical asymptotes are drawn.
- ``legend_label`` - a (TeX) string serving as the label for `X` in the legend.
If `X` is a list, then this option can be a single string, or a list or dictionary
with strings as entries/values. If a dictionary, then keys are taken from ``range(len(X))``.
.. note::
- If the ``scale`` is ``"linear"``, then irrespective of what
``base`` is set to, it will default to 10 and will remain unused.
- If you want to limit the plot along the horizontal axis in the
final rendered figure, then pass the ``xmin`` and ``xmax``
keywords to the :meth:`~sage.plot.graphics.Graphics.show` method.
To limit the plot along the vertical axis, ``ymin`` and ``ymax``
keywords can be provided to either this ``plot`` command or to
the ``show`` command.
- This function does NOT simply sample equally spaced points
between xmin and xmax. Instead it computes equally spaced points
and adds small perturbations to them. This reduces the possibility
of, e.g., sampling `\sin` only at multiples of `2\pi`, which would
yield a very misleading graph.
- If there is a range of consecutive points where the function has
no value, then those points will be excluded from the plot. See
the example below on automatic exclusion of points.
- For the other keyword options that the ``plot`` function can
take, refer to the method :meth:`~sage.plot.graphics.Graphics.show`
and the further options below.
COLOR OPTIONS:
- ``color`` - (Default: 'blue') One of:
- an RGB tuple (r,g,b) with each of r,g,b between 0 and 1.
- a color name as a string (e.g., 'purple').
- an HTML color such as '#aaff0b'.
- a list or dictionary of colors (valid only if `X` is a list):
if a dictionary, keys are taken from ``range(len(X))``;
the entries/values of the list/dictionary may be any of the options above.
- 'automatic' -- maps to default ('blue') if `X` is a single Sage object; and
maps to a fixed sequence of regularly spaced colors if `X` is a list.
- ``legend_color`` - the color of the text for `X` (or each item in `X`) in the legend.
Default color is 'black'. Options are as in ``color`` above, except that the choice 'automatic' maps to 'black' if `X` is a single Sage object.
- ``fillcolor`` - The color of the fill for the plot of `X` (or each item in `X`).
Default color is 'gray' if `X` is a single Sage object or if ``color`` is a single color. Otherwise, options are as in ``color`` above.
APPEARANCE OPTIONS:
The following options affect the appearance of
the line through the points on the graph of `X` (these are
the same as for the line function):
INPUT:
- ``alpha`` - How transparent the line is
- ``thickness`` - How thick the line is
- ``rgbcolor`` - The color as an RGB tuple
- ``hue`` - The color given as a hue
LINE OPTIONS:
Any MATPLOTLIB line option may also be passed in. E.g.,
- ``linestyle`` - (default: "-") The style of the line, which is one of
- ``"-"`` or ``"solid"``
- ``"--"`` or ``"dashed"``
- ``"-."`` or ``"dash dot"``
- ``":"`` or ``"dotted"``
- ``"None"`` or ``" "`` or ``""`` (nothing)
- a list or dictionary (see below)
The linestyle can also be prefixed with a drawing style (e.g., ``"steps--"``)
- ``"default"`` (connect the points with straight lines)
- ``"steps"`` or ``"steps-pre"`` (step function; horizontal
line is to the left of point)
- ``"steps-mid"`` (step function; points are in the middle of
horizontal lines)
- ``"steps-post"`` (step function; horizontal line is to the
right of point)
If `X` is a list, then ``linestyle`` may be a list (with entries
taken from the strings above) or a dictionary (with keys in ``range(len(X))``
and values taken from the strings above).
- ``marker`` - The style of the markers, which is one of
- ``"None"`` or ``" "`` or ``""`` (nothing) -- default
- ``","`` (pixel), ``"."`` (point)
- ``"_"`` (horizontal line), ``"|"`` (vertical line)
- ``"o"`` (circle), ``"p"`` (pentagon), ``"s"`` (square), ``"x"`` (x), ``"+"`` (plus), ``"*"`` (star)
- ``"D"`` (diamond), ``"d"`` (thin diamond)
- ``"H"`` (hexagon), ``"h"`` (alternative hexagon)
- ``"<"`` (triangle left), ``">"`` (triangle right), ``"^"`` (triangle up), ``"v"`` (triangle down)
- ``"1"`` (tri down), ``"2"`` (tri up), ``"3"`` (tri left), ``"4"`` (tri right)
- ``0`` (tick left), ``1`` (tick right), ``2`` (tick up), ``3`` (tick down)
- ``4`` (caret left), ``5`` (caret right), ``6`` (caret up), ``7`` (caret down), ``8`` (octagon)
- ``"$...$"`` (math TeX string)
- ``(numsides, style, angle)`` to create a custom, regular symbol (see the short illustration after this option list)
- ``numsides`` -- the number of sides
- ``style`` -- ``0`` (regular polygon), ``1`` (star shape), ``2`` (asterisk), ``3`` (circle)
- ``angle`` -- the angular rotation in degrees
- ``markersize`` - the size of the marker in points
- ``markeredgecolor`` -- the color of the marker edge
- ``markerfacecolor`` -- the color of the marker face
- ``markeredgewidth`` - the size of the marker edge in points
- ``exclude`` - (Default: None) values which are excluded from the plot range.
Either a list of real numbers, or an equation in one variable.
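As a small illustration of the ``(numsides, style, angle)`` marker form listed above: the tuple is ordinary matplotlib marker syntax and is passed through unchanged; here ``(5, 1, 0)`` requests a five-pointed star with no rotation (shown as plain code, not a doctest)::
plot(sin(x), (x, 0, 10), plot_points=20, linestyle='', marker=(5, 1, 0))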
FILLING OPTIONS:
- ``fill`` - (Default: False) One of:
- "axis" or True: Fill the area between the function and the x-axis.
- "min": Fill the area between the function and its minimal value.
- "max": Fill the area between the function and its maximal value.
- a number c: Fill the area between the function and the horizontal line y = c.
- a function g: Fill the area between the function that is plotted and g.
- a dictionary ``d`` (only if a list of functions are plotted):
The keys of the dictionary should be integers.
The value of ``d[i]`` specifies the fill options for the i-th function
in the list. If ``d[i] == [j]``: Fill the area between the i-th and
the j-th function in the list. (But if ``d[i] == j``: Fill the area
between the i-th function in the list and the horizontal line y = j.)
- ``fillalpha`` - (default: 0.5) How transparent the fill is.
A number between 0 and 1.
MATPLOTLIB STYLE SHEET OPTION:
- ``stylesheet`` - (Default: classic) Support for loading a full matplotlib style sheet.
Any style sheet listed in ``matplotlib.pyplot.style.available`` is acceptable. If a
non-existing style is provided the default classic is applied.
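For instance (a plain illustration, not a doctest; it assumes the ``ggplot`` style sheet is among those shipped with your matplotlib installation)::
import matplotlib.pyplot
print('ggplot' in matplotlib.pyplot.style.available)   # check availability first
plot(sin(x), (x, 0, 2*pi), stylesheet='ggplot')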
EXAMPLES:
We plot the `\sin` function::
sage: P = plot(sin, (0,10)); print(P)
Graphics object consisting of 1 graphics primitive
sage: len(P) # number of graphics primitives
1
sage: len(P[0]) # how many points were computed (random)
225
sage: P # render
Graphics object consisting of 1 graphics primitive
.. PLOT::
P = plot(sin, (0,10))
sphinx_plot(P)
::
sage: P = plot(sin, (0,10), plot_points=10); print(P)
Graphics object consisting of 1 graphics primitive
sage: len(P[0]) # random output
32
sage: P # render
Graphics object consisting of 1 graphics primitive
.. PLOT::
P = plot(sin, (0,10), plot_points=10)
sphinx_plot(P)
We plot with ``randomize=False``, which makes the initial sample points
evenly spaced (hence always the same). Adaptive plotting might
insert other points, however, unless ``adaptive_recursion=0``.
::
sage: p=plot(1, (x,0,3), plot_points=4, randomize=False, adaptive_recursion=0)
sage: list(p[0])
[(0.0, 1.0), (1.0, 1.0), (2.0, 1.0), (3.0, 1.0)]
Some colored functions::
sage: plot(sin, 0, 10, color='purple')
Graphics object consisting of 1 graphics primitive
.. PLOT::
P=plot(sin,0,10,color='purple')
sphinx_plot(P)
::
sage: plot(sin, 0, 10, color='#ff00ff')
Graphics object consisting of 1 graphics primitive
.. PLOT::
P=plot(sin, 0, 10, color='#ff00ff')
sphinx_plot(P)
We plot several functions together by passing a list of functions
as input::
sage: plot([x*exp(-n*x^2)/.4 for n in [1..5]], (0, 2), aspect_ratio=.8)
Graphics object consisting of 5 graphics primitives
.. PLOT::
g = plot([x*exp(-n*x**2)/.4 for n in range(1,6)], (0, 2), aspect_ratio=.8)
sphinx_plot(g)
By default, the color will change from one primitive to the next.
This may be controlled by modifying the ``color`` option::
sage: g1 = plot([x*exp(-n*x^2)/.4 for n in [1..3]], (0, 2), color='blue', aspect_ratio=.8); g1
Graphics object consisting of 3 graphics primitives
sage: g2 = plot([x*exp(-n*x^2)/.4 for n in [1..3]], (0, 2), color=['red','red','green'], linestyle=['-','--','-.'], aspect_ratio=.8); g2
Graphics object consisting of 3 graphics primitives
.. PLOT::
g1 = plot([x*exp(-n*x**2)/.4 for n in range(1,4)], (0, 2), color='blue', aspect_ratio=.8)
g2 = plot([x*exp(-n*x**2)/.4 for n in range(1,4)], (0, 2), color=['red','red','green'], linestyle=['-','--','-.'], aspect_ratio=.8)
sphinx_plot(graphics_array([[g1], [g2]]))
We can also build a plot step by step from an empty plot::
sage: a = plot([]); a # passing an empty list returns an empty plot (Graphics() object)
Graphics object consisting of 0 graphics primitives
sage: a += plot(x**2); a # append another plot
Graphics object consisting of 1 graphics primitive
.. PLOT::
a = plot([])
a = a + plot(x**2)
sphinx_plot(a)
::
sage: a += plot(x**3); a # append yet another plot
Graphics object consisting of 2 graphics primitives
.. PLOT::
a = plot([])
a = a + plot(x**2)
a = a + plot(x**3)
sphinx_plot(a)
The function `\sin(1/x)` wiggles wildly near `0`.
Sage adapts to this and plots extra points near the origin.
::
sage: plot(sin(1/x), (x, -1, 1))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(1/x), (x, -1, 1))
sphinx_plot(g)
Via the matplotlib library, Sage makes it easy to tell whether
a graph is on both sides of both axes, as the axes only cross
if the origin is actually part of the viewing area::
sage: plot(x^3,(x,0,2)) # this one has the origin
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**3,(x,0,2))
sphinx_plot(g)
::
sage: plot(x^3,(x,1,2)) # this one does not
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**3,(x,1,2))
sphinx_plot(g)
Another thing to be aware of with axis labeling is that when
the labels have quite different orders of magnitude or are very
large, scientific notation (the `e` notation for powers of ten) is used::
sage: plot(x^2,(x,480,500)) # this one has no scientific notation
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2,(x,480,500))
sphinx_plot(g)
::
sage: plot(x^2,(x,300,500)) # this one has scientific notation on y-axis
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2,(x,300,500))
sphinx_plot(g)
You can put a legend with ``legend_label`` (the legend is only put
once in the case of multiple functions)::
sage: plot(exp(x), 0, 2, legend_label='$e^x$')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(exp(x), 0, 2, legend_label='$e^x$')
sphinx_plot(g)
Sage understands TeX, so these are all slightly different, and you can choose
one based on your needs::
sage: plot(sin, legend_label='sin')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin,legend_label='sin')
sphinx_plot(g)
::
sage: plot(sin, legend_label='$sin$')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin,legend_label='$sin$')
sphinx_plot(g)
::
sage: plot(sin, legend_label=r'$\sin$')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin,legend_label=r'$\sin$')
sphinx_plot(g)
It is possible to use a different color for the text of each label::
sage: p1 = plot(sin, legend_label='sin', legend_color='red')
sage: p2 = plot(cos, legend_label='cos', legend_color='green')
sage: p1 + p2
Graphics object consisting of 2 graphics primitives
.. PLOT::
p1 = plot(sin, legend_label='sin', legend_color='red')
p2 = plot(cos, legend_label='cos', legend_color='green')
g = p1 + p2
sphinx_plot(g)
Prior to :trac:`19485`, legends by default had a shadowless gray
background. This behavior can be recovered by setting the legend
options on your plot object::
sage: p = plot(sin(x), legend_label=r'$\sin(x)$')
sage: p.set_legend_options(back_color=(0.9,0.9,0.9), shadow=False)
.. PLOT::
g = plot(sin(x), legend_label=r'$\sin(x)$')
g.set_legend_options(back_color=(0.9,0.9,0.9), shadow=False)
sphinx_plot(g)
If `X` is a list of Sage objects and ``legend_label`` is 'automatic', then Sage will
create a label for each function according to its internal representation::
sage: plot([sin(x), tan(x), 1-x^2], legend_label='automatic')
Graphics object consisting of 3 graphics primitives
.. PLOT::
g = plot([sin(x), tan(x), 1-x**2], legend_label='automatic')
sphinx_plot(g)
If ``legend_label`` is any single string other than 'automatic',
then it is repeated for all members of `X`::
sage: plot([sin(x), tan(x)], color='blue', legend_label='trig')
Graphics object consisting of 2 graphics primitives
.. PLOT::
g = plot([sin(x), tan(x)], color='blue', legend_label='trig')
sphinx_plot(g)
Note that the independent variable may be omitted if there is no
ambiguity::
sage: plot(sin(1.0/x), (-1, 1))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(1.0/x), (-1, 1))
sphinx_plot(g)
Plotting in logarithmic scale is possible for 2D plots. There
are two different syntaxes supported::
sage: plot(exp, (1, 10), scale='semilogy') # log axis on vertical
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(exp, (1, 10), scale='semilogy')
sphinx_plot(g)
::
sage: plot_semilogy(exp, (1, 10)) # same thing
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_semilogy(exp, (1, 10))
sphinx_plot(g)
::
sage: plot_loglog(exp, (1, 10)) # both axes are log
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_loglog(exp, (1, 10))
sphinx_plot(g)
::
sage: plot(exp, (1, 10), scale='loglog', base=2) # long time # base of log is 2
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(exp, (1, 10), scale='loglog', base=2)
sphinx_plot(g)
We can also change the scale of the axes in the graphics just before
displaying::
sage: G = plot(exp, 1, 10) # long time
sage: G.show(scale=('semilogy', 2)) # long time
The algorithm used to insert extra points is actually pretty
simple. On the picture drawn by the lines below::
sage: p = plot(x^2, (-0.5, 1.4)) + line([(0,0), (1,1)], color='green')
sage: p += line([(0.5, 0.5), (0.5, 0.5^2)], color='purple')
sage: p += point(((0, 0), (0.5, 0.5), (0.5, 0.5^2), (1, 1)), color='red', pointsize=20)
sage: p += text('A', (-0.05, 0.1), color='red')
sage: p += text('B', (1.01, 1.1), color='red')
sage: p += text('C', (0.48, 0.57), color='red')
sage: p += text('D', (0.53, 0.18), color='red')
sage: p.show(axes=False, xmin=-0.5, xmax=1.4, ymin=0, ymax=2)
.. PLOT::
g = plot(x**2, (-0.5, 1.4)) + line([(0,0), (1,1)], color='green')
g = g + line([(0.5, 0.5), (0.5, 0.5**2)], color='purple')
g = g + point(((0, 0), (0.5, 0.5), (0.5, 0.5**2), (1, 1)), color='red', pointsize=20)
g = g + text('A', (-0.05, 0.1), color='red')
g = g + text('B', (1.01, 1.1), color='red')
g = g + text('C', (0.48, 0.57), color='red')
g = g + text('D', (0.53, 0.18), color='red')
g.axes(False)
g.xmin(-0.5)
g.xmax(1.4)
g.ymin(0)
g.ymax(2)
sphinx_plot(g)
You have the function (in blue) and its approximation (in green)
passing through the points A and B. The algorithm finds the
midpoint C of the segment AB and compares it with D, the point on
the curve with the same `x`-coordinate. If the distance between C and
D exceeds the ``adaptive_tolerance`` threshold (*relative* to
the size of the initial plot subintervals), the point D is
added to the curve, and the algorithm is then applied recursively to
the pairs A, D and D, B. The recursion is at most
``adaptive_recursion`` levels deep (5, by default).
The actual sample points are slightly randomized, so the above
plots may look slightly different each time you draw them.
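For reference, here is a conceptual sketch of that refinement step written as ordinary Python. It is not the implementation used by this module (which, among other differences, scales the tolerance and copes with evaluation failures), but it shows the A/C/D bookkeeping described above::
def refine(f, a, b, tol, depth):
    # a = (x0, f(x0)) and b = (x1, f(x1)) are two points already on the plot (A and B)
    xmid = (a[0] + b[0]) / 2.0
    c = (a[1] + b[1]) / 2.0      # C: midpoint of the straight segment AB
    d = f(xmid)                  # D: the point on the curve with the same x-coordinate
    if depth <= 0 or abs(d - c) <= tol:
        return []
    # keep D and refine the halves A--D and D--B recursively
    return (refine(f, a, (xmid, d), tol, depth - 1) + [(xmid, d)]
            + refine(f, (xmid, d), b, tol, depth - 1))
Calling ``refine(lambda t: t**2, (0, 0), (1, 1), 0.01, 5)`` returns, for this sketch, the extra points inserted between A and B on the parabola above.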
We draw the graph of an elliptic curve as the union of graphs of 2
functions.
::
sage: def h1(x): return abs(sqrt(x^3 - 1))
sage: def h2(x): return -abs(sqrt(x^3 - 1))
sage: P = plot([h1, h2], 1,4)
sage: P # show the result
Graphics object consisting of 2 graphics primitives
.. PLOT::
def h1(x): return abs(sqrt(x**3 - 1))
def h2(x): return -abs(sqrt(x**3 - 1))
P = plot([h1, h2], 1,4)
sphinx_plot(P)
It is important to mention that when several graphs are drawn at the same time,
the parameters ``xmin``, ``xmax``, ``ymin`` and ``ymax`` are just passed directly
to the ``show`` procedure; when the plots are combined, one plot's values for
these parameters overwrite the other's::
sage: p=plot(x^3, x, xmin=-1, xmax=1,ymin=-1, ymax=1)
sage: q=plot(exp(x), x, xmin=-2, xmax=2, ymin=0, ymax=4)
sage: (p+q).show()
As a workaround, we can use the following trick::
sage: p1 = line([(a,b) for a,b in zip(p[0].xdata,p[0].ydata) if (b>=-1 and b<=1)])
sage: q1 = line([(a,b) for a,b in zip(q[0].xdata,q[0].ydata) if (b>=0 and b<=4)])
sage: (p1+q1).show()
We can also directly plot the elliptic curve::
sage: E = EllipticCurve([0,-1])
sage: plot(E, (1, 4), color=hue(0.6))
Graphics object consisting of 1 graphics primitive
.. PLOT::
E = EllipticCurve([0,-1])
g = plot(E, (1, 4), color=hue(0.6))
sphinx_plot(g)
We can change the line style as well::
sage: plot(sin(x), (x, 0, 10), linestyle='-.')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(x), (x, 0, 10), linestyle='-.')
sphinx_plot(g)
If we have an empty linestyle and specify a marker, we can see the
points that are actually being plotted::
sage: plot(sin(x), (x,0,10), plot_points=20, linestyle='', marker='.')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(x), (x,0,10), plot_points=20, linestyle='', marker='.')
sphinx_plot(g)
The marker can be a TeX symbol as well::
sage: plot(sin(x), (x,0,10), plot_points=20, linestyle='', marker=r'$\checkmark$')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(x), (x,0,10), plot_points=20, linestyle='', marker=r'$\checkmark$')
sphinx_plot(g)
Sage currently ignores points that cannot be evaluated
::
sage: set_verbose(-1)
sage: plot(-x*log(x), (x,0,1)) # this works fine since the failed endpoint is just skipped.
Graphics object consisting of 1 graphics primitive
sage: set_verbose(0)
This prints out a warning and plots where it can (we turn off the
warning by setting the verbose mode temporarily to -1).
::
sage: set_verbose(-1)
sage: plot(x^(1/3), (x,-1,1))
Graphics object consisting of 1 graphics primitive
sage: set_verbose(0)
.. PLOT::
set_verbose(-1)
g = plot(x**(1.0/3.0), (x,-1,1))
sphinx_plot(g)
set_verbose(0)
Plotting the real cube root function for negative input
requires avoiding the complex numbers one would usually get.
The easiest way is to use absolute value::
sage: plot(sign(x)*abs(x)^(1/3), (x,-1,1))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sign(x)*abs(x)**(1.0/3.0), (x,-1,1))
sphinx_plot(g)
We can also use the following::
sage: plot(sign(x)*(x*sign(x))^(1/3), (x,-4,4))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sign(x)*(x*sign(x))**(1.0/3.0), (x,-4,4))
sphinx_plot(g)
One way to plot other functions that lack symbolic variants is to
use lambda functions::
sage: plot(lambda x : RR(x).nth_root(3), (x,-1, 1))
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(plot(lambda x : RR(x).nth_root(3), (x,-1, 1)))
We can detect the poles of a function::
sage: plot(gamma, (-3, 4), detect_poles=True).show(ymin=-5, ymax=5)
.. PLOT::
g = plot(gamma, (-3, 4), detect_poles=True)
g.ymin(-5)
g.ymax(5)
sphinx_plot(g)
We draw the Gamma-Function with its poles highlighted::
sage: plot(gamma, (-3, 4), detect_poles='show').show(ymin=-5, ymax=5)
.. PLOT::
g = plot(gamma, (-3, 4), detect_poles='show')
g.ymin(-5)
g.ymax(5)
sphinx_plot(g)
The basic options for filling a plot::
sage: p1 = plot(sin(x), -pi, pi, fill='axis')
sage: p2 = plot(sin(x), -pi, pi, fill='min', fillalpha=1)
sage: p3 = plot(sin(x), -pi, pi, fill='max')
sage: p4 = plot(sin(x), -pi, pi, fill=(1-x)/3, fillcolor='blue', fillalpha=.2)
sage: graphics_array([[p1, p2], [p3, p4]]).show(frame=True, axes=False) # long time
.. PLOT::
p1 = plot(sin(x), -pi, pi, fill='axis'); print(p1)
p2 = plot(sin(x), -pi, pi, fill='min', fillalpha=1)
p3 = plot(sin(x), -pi, pi, fill='max')
p4 = plot(sin(x), -pi, pi, fill=(1-x)/3, fillcolor='blue', fillalpha=.2)
g = graphics_array([[p1, p2], [p3, p4]])
sphinx_plot(g) # Needs to accept options 'frame', 'axes', ...
The basic options for filling a list of plots::
sage: (f1, f2) = x*exp(-1*x^2)/.35, x*exp(-2*x^2)/.35
sage: p1 = plot([f1, f2], -pi, pi, fill={1: [0]}, fillcolor='blue', fillalpha=.25, color='blue')
sage: p2 = plot([f1, f2], -pi, pi, fill={0: x/3, 1:[0]}, color=['blue'])
sage: p3 = plot([f1, f2], -pi, pi, fill=[0, [0]], fillcolor=['orange','red'], fillalpha=1, color={1: 'blue'})
sage: p4 = plot([f1, f2], (x,-pi, pi), fill=[x/3, 0], fillcolor=['grey'], color=['red', 'blue'])
sage: graphics_array([[p1, p2], [p3, p4]]).show(frame=True, axes=False) # long time
.. PLOT::
(f1, f2) = x*exp(-1*x**2)/.35, x*exp(-2*x**2)/.35
p1 = plot([f1, f2], -pi, pi, fill={1: [0]}, fillcolor='blue', fillalpha=.25, color='blue')
p2 = plot([f1, f2], -pi, pi, fill={0: x/3, 1:[0]}, color=['blue'])
p3 = plot([f1, f2], -pi, pi, fill=[0, [0]], fillcolor=['orange','red'], fillalpha=1, color={1: 'blue'})
p4 = plot([f1, f2], (x,-pi, pi), fill=[x/3, 0], fillcolor=['grey'], color=['red', 'blue'])
g = graphics_array([[p1, p2], [p3, p4]])
sphinx_plot(g) # Needs to accept options 'frame', 'axes', ...
An example about the growth of prime numbers::
sage: plot(1.13*log(x), 1, 100, fill=lambda x: nth_prime(x)/floor(x), fillcolor='red')
Graphics object consisting of 2 graphics primitives
.. PLOT::
sphinx_plot(plot(1.13*log(x), 1, 100, fill=lambda x: nth_prime(x)/floor(x), fillcolor='red'))
Fill the area between a function and its asymptote::
sage: f = (2*x^3+2*x-1)/((x-2)*(x+1))
sage: plot([f, 2*x+2], -7,7, fill={0: [1]}, fillcolor='#ccc').show(ymin=-20, ymax=20)
.. PLOT::
f = (2*x**3+2*x-1)/((x-2)*(x+1))
g = plot([f, 2*x+2], -7,7, fill={0: [1]}, fillcolor='#ccc')
g.ymin(-20)
g.ymax(20)
sphinx_plot(g)
Fill the area between a list of functions and the x-axis::
sage: def b(n): return lambda x: bessel_J(n, x)
sage: plot([b(n) for n in [1..5]], 0, 20, fill='axis')
Graphics object consisting of 10 graphics primitives
.. PLOT::
def b(n): return lambda x: bessel_J(n, x)
g = plot([b(n) for n in range(1,6)], 0, 20, fill='axis')
sphinx_plot(g)
Note that to fill between the ith and jth functions, you must use
the dictionary key-value syntax ``i:[j]``; using key-value pairs
like ``i:j`` will fill between the ith function and the line y=j::
sage: def b(n): return lambda x: bessel_J(n, x) + 0.5*(n-1)
sage: plot([b(c) for c in [1..5]], 0, 20, fill={i:[i-1] for i in [1..4]}, color={i:'blue' for i in [1..5]}, aspect_ratio=3, ymax=3)
Graphics object consisting of 9 graphics primitives
sage: plot([b(c) for c in [1..5]], 0, 20, fill={i:i-1 for i in [1..4]}, color='blue', aspect_ratio=3) # long time
Graphics object consisting of 9 graphics primitives
.. PLOT::
def b(n): return lambda x: bessel_J(n, x) + 0.5*(n-1)
g1 = plot([b(n) for n in range(1,6)], 0, 20, fill={i:[i-1] for i in range(1,5)}, color={i:'blue' for i in range(1,6)}, aspect_ratio=3)
g2 = plot([b(n) for n in range(1,6)], 0, 20, fill={i:i-1 for i in range(1,5)}, color='blue', aspect_ratio=3) # long time
g1.ymax(3)
g = graphics_array([[g1], [g2]])
sphinx_plot(g)
Extra options will get passed on to :meth:`~sage.plot.graphics.Graphics.show`,
as long as they are valid::
sage: plot(sin(x^2), (x, -3, 3), title=r'Plot of $\sin(x^2)$', axes_labels=['$x$','$y$']) # These labels will be nicely typeset
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(x**2), (x, -3, 3), title=r'Plot of $\sin(x^2)$', axes_labels=['$x$','$y$']) # These labels will be nicely typeset
sphinx_plot(g)
::
sage: plot(sin(x^2), (x, -3, 3), title='Plot of sin(x^2)', axes_labels=['x','y']) # These will not
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(x**2), (x, -3, 3), title='Plot of sin(x^2)', axes_labels=['x','y']) # These will not
sphinx_plot(g)
::
sage: plot(sin(x^2), (x, -3, 3), axes_labels=['x','y'], axes_labels_size=2.5) # Large axes labels (w.r.t. the tick marks)
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(x**2), (x, -3, 3), axes_labels=['x','y'], axes_labels_size=2.5) # Large axes labels (w.r.t. the tick marks)
sphinx_plot(g)
::
sage: plot(sin(x^2), (x, -3, 3), figsize=[8,2])
Graphics object consisting of 1 graphics primitive
sage: plot(sin(x^2), (x, -3, 3)).show(figsize=[8,2]) # These are equivalent
.. PLOT::
g = plot(sin(x**2), (x, -3, 3), figsize=[8,2])
sphinx_plot(g)
This includes options for custom ticks and formatting. See documentation
for :meth:`show` for more details.
::
sage: plot(sin(pi*x), (x, -8, 8), ticks=[[-7,-3,0,3,7],[-1/2,0,1/2]])
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(pi*x), (x, -8, 8), ticks=[[-7,-3,0,3,7],[-1/2,0,1/2]])
sphinx_plot(g)
::
sage: plot(2*x+1,(x,0,5),ticks=[[0,1,e,pi,sqrt(20)],2],tick_formatter="latex")
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(2*x+1,(x,0,5),ticks=[[0,1,e,pi,sqrt(20)],2],tick_formatter="latex")
sphinx_plot(g)
This is particularly useful when setting custom ticks in multiples of `pi`.
::
sage: plot(sin(x),(x,0,2*pi),ticks=pi/3,tick_formatter=pi)
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(sin(x),(x,0,2*pi),ticks=pi/3,tick_formatter=pi)
sphinx_plot(g)
You can even have custom tick labels along with custom positioning. ::
sage: plot(x**2, (x,0,3), ticks=[[1,2.5],[0.5,1,2]], tick_formatter=[["$x_1$","$x_2$"],["$y_1$","$y_2$","$y_3$"]])
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot(x**2, (x,0,3), ticks=[[1,2.5],[0.5,1,2]], tick_formatter=[["$x_1$","$x_2$"],["$y_1$","$y_2$","$y_3$"]])
sphinx_plot(g)
You can force Type 1 fonts in your figures by providing the relevant
option as shown below. This also requires that LaTeX, dvipng and
Ghostscript be installed::
sage: plot(x, typeset='type1') # optional - latex
Graphics object consisting of 1 graphics primitive
An example with excluded values::
sage: plot(floor(x), (x, 1, 10), exclude=[1..10])
Graphics object consisting of 11 graphics primitives
.. PLOT::
g = plot(floor(x), (x, 1, 10), exclude=list(range(1,11)))
sphinx_plot(g)
We exclude all points where :class:`~sage.functions.prime_pi.PrimePi`
makes a jump::
sage: jumps = [n for n in [1..100] if prime_pi(n) != prime_pi(n-1)]
sage: plot(lambda x: prime_pi(x), (x, 1, 100), exclude=jumps)
Graphics object consisting of 26 graphics primitives
.. PLOT::
#jumps = [n for n in [1..100] if prime_pi(n) != prime_pi(n-1)]
#syntaxError: invalid syntax, so we need more code
jumps=list()
for n in range(1,101):
if prime_pi(n) != prime_pi(n-1):
jumps.append(n)
g = plot(lambda x: prime_pi(x), (x, 1, 100), exclude=jumps)
sphinx_plot(g)
Excluded points can also be given by an equation::
sage: g(x) = x^2-2*x-2
sage: plot(1/g(x), (x, -3, 4), exclude=g(x)==0, ymin=-5, ymax=5) # long time
Graphics object consisting of 3 graphics primitives
.. PLOT::
def g(x): return x**2-2*x-2
G = plot(1/g(x), (x, -3, 4), exclude=g(x)==0, ymin=-5, ymax=5)
sphinx_plot(G)
``exclude`` and ``detect_poles`` can be used together::
sage: f(x) = (floor(x)+0.5) / (1-(x-0.5)^2)
sage: plot(f, (x, -3.5, 3.5), detect_poles='show', exclude=[-3..3], ymin=-5, ymax=5)
Graphics object consisting of 12 graphics primitives
.. PLOT::
def f(x): return (floor(x)+0.5) / (1-(x-0.5)**2)
g = plot(f, (x, -3.5, 3.5), detect_poles='show', exclude=list(range(-3,4)), ymin=-5, ymax=5)
sphinx_plot(g)
Regions in which the plot has no values are automatically excluded. The
regions thus excluded are in addition to the exclusion points present
in the ``exclude`` keyword argument.::
sage: set_verbose(-1)
sage: plot(arcsec, (x, -2, 2)) # [-1, 1] is excluded automatically
Graphics object consisting of 2 graphics primitives
.. PLOT::
set_verbose(-1)
g = plot(arcsec, (x, -2, 2)) # [-1, 1] is excluded automatically
sphinx_plot(g)
::
sage: plot(arcsec, (x, -2, 2), exclude=[1.5]) # x=1.5 is also excluded
Graphics object consisting of 3 graphics primitives
.. PLOT::
set_verbose(-1)
g = plot(arcsec, (x, -2, 2), exclude=[1.5]) # x=1.5 is also excluded
sphinx_plot(g)
::
sage: plot(arcsec(x/2), -2, 2) # plot should be empty; no valid points
Graphics object consisting of 0 graphics primitives
sage: plot(sqrt(x^2-1), -2, 2) # [-1, 1] is excluded automatically
Graphics object consisting of 2 graphics primitives
.. PLOT::
set_verbose(-1)
g = plot(sqrt(x**2-1), -2, 2) # [-1, 1] is excluded automatically
sphinx_plot(g)
::
sage: plot(arccsc, -2, 2) # [-1, 1] is excluded automatically
Graphics object consisting of 2 graphics primitives
sage: set_verbose(0)
.. PLOT::
set_verbose(-1)
g = plot(arccsc, -2, 2) # [-1, 1] is excluded automatically
sphinx_plot(g)
TESTS:
We do not randomize the endpoints::
sage: p = plot(x, (x,-1,1))
sage: p[0].xdata[0] == -1
True
sage: p[0].xdata[-1] == 1
True
We check to make sure that the x/y min/max data get set correctly
when there are multiple functions.
::
sage: d = plot([sin(x), cos(x)], 100, 120).get_minmax_data()
sage: d['xmin']
100.0
sage: d['xmax']
120.0
We check various combinations of tuples and functions, ending with
tests that lambda functions work properly with explicit variable
declaration, without a tuple.
::
sage: p = plot(lambda x: x,(x,-1,1))
sage: p = plot(lambda x: x,-1,1)
sage: p = plot(x,x,-1,1)
sage: p = plot(x,-1,1)
sage: p = plot(x^2,x,-1,1)
sage: p = plot(x^2,xmin=-1,xmax=2)
sage: p = plot(lambda x: x,x,-1,1)
sage: p = plot(lambda x: x^2,x,-1,1)
sage: p = plot(lambda x: 1/x,x,-1,1)
sage: f(x) = sin(x+3)-.1*x^3
sage: p = plot(lambda x: f(x),x,-1,1)
We check that we handle the case where the function gets evaluated at a
point which produces an 'inf' or '-inf' result.
::
sage: p = plot(1/x, 0, 1)
sage: p = plot(-1/x, 0, 1)
Bad options now give better errors::
sage: P = plot(sin(1/x), (x,-1,3), foo=10)
Traceback (most recent call last):
...
RuntimeError: Error in line(): option 'foo' not valid.
sage: P = plot(x, (x,1,1)) # trac ticket #11753
Traceback (most recent call last):
...
ValueError: plot start point and end point must be different
We test that we can plot `f(x)=x` (see :trac:`10246`)::
sage: f(x)=x; f
x |--> x
sage: plot(f,(x,-1,1))
Graphics object consisting of 1 graphics primitive
Check that :trac:`15030` is fixed::
sage: plot(abs(log(x)), x)
Graphics object consisting of 1 graphics primitive
Check that if excluded points are less than xmin then the exclusion
still works for polar and parametric plots. The following should
show two excluded points::
sage: set_verbose(-1)
sage: polar_plot(sin(sqrt(x^2-1)), (x,0,2*pi), exclude=[1/2,2,3])
Graphics object consisting of 3 graphics primitives
sage: parametric_plot((sqrt(x^2-1),sqrt(x^2-1/2)), (x,0,5), exclude=[1,2,3])
Graphics object consisting of 3 graphics primitives
sage: set_verbose(0)
Legends can contain variables with long names, :trac:`13543`::
sage: hello = var('hello')
sage: label = '$' + latex(hello) + '$'
sage: plot(x, x, 0, 1, legend_label=label)
Graphics object consisting of 1 graphics primitive
Extra keywords should be saved if the object has a plot method, :trac:`20924`::
sage: G = graphs.PetersenGraph()
sage: p = G.plot()
sage: p.aspect_ratio()
1.0
sage: pp = plot(G)
sage: pp.aspect_ratio()
1.0
"""
if 'color' in kwds and 'rgbcolor' in kwds:
raise ValueError('only one of color or rgbcolor should be specified')
elif 'color' in kwds:
kwds['rgbcolor'] = kwds.pop('color', (0,0,1)) # take blue as default ``rgbcolor``
G_kwds = Graphics._extract_kwds_for_show(kwds, ignore=['xmin', 'xmax'])
original_opts = kwds.pop('__original_opts', {})
do_show = kwds.pop('show',False)
from sage.structure.element import is_Vector
if kwds.get('parametric',False) and is_Vector(funcs):
funcs = tuple(funcs)
if hasattr(funcs, 'plot'):
G = funcs.plot(*args, **original_opts)
# If we have extra keywords already set, then update them
for ext in G._extra_kwds:
if ext in G_kwds:
G_kwds[ext] = G._extra_kwds[ext]
# if we are using the generic plotting method
else:
n = len(args)
# if there are no extra args, try to get xmin,xmax from
# keyword arguments or pick some silly default
if n == 0:
xmin = kwds.pop('xmin', -1)
xmax = kwds.pop('xmax', 1)
G = _plot(funcs, (xmin, xmax), **kwds)
# if there is one extra arg, then it had better be a tuple
elif n == 1:
G = _plot(funcs, *args, **kwds)
elif n == 2:
# if there are two extra args, then pull them out and pass them as a tuple
xmin = args[0]
xmax = args[1]
args = args[2:]
G = _plot(funcs, (xmin, xmax), *args, **kwds)
elif n == 3:
# if there are three extra args, then pull them out and pass them as a tuple
var = args[0]
xmin = args[1]
xmax = args[2]
args = args[3:]
G = _plot(funcs, (var, xmin, xmax), *args, **kwds)
elif ('xmin' in kwds) or ('xmax' in kwds):
xmin = kwds.pop('xmin', -1)
xmax = kwds.pop('xmax', 1)
G = _plot(funcs, (xmin, xmax), *args, **kwds)
pass
else:
sage.misc.misc.verbose("there were %s extra arguments (besides %s)" % (n, funcs), level=0)
G = Graphics()  # fall back to an empty plot so the call below does not raise NameError
G._set_extra_kwds(G_kwds)
if do_show:
G.show()
return G
def _plot(funcs, xrange, parametric=False,
polar=False, fill=False, label='', randomize=True, **options):
"""
Internal function which does the actual plotting.
INPUT:
- ``funcs`` - function or list of functions to be plotted
- ``xrange`` - two or three tuple of [input variable], min and max
- ``parametric`` - (default: False) a boolean for whether
this is a parametric plot
- ``polar`` - (default: False) a boolean for whether
this is a polar plot
- ``fill`` - (default: False) an input for whether
this plot is filled
- ``randomize`` - (default: True) a boolean for whether
to use random plot points
The following option is deprecated in favor of ``legend_label``:
- ``label`` - (default: '') a string for the label
All other usual plot options are also accepted; a number of them are
required (see the example below) and are normally passed
through the options decorator to :func:`plot`.
OUTPUT:
- A ``Graphics`` object
EXAMPLES:
See :func:`plot` for many, many implicit examples.
Here is an explicit one::
sage: from sage.plot.plot import _plot
sage: P = _plot(e^(-x^2),(-3,3),fill=True,color='red',plot_points=50,adaptive_tolerance=2,adaptive_recursion=True,exclude=None)
sage: P.show(aspect_ratio='automatic')
TESTS:
Make sure that we get the right number of legend entries as the number of
functions varies (:trac:`10514`)::
sage: p1 = plot(1*x, legend_label='1x')
sage: p2 = plot(2*x, legend_label='2x', color='green')
sage: p1+p2
Graphics object consisting of 2 graphics primitives
::
sage: len(p1.matplotlib().axes[0].legend().texts)
1
sage: len((p1+p2).matplotlib().axes[0].legend().texts)
2
sage: q1 = plot([sin(x), tan(x)], color='blue', legend_label='trig')
sage: len(q1.matplotlib().axes[0].legend().texts)
2
sage: q1
Graphics object consisting of 2 graphics primitives
sage: q2 = plot([sin(x), tan(x)], legend_label={1:'tan'})
sage: len(q2.matplotlib().axes[0].legend().texts)
2
sage: q2
Graphics object consisting of 2 graphics primitives
Make sure that we don't get multiple legend labels for plot segments
(:trac:`11998`)::
sage: p1 = plot(1/(x^2-1),(x,-2,2),legend_label="foo",detect_poles=True)
sage: len(p1.matplotlib().axes[0].legend().texts)
1
sage: p1.show(ymin=-10,ymax=10) # should be one legend
Parametric plots that get evaluated at invalid points should still
plot properly (:trac:`13246`)::
sage: parametric_plot((x, arcsec(x)), (x, -2, 2))
Graphics object consisting of 2 graphics primitives
"""
from sage.plot.colors import Color
from sage.plot.misc import setup_for_eval_on_grid
if funcs == []:
return Graphics()
orig_funcs = funcs # keep the original functions (for use in legend labels)
excluded_points = []
funcs, ranges = setup_for_eval_on_grid(funcs, [xrange], options['plot_points'])
xmin, xmax, delta = ranges[0]
xrange=ranges[0][:2]
# parametric_plot will be a list or tuple of two functions (f,g)
# and will be plotted as (f(x), g(x)) for all x in the given range
if parametric:
f, g = funcs
# or we have only a single function to be plotted:
else:
f = funcs
# Keep ``rgbcolor`` option 'automatic' only for lists of functions.
# Otherwise, let the plot color be 'blue'.
if parametric or not isinstance(funcs, (list, tuple)):
if 'rgbcolor' in options.keys() and options['rgbcolor'] == 'automatic':
options['rgbcolor'] = (0, 0, 1) # default color for a single curve.
# Check to see if funcs is a list of functions that will be all plotted together.
if isinstance(funcs, (list, tuple)) and not parametric:
def golden_rainbow(i,lightness=0.4):
# note: sage's "blue" has hue-saturation-lightness values (2/3, 1, 1/2).
g = 0.61803398875
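# stepping the hue by the golden-ratio conjugate keeps successive colors well separated on the color wheel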
return Color((0.66666666666666 + i*g) % 1, 1, lightness, space='hsl')
default_line_styles = ("-", "--", "-.", ":")*len(funcs)
G = Graphics()
for i, h in enumerate(funcs):
options_temp = options.copy()
color_temp = options_temp.pop('rgbcolor', 'automatic')
fill_temp = options_temp.pop('fill', fill)
fillcolor_temp = options_temp.pop('fillcolor', 'automatic') # perhaps the 2nd argument should be ``options_temp['color']``
linestyle_temp = options_temp.pop('linestyle', None)
legend_label_temp = options_temp.pop('legend_label', None)
legend_color_temp = options_temp.pop('legend_color', None)
# passed more than one color directive?
one_plot_color = False
if isinstance(color_temp, dict):
if i in color_temp:
color_entry = color_temp[i]
else:
color_entry = golden_rainbow(i)
elif isinstance(color_temp, (list, tuple)) and isinstance(color_temp[0], (str, list, tuple)):
if i < len(color_temp):
color_entry = color_temp[i]
else:
color_entry = golden_rainbow(i)
elif color_temp == 'automatic':
color_entry = golden_rainbow(i)
else:
# assume a single color was assigned for all plots
one_plot_color = True
color_entry = color_temp
# passed more than one fill directive?
if isinstance(fill_temp, dict):
if i in fill_temp:
fill_entry_listQ = fill_temp[i]
if isinstance(fill_entry_listQ, list):
if fill_entry_listQ[0] < len(funcs):
fill_entry = funcs[fill_entry_listQ[0]]
else:
fill_entry = False
else:
fill_entry = fill_entry_listQ
else:
fill_entry = False
elif isinstance(fill_temp, (list, tuple)):
if i < len(fill_temp):
fill_entry_listQ = fill_temp[i]
if isinstance(fill_entry_listQ, list):
if fill_entry_listQ[0] < len(funcs):
fill_entry = funcs[fill_entry_listQ[0]]
else:
fill_entry = False
else:
fill_entry = fill_entry_listQ
else:
fill_entry = False
else:
fill_entry = fill_temp
# passed more than one fillcolor directive?
fillcolor_entry = 'gray' # the default choice
if isinstance(fillcolor_temp, dict):
if i in fillcolor_temp:
fillcolor_entry = fillcolor_temp[i]
else:
fillcolor_entry = golden_rainbow(i,0.85)
elif isinstance(fillcolor_temp, (list, tuple)):
if i < len(fillcolor_temp):
fillcolor_entry = fillcolor_temp[i]
else:
fillcolor_entry = golden_rainbow(i,0.85)
elif fillcolor_temp == 'automatic':
# check that we haven't overwritten automatic multi-colors in color_temp
if len(funcs) > 1 and not one_plot_color:
fillcolor_entry = golden_rainbow(i,0.85)
elif fillcolor_temp is not None:
fillcolor_entry = fillcolor_temp
# passed more than one linestyle directive?
if isinstance(linestyle_temp, dict):
if i in linestyle_temp:
linestyle_entry = linestyle_temp[i]
else:
linestyle_entry = default_line_styles[i]
elif isinstance(linestyle_temp, (list, tuple)):
if i < len(linestyle_temp):
linestyle_entry = linestyle_temp[i]
else:
linestyle_entry = default_line_styles[i]
elif linestyle_temp == 'automatic':
linestyle_entry = default_line_styles[i]
elif linestyle_temp is None:
linestyle_entry = 'solid'
else:
linestyle_entry = linestyle_temp
# passed more than one legend_label directive?
legend_label_entry = orig_funcs[i].__repr__() # the 'automatic' choice
if isinstance(legend_label_temp, dict):
if i in legend_label_temp:
legend_label_entry = legend_label_temp[i]
elif isinstance(legend_label_temp, (list, tuple)):
if i < len(legend_label_temp):
legend_label_entry = legend_label_temp[i]
elif legend_label_temp != 'automatic':
# assume it is None or str.
legend_label_entry = legend_label_temp
# passed more than one legend_color directive?
legend_color_entry = 'black' # the default choice
if isinstance(legend_color_temp, dict):
if i in legend_color_temp:
legend_color_entry = legend_color_temp[i]
elif isinstance(legend_color_temp, (list, tuple)) and isinstance(legend_color_temp[0], (str, list, tuple)):
if i < len(legend_color_temp):
legend_color_entry = legend_color_temp[i]
elif legend_color_temp == 'automatic':
if len(funcs)>1:
legend_color_entry = golden_rainbow(i)
elif legend_color_temp is not None:
legend_color_entry = legend_color_temp
G += plot(h, xrange, polar=polar, fill=fill_entry, fillcolor=fillcolor_entry, \
rgbcolor=color_entry, linestyle=linestyle_entry, \
legend_label=legend_label_entry, legend_color=legend_color_entry, **options_temp)
return G
adaptive_tolerance = options.pop('adaptive_tolerance')
adaptive_recursion = options.pop('adaptive_recursion')
plot_points = int(options.pop('plot_points'))
exclude = options.pop('exclude')
if exclude is not None:
from sage.symbolic.expression import Expression
if isinstance(exclude, Expression) and exclude.is_relational():
if len(exclude.variables()) > 1:
raise ValueError('exclude has to be an equation of only one variable')
v = exclude.variables()[0]
points = [e.right() for e in exclude.solve(v) if e.left() == v and (v not in e.right().variables())]
# We are only interested in real solutions
for x in points:
try:
excluded_points.append(float(x))
except TypeError:
pass
excluded_points.sort()
# Otherwise, exclude itself must be a list (or tuple) of numbers
elif isinstance(exclude, (list, tuple)):
excluded_points = sorted(exclude)
else:
raise ValueError('exclude needs to be a list of numbers or an equation')
# We make sure that plot points close to the excluded points are computed
epsilon = 0.001*(xmax - xmin)
initial_points = reduce(lambda a,b: a+b,
[[x - epsilon, x + epsilon]
for x in excluded_points], [])
data = generate_plot_points(f, xrange, plot_points,
adaptive_tolerance, adaptive_recursion,
randomize, initial_points)
else:
data = generate_plot_points(f, xrange, plot_points,
adaptive_tolerance, adaptive_recursion,
randomize)
for i in range(len(data)-1):
# If the difference between consecutive x-values is more than
# 2 times the difference between two consecutive plot points, then
# add an exclusion point.
if abs(data[i+1][0] - data[i][0]) > 2*abs(xmax - xmin)/plot_points:
excluded_points.append((data[i][0] + data[i+1][0])/2)
if parametric:
# We need the original x-values to be able to exclude points in parametric plots
exclude_data = data
newdata = []
for x,fdata in data:
try:
newdata.append((fdata, g(x)))
except (ValueError, TypeError):
newdata.append((fdata, 0)) # append a dummy value 0
excluded_points.append(x)
data = newdata
excluded_points.sort(reverse=True)
G = Graphics()
fillcolor = options.pop('fillcolor', 'automatic')
fillalpha = options.pop('fillalpha', 0.5)
# TODO: Use matplotlib's fill and fill_between commands.
if fill is not False and fill is not None:
if parametric:
filldata = data
else:
if fill == 'axis' or fill is True:
base_level = 0
elif fill == 'min':
base_level = min(t[1] for t in data)
elif fill == 'max':
base_level = max(t[1] for t in data)
elif hasattr(fill, '__call__'):
if fill == max or fill == min:
if fill == max:
fstr = 'max'
else:
fstr = 'min'
msg = "WARNING: You use the built-in function %s for filling. You probably wanted the string '%s'." % (fstr, fstr)
sage.misc.misc.verbose(msg, level=0)
if not is_fast_float(fill):
fill_f = fast_float(fill, expect_one_var=True)
else:
fill_f = fill
filldata = generate_plot_points(fill_f, xrange, plot_points, adaptive_tolerance, \
adaptive_recursion, randomize)
filldata.reverse()
filldata += data
else:
try:
base_level = float(fill)
except TypeError:
base_level = 0
if not hasattr(fill, '__call__') and polar:
filldata = generate_plot_points(lambda x: base_level, xrange, plot_points, adaptive_tolerance, \
adaptive_recursion, randomize)
filldata.reverse()
filldata += data
if not hasattr(fill, '__call__') and not polar:
filldata = [(data[0][0], base_level)] + data + [(data[-1][0], base_level)]
if fillcolor == 'automatic':
fillcolor = (0.5, 0.5, 0.5)
fill_options = {}
fill_options['rgbcolor'] = fillcolor
fill_options['alpha'] = fillalpha
fill_options['thickness'] = 0
if polar:
filldata = [(y*cos(x), y*sin(x)) for x, y in filldata]
G += polygon(filldata, **fill_options)
# We need the original data to be able to exclude points in polar plots
if not parametric:
exclude_data = data
if polar:
data = [(y*cos(x), y*sin(x)) for x, y in data]
detect_poles = options.pop('detect_poles', False)
legend_label = options.pop('legend_label', None)
if excluded_points or detect_poles:
start_index = 0
# setup for pole detection
from sage.rings.all import RDF
epsilon = 0.0001
pole_options = {}
pole_options['linestyle'] = '--'
pole_options['thickness'] = 1
pole_options['rgbcolor'] = '#ccc'
# setup for exclusion points
exclusion_point = 0
if excluded_points:
exclusion_point = excluded_points.pop()
flag = True
for i in range(len(data)-1):
x0, y0 = exclude_data[i]
x1, y1 = exclude_data[i+1]
# detect poles
if (not (polar or parametric)) and detect_poles \
and ((y1 > 0 and y0 < 0) or (y1 < 0 and y0 > 0)):
# calculate the slope of the line segment
dy = abs(y1-y0)
dx = x1 - x0
alpha = (RDF(dy)/RDF(dx)).arctan()
if alpha >= RDF(pi/2) - epsilon:
G += line(data[start_index:i], **options)
if detect_poles == 'show':
# draw a vertical asymptote
G += line([(x0, y0), (x1, y1)], **pole_options)
start_index = i+2
# exclude points
if x0 > exclusion_point:
while exclusion_point <= x1:
try:
exclusion_point = excluded_points.pop()
except IndexError:
# all excluded points were considered
flag = False
break
elif flag and (x0 <= exclusion_point <= x1):
G += line(data[start_index:i], **options)
start_index = i + 2
while exclusion_point <= x1:
try:
exclusion_point = excluded_points.pop()
except IndexError:
# all excluded points were considered
flag = False
break
G += line(data[start_index:], legend_label=legend_label, **options)
else:
G += line(data, legend_label=legend_label, **options)
return G
########## misc functions ###################
@options(aspect_ratio=1.0)
def parametric_plot(funcs, *args, **kwargs):
r"""
Plot a parametric curve or surface in 2d or 3d.
:func:`parametric_plot` takes two or three functions as a
list or a tuple and makes a plot with the first function giving the
`x` coordinates, the second function giving the `y`
coordinates, and the third function (if present) giving the
`z` coordinates.
In the 2d case, :func:`parametric_plot` is equivalent to the :func:`plot` command
with the option ``parametric=True``. In the 3d case, :func:`parametric_plot`
is equivalent to :func:`~sage.plot.plot3d.parametric_plot3d.parametric_plot3d`.
See each of these functions for more help and examples.
INPUT:
- ``funcs`` - 2 or 3-tuple of functions, or a vector of dimension 2 or 3.
- ``other options`` - passed to :func:`plot` or :func:`~sage.plot.plot3d.parametric_plot3d.parametric_plot3d`
EXAMPLES: We draw some 2d parametric plots. Note that the default aspect ratio
is 1, so that circles look like circles. ::
sage: t = var('t')
sage: parametric_plot( (cos(t), sin(t)), (t, 0, 2*pi))
Graphics object consisting of 1 graphics primitive
.. PLOT::
t = var('t')
g = parametric_plot( (cos(t), sin(t)), (t, 0, 2*pi))
sphinx_plot(g)
::
sage: parametric_plot( (sin(t), sin(2*t)), (t, 0, 2*pi), color=hue(0.6) )
Graphics object consisting of 1 graphics primitive
.. PLOT::
t = var('t')
g = parametric_plot( (sin(t), sin(2*t)), (t, 0, 2*pi), color=hue(0.6))
sphinx_plot(g)
::
sage: parametric_plot((1, t), (t, 0, 4))
Graphics object consisting of 1 graphics primitive
.. PLOT::
t =var('t')
g = parametric_plot((1, t), (t, 0, 4))
sphinx_plot(g)
Note that in parametric_plot, there is only fill or no fill.
::
sage: parametric_plot((t, t^2), (t, -4, 4), fill=True)
Graphics object consisting of 2 graphics primitives
.. PLOT::
t =var('t')
g = parametric_plot((t, t**2), (t, -4, 4), fill=True)
sphinx_plot(g)
A filled Hypotrochoid::
sage: parametric_plot([cos(x) + 2 * cos(x/4), sin(x) - 2 * sin(x/4)], (x,0, 8*pi), fill=True)
Graphics object consisting of 2 graphics primitives
.. PLOT::
g = parametric_plot([cos(x) + 2 * cos(x/4), sin(x) - 2 * sin(x/4)], (x,0, 8*pi), fill=True)
sphinx_plot(g)
::
sage: parametric_plot( (5*cos(x), 5*sin(x), x), (x,-12, 12), plot_points=150, color="red") # long time
Graphics3d Object
.. PLOT::
#AttributeError: 'Line' object has no attribute 'plot'
#g = parametric_plot( (5*cos(x), 5*sin(x), x), (x,-12, 12), plot_points=150, color="red") # long time
#sphinx_plot(g)
::
sage: y=var('y')
sage: parametric_plot( (5*cos(x), x*y, cos(x*y)), (x, -4,4), (y,-4,4)) # long time`
Graphics3d Object
.. PLOT::
#AttributeError: 'sage.plot.plot3d.parametric_surface.ParametricSurf' object has no attribute 'plot'
#y = var('y')
#g = parametric_plot( (5*cos(x), x*y, cos(x*y)), (x, -4,4), (y,-4,4)) # long time`
#sphinx_plot(g)
::
sage: t=var('t')
sage: parametric_plot( vector((sin(t), sin(2*t))), (t, 0, 2*pi), color='green') # long time
Graphics object consisting of 1 graphics primitive
.. PLOT::
t = var('t')
g = parametric_plot( vector((sin(t), sin(2*t))), (t, 0, 2*pi), color='green') # long time
sphinx_plot(g)
::
sage: t = var('t')
sage: parametric_plot( vector([t, t+1, t^2]), (t, 0, 1)) # long time
Graphics3d Object
.. PLOT::
#t = var('t')
#g = parametric_plot( vector([t, t+1, t**2]), (t, 0, 1)) # long time
#sphinx_plot(g)
Plotting in logarithmic scale is possible with 2D plots. The keyword
``aspect_ratio`` will be ignored if the scale is not ``'loglog'`` or
``'linear'``.::
sage: parametric_plot((x, x**2), (x, 1, 10), scale='loglog')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = parametric_plot((x, x**2), (x, 1, 10), scale='loglog')
sphinx_plot(g)
We can also change the scale of the axes in the graphics just before
displaying. In this case, the ``aspect_ratio`` must be specified as
``'automatic'`` if the ``scale`` is set to ``'semilogx'`` or ``'semilogy'``. For
other values of the ``scale`` parameter, any ``aspect_ratio`` can be
used, or the keyword need not be provided.::
sage: p = parametric_plot((x, x**2), (x, 1, 10))
sage: p.show(scale='semilogy', aspect_ratio='automatic')
TESTS::
sage: parametric_plot((x, t^2), (x, -4, 4))
Traceback (most recent call last):
...
ValueError: there are more variables than variable ranges
sage: parametric_plot((1, x+t), (x, -4, 4))
Traceback (most recent call last):
...
ValueError: there are more variables than variable ranges
sage: parametric_plot((-t, x+t), (x, -4, 4))
Traceback (most recent call last):
...
ValueError: there are more variables than variable ranges
sage: parametric_plot((1, x+t, y), (x, -4, 4), (t, -4, 4))
Traceback (most recent call last):
...
ValueError: there are more variables than variable ranges
sage: parametric_plot((1, x, y), 0, 4)
Traceback (most recent call last):
...
ValueError: there are more variables than variable ranges
"""
num_ranges=0
for i in args:
if isinstance(i, (list, tuple)):
num_ranges+=1
else:
break
if num_ranges==0 and len(args)>=2:
from sage.misc.superseded import deprecation
deprecation(7008, "variable ranges to parametric_plot must be given as tuples, like (2,4) or (t,2,3)")
args=tuple(args)
num_ranges=1
num_funcs = len(funcs)
num_vars=len(sage.plot.misc.unify_arguments(funcs)[0])
if num_vars>num_ranges:
raise ValueError("there are more variables than variable ranges")
# Reset aspect_ratio to 'automatic' in case scale is 'semilog[xy]'.
# Otherwise matplotlib complains.
scale = kwargs.get('scale', None)
if isinstance(scale, (list, tuple)):
scale = scale[0]
if scale == 'semilogy' or scale == 'semilogx':
kwargs['aspect_ratio'] = 'automatic'
if num_funcs == 2 and num_ranges == 1:
kwargs['parametric'] = True
return plot(funcs, *args, **kwargs)
elif (num_funcs == 3 and num_ranges <= 2):
return sage.plot.plot3d.parametric_plot3d.parametric_plot3d(funcs, *args, **kwargs)
else:
raise ValueError("the number of functions and the number of variable ranges is not a supported combination for a 2d or 3d parametric plots")
@options(aspect_ratio=1.0)
def polar_plot(funcs, *args, **kwds):
r"""
``polar_plot`` takes a single function or a list or
tuple of functions and plots them with polar coordinates in the given
domain.
This function is equivalent to the :func:`plot` command with the options
``polar=True`` and ``aspect_ratio=1``. For more help on options,
see the documentation for :func:`plot`.
INPUT:
- ``funcs`` - a function
- other options are passed to plot
EXAMPLES:
Here is a blue 8-leaved petal::
sage: polar_plot(sin(5*x)^2, (x, 0, 2*pi), color='blue')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = polar_plot(sin(5*x)**2, (x, 0, 2*pi), color='blue')
sphinx_plot(g)
A red figure-8::
sage: polar_plot(abs(sqrt(1 - sin(x)^2)), (x, 0, 2*pi), color='red')
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = polar_plot(abs(sqrt(1 - sin(x)**2)), (x, 0, 2*pi), color='red')
sphinx_plot(g)
A green limacon of Pascal::
sage: polar_plot(2 + 2*cos(x), (x, 0, 2*pi), color=hue(0.3))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = polar_plot(2 + 2*cos(x), (x, 0, 2*pi), color=hue(0.3))
sphinx_plot(g)
Several polar plots::
sage: polar_plot([2*sin(x), 2*cos(x)], (x, 0, 2*pi))
Graphics object consisting of 2 graphics primitives
.. PLOT::
g = polar_plot([2*sin(x), 2*cos(x)], (x, 0, 2*pi))
sphinx_plot(g)
A filled spiral::
sage: polar_plot(sqrt, 0, 2 * pi, fill=True)
Graphics object consisting of 2 graphics primitives
.. PLOT::
g = polar_plot(sqrt, 0, 2 * pi, fill=True)
sphinx_plot(g)
Fill the area between two functions::
sage: polar_plot(cos(4*x) + 1.5, 0, 2*pi, fill=0.5 * cos(4*x) + 2.5, fillcolor='orange')
Graphics object consisting of 2 graphics primitives
.. PLOT::
g = polar_plot(cos(4*x) + 1.5, 0, 2*pi, fill=0.5 * cos(4*x) + 2.5, fillcolor='orange')
sphinx_plot(g)
Fill the area between several spirals::
sage: polar_plot([(1.2+k*0.2)*log(x) for k in range(6)], 1, 3 * pi, fill={0: [1], 2: [3], 4: [5]})
Graphics object consisting of 9 graphics primitives
.. PLOT::
g = polar_plot([(1.2+k*0.2)*log(x) for k in range(6)], 1, 3 * pi, fill={0: [1], 2: [3], 4: [5]})
sphinx_plot(g)
Exclude points at discontinuities::
sage: polar_plot(log(floor(x)), (x, 1, 4*pi), exclude=[1..12])
Graphics object consisting of 12 graphics primitives
.. PLOT::
g = polar_plot(log(floor(x)), (x, 1, 4*pi), exclude=list(range(1,13)))
sphinx_plot(g)
"""
kwds['polar']=True
return plot(funcs, *args, **kwds)
@options(aspect_ratio='automatic')
def list_plot(data, plotjoined=False, **kwargs):
r"""
``list_plot`` takes either a list of numbers, a list of tuples, a numpy
array, or a dictionary and plots the corresponding points.
If given a list of numbers (that is, not a list of tuples or lists),
``list_plot`` forms a list of tuples ``(i, x_i)`` where ``i`` goes from
0 to ``len(data)-1`` and ``x_i`` is the ``i``-th data value, and puts
points at those tuple values.
``list_plot`` will plot a list of complex numbers in the obvious
way; any numbers for which
:func:`CC()<sage.rings.complex_field.ComplexField>` makes sense will
work.
``list_plot`` also takes a list of tuples ``(x_i, y_i)`` where ``x_i``
and ``y_i`` are the ``i``-th values representing the ``x``- and
``y``-values, respectively.
If given a dictionary, ``list_plot`` interprets the keys as
`x`-values and the values as `y`-values.
The ``plotjoined=True`` option tells ``list_plot`` to plot a line
joining all the data.
For other keyword options that the ``list_plot`` function can
take, refer to :func:`~sage.plot.plot.plot`.
It is possible to pass empty dictionaries, lists, or tuples to
``list_plot``. Doing so will plot nothing (returning an empty plot).
EXAMPLES::
sage: list_plot([i^2 for i in range(5)]) # long time
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = list_plot([i**2 for i in range(5)]) # long time
sphinx_plot(g)
Here are a bunch of random red points::
sage: r = [(random(),random()) for _ in range(20)]
sage: list_plot(r, color='red')
Graphics object consisting of 1 graphics primitive
.. PLOT::
r = [(random(),random()) for _ in range(20)]
g = list_plot(r, color='red')
sphinx_plot(g)
This gives all the random points joined in a purple line::
sage: list_plot(r, plotjoined=True, color='purple')
Graphics object consisting of 1 graphics primitive
.. PLOT::
r = [(random(),random()) for _ in range(20)]
g = list_plot(r, plotjoined=True, color='purple')
sphinx_plot(g)
You can provide a numpy array.::
sage: import numpy
sage: list_plot(numpy.arange(10))
Graphics object consisting of 1 graphics primitive
.. PLOT::
import numpy
g = list_plot(numpy.arange(10))
sphinx_plot(g)
::
sage: list_plot(numpy.array([[1,2], [2,3], [3,4]]))
Graphics object consisting of 1 graphics primitive
.. PLOT::
import numpy
g = list_plot(numpy.array([[1,2], [2,3], [3,4]]))
sphinx_plot(g)
Plot a list of complex numbers::
sage: list_plot([1, I, pi + I/2, CC(.25, .25)])
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = list_plot([1, I, pi + I/2, CC(.25, .25)])
sphinx_plot(g)
::
sage: list_plot([exp(I*theta) for theta in [0, .2..pi]])
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = list_plot([exp(I*theta) for theta in srange(0,pi,0.2)])
sphinx_plot(g)
Note that if the complex numbers in your list are all actually real,
they get plotted as real values, so this
::
sage: list_plot([CDF(1), CDF(1/2), CDF(1/3)])
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = list_plot([CDF(1), CDF(1/2), CDF(1/3)])
sphinx_plot(g)
is the same as ``list_plot([1, 1/2, 1/3])`` -- it produces a plot of
the points `(0,1)`, `(1,1/2)`, and `(2,1/3)`.
If you have separate lists of `x` values and `y` values which you
want to plot against each other, use the ``zip`` command to make a
single list whose entries are pairs of `(x,y)` values, and feed
the result into ``list_plot``::
sage: x_coords = [cos(t)^3 for t in srange(0, 2*pi, 0.02)]
sage: y_coords = [sin(t)^3 for t in srange(0, 2*pi, 0.02)]
sage: list_plot(list(zip(x_coords, y_coords)))
Graphics object consisting of 1 graphics primitive
.. PLOT::
x_coords = [cos(t)**3 for t in srange(0, 2*pi, 0.02)]
y_coords = [sin(t)**3 for t in srange(0, 2*pi, 0.02)]
g = list_plot(list(zip(x_coords, y_coords)))
sphinx_plot(g)
If instead you try to pass the two lists as separate arguments,
you will get an error message::
sage: list_plot(x_coords, y_coords)
Traceback (most recent call last):
...
TypeError: The second argument 'plotjoined' should be boolean (True or False). If you meant to plot two lists 'x' and 'y' against each other, use 'list_plot(list(zip(x,y)))'.
Dictionaries with numeric keys and values can be plotted::
sage: list_plot({22: 3365, 27: 3295, 37: 3135, 42: 3020, 47: 2880, 52: 2735, 57: 2550})
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = list_plot({22: 3365, 27: 3295, 37: 3135, 42: 3020, 47: 2880, 52: 2735, 57: 2550})
sphinx_plot(g)
Plotting in logarithmic scale is possible for 2D list plots.
There are two different syntaxes available::
sage: yl = [2**k for k in range(20)]
sage: list_plot(yl, scale='semilogy') # long time # log axis on vertical
Graphics object consisting of 1 graphics primitive
.. PLOT::
yl = [2**k for k in range(20)]
g = list_plot(yl, scale='semilogy') # long time # log axis on vertical
sphinx_plot(g)
::
sage: list_plot_semilogy(yl) # same
Graphics object consisting of 1 graphics primitive
.. warning::
If ``plotjoined`` is ``False`` then the axis that is in log scale
must have all points strictly positive. For instance, the following
plot will show no points in the figure since the first point,
`(0,1)`, lies at `x = 0` on the horizontal axis. Further, matplotlib
will display a user warning.
::
sage: list_plot(yl, scale='loglog') # both axes are log
doctest:warning
...
Graphics object consisting of 1 graphics primitive
Instead this will work. We drop the point `(0,1)`.::
sage: list_plot(list(zip(range(1,len(yl)), yl[1:])), scale='loglog') # long time
Graphics object consisting of 1 graphics primitive
We use :func:`list_plot_loglog` and plot in a different base.::
sage: list_plot_loglog(list(zip(range(1,len(yl)), yl[1:])), base=2) # long time
Graphics object consisting of 1 graphics primitive
.. PLOT::
yl = [2**k for k in range(20)]
g = list_plot_loglog(list(zip(range(1,len(yl)), yl[1:])), base=2) # long time
sphinx_plot(g)
We can also change the scale of the axes in the graphics just before
displaying::
sage: G = list_plot(yl) # long time
sage: G.show(scale=('semilogy', 2)) # long time
TESTS:
We check to see whether elements of the Symbolic Ring are properly
handled; see :trac:`16378` ::
sage: list_plot([1+I, 2+I])
Graphics object consisting of 1 graphics primitive
sage: list_plot([1+I, 2, CC(3+I)])
Graphics object consisting of 1 graphics primitive
sage: list_plot([2, SR(1), CC(1+i)])
Graphics object consisting of 1 graphics primitive
We check to see that the x/y min/max data are set correctly::
sage: d = list_plot([(100,100), (120, 120)]).get_minmax_data()
sage: d['xmin']
100.0
sage: d['ymin']
100.0
"""
from sage.plot.all import point
try:
if not data:
return Graphics()
except ValueError: # numpy raises ValueError if it is not empty
pass
if not isinstance(plotjoined, bool):
raise TypeError("The second argument 'plotjoined' should be boolean "
"(True or False). If you meant to plot two lists 'x' "
"and 'y' against each other, use 'list_plot(list(zip(x,y)))'.")
if isinstance(data, dict):
if plotjoined:
list_data = sorted(list(iteritems(data)))
else:
list_data = list(iteritems(data))
return list_plot(list_data, plotjoined=plotjoined, **kwargs)
try:
from sage.rings.all import RDF
RDF(data[0])
data = list(enumerate(data))
except TypeError: # we can get this TypeError if the element is a list
# or tuple or numpy array, or an element of CC, CDF
# We also want to avoid doing CC(data[0]) here since it will go
# through if data[0] is really a tuple and every element of the
# data will be converted to a complex and later converted back to
# a tuple.
# So, the only other check we need to do is whether data[0] is an
# element of the Symbolic Ring.
if data[0] in sage.symbolic.ring.SR:
data = list(enumerate(data))
try:
if plotjoined:
return line(data, **kwargs)
else:
return point(data, **kwargs)
except (TypeError, IndexError):
# Assume we have complex-valued input and plot real and imaginary parts.
# Need to catch IndexError because if data is, say, [(0, 1), (1, I)],
# point3d() throws an IndexError on the (0,1) before it ever
# gets to (1, I).
from sage.rings.complex_field import ComplexField
CC = ComplexField()
# if we get here, we already did "list(enumerate(data))",
# so look at z[1] in inner list
data = [(z.real(), z.imag()) for z in [CC(z[1]) for z in data]]
if plotjoined:
return line(data, **kwargs)
else:
return point(data, **kwargs)
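# A minimal illustrative sketch (a hypothetical helper, not part of the
# public API and not used elsewhere): the fallback branch above converts
# complex-valued input into (real, imaginary) pairs before plotting.  The
# helper mirrors that conversion using plain Python ``complex`` instead of
# ``ComplexField``.
def _complex_to_xy_sketch(values):
    """
    Return the list of ``(real, imag)`` pairs for the given values.

    Illustration of the conversion performed by ``list_plot`` for
    complex input; for example ``_complex_to_xy_sketch([1+2j, 3])``
    gives ``[(1.0, 2.0), (3.0, 0.0)]``.
    """
    return [(complex(z).real, complex(z).imag) for z in values]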
#------------------------ Graphs on log scale ---------------------------#
@options(base=10)
def plot_loglog(funcs, *args, **kwds):
"""
Plot graphics in 'loglog' scale, that is, both the horizontal and the
vertical axes will be in logarithmic scale.
INPUT:
- ``base`` -- (default: 10) the base of the logarithm. This must be
      greater than 1. The base can also be given as a list or tuple
``(basex, basey)``. ``basex`` sets the base of the logarithm along the
horizontal axis and ``basey`` sets the base along the vertical axis.
    - ``funcs`` -- any Sage object which is acceptable to :func:`plot`.
For all other inputs, look at the documentation of :func:`plot`.
EXAMPLES::
sage: plot_loglog(exp, (1,10)) # plot in loglog scale with base 10
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_loglog(exp, (1,10)) # plot in loglog scale with base 10
sphinx_plot(g)
::
sage: plot_loglog(exp, (1,10), base=2.1) # long time # with base 2.1 on both axes
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_loglog(exp, (1,10), base=2.1) # long time # with base 2.1 on both axes
sphinx_plot(g)
::
sage: plot_loglog(exp, (1,10), base=(2,3))
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_loglog(exp, (1,10), base=(2,3))
sphinx_plot(g)
"""
return plot(funcs, *args, scale='loglog', **kwds)
@options(base=10)
def plot_semilogx(funcs, *args, **kwds):
"""
Plot graphics in 'semilogx' scale, that is, the horizontal axis will be
in logarithmic scale.
INPUT:
- ``base`` -- (default: 10) the base of the logarithm. This must be
greater than 1.
    - ``funcs`` -- any Sage object which is acceptable to :func:`plot`.
For all other inputs, look at the documentation of :func:`plot`.
EXAMPLES::
sage: plot_semilogx(exp, (1,10)) # long time # plot in semilogx scale, base 10
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_semilogx(exp, (1,10)) # long time # plot in semilogx scale, base 10
sphinx_plot(g)
::
sage: plot_semilogx(exp, (1,10), base=2) # with base 2
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_semilogx(exp, (1,10), base=2) # with base 2
sphinx_plot(g)
"""
return plot(funcs, *args, scale='semilogx', **kwds)
@options(base=10)
def plot_semilogy(funcs, *args, **kwds):
"""
Plot graphics in 'semilogy' scale, that is, the vertical axis will be
in logarithmic scale.
INPUT:
- ``base`` -- (default: 10) the base of the logarithm. This must be
greater than 1.
    - ``funcs`` -- any Sage object which is acceptable to :func:`plot`.
For all other inputs, look at the documentation of :func:`plot`.
EXAMPLES::
sage: plot_semilogy(exp, (1,10)) # long time # plot in semilogy scale, base 10
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_semilogy(exp, (1,10)) # long time # plot in semilogy scale, base 10
sphinx_plot(g)
::
sage: plot_semilogy(exp, (1,10), base=2) # long time # with base 2
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = plot_semilogy(exp, (1,10), base=2) # long time # with base 2
sphinx_plot(g)
"""
return plot(funcs, *args, scale='semilogy', **kwds)
@options(base=10)
def list_plot_loglog(data, plotjoined=False, **kwds):
"""
Plot the ``data`` in 'loglog' scale, that is, both the horizontal and the
vertical axes will be in logarithmic scale.
INPUT:
- ``base`` -- (default: 10) the base of the logarithm. This must be
      greater than 1. The base can also be given as a list or tuple
``(basex, basey)``. ``basex`` sets the base of the logarithm along the
horizontal axis and ``basey`` sets the base along the vertical axis.
For all other inputs, look at the documentation of :func:`list_plot`.
EXAMPLES::
sage: yl = [5**k for k in range(10)]; xl = [2**k for k in range(10)]
sage: list_plot_loglog(list(zip(xl, yl))) # long time # plot in loglog scale with base 10
Graphics object consisting of 1 graphics primitive
.. PLOT::
yl = [5**k for k in range(10)]
xl = [2**k for k in range(10)]
g = list_plot_loglog(list(zip(xl, yl))) # long time # plot in loglog scale with base 10
sphinx_plot(g)
::
sage: list_plot_loglog(list(zip(xl, yl)), base=2.1) # long time # with base 2.1 on both axes
Graphics object consisting of 1 graphics primitive
.. PLOT::
yl = [5**k for k in range(10)]
xl = [2**k for k in range(10)]
g = list_plot_loglog(list(zip(xl, yl)), base=2.1) # long time # with base 2.1 on both axes
sphinx_plot(g)
::
sage: list_plot_loglog(list(zip(xl, yl)), base=(2,5)) # long time
Graphics object consisting of 1 graphics primitive
.. warning::
        If ``plotjoined`` is ``False`` then the axis that is in log scale
        must have all points strictly positive. For instance, the following
        plot will show no points in the figure since the points on the
        horizontal axis start from `(0,1)`.
::
sage: yl = [2**k for k in range(20)]
sage: list_plot_loglog(yl)
Graphics object consisting of 1 graphics primitive
Instead this will work. We drop the point `(0,1)`.::
sage: list_plot_loglog(list(zip(range(1,len(yl)), yl[1:])))
Graphics object consisting of 1 graphics primitive
"""
return list_plot(data, plotjoined=plotjoined, scale='loglog', **kwds)
@options(base=10)
def list_plot_semilogx(data, plotjoined=False, **kwds):
"""
Plot ``data`` in 'semilogx' scale, that is, the horizontal axis will be
in logarithmic scale.
INPUT:
- ``base`` -- (default: 10) the base of the logarithm. This must be
greater than 1.
For all other inputs, look at the documentation of :func:`list_plot`.
EXAMPLES::
sage: yl = [2**k for k in range(12)]
sage: list_plot_semilogx(list(zip(yl,yl)))
Graphics object consisting of 1 graphics primitive
.. PLOT::
yl = [2**k for k in range(12)]
g = list_plot_semilogx(list(zip(yl,yl)))
sphinx_plot(g)
.. warning::
If ``plotjoined`` is ``False`` then the horizontal axis must have all
points strictly positive. Otherwise the plot will come up empty.
        For instance, the following plot contains a point at `(0,1)`.
::
sage: yl = [2**k for k in range(12)]
sage: list_plot_semilogx(yl) # plot empty due to (0,1)
Graphics object consisting of 1 graphics primitive
We remove `(0,1)` to fix this.::
sage: list_plot_semilogx(list(zip(range(1, len(yl)), yl[1:])))
Graphics object consisting of 1 graphics primitive
::
sage: list_plot_semilogx([(1,2),(3,4),(3,-1),(25,3)], base=2) # with base 2
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = list_plot_semilogx([(1,2),(3,4),(3,-1),(25,3)], base=2) # with base 2
sphinx_plot(g)
"""
return list_plot(data, plotjoined=plotjoined, scale='semilogx', **kwds)
@options(base=10)
def list_plot_semilogy(data, plotjoined=False, **kwds):
"""
Plot ``data`` in 'semilogy' scale, that is, the vertical axis will be
in logarithmic scale.
INPUT:
- ``base`` -- (default: 10) the base of the logarithm. This must be
greater than 1.
For all other inputs, look at the documentation of :func:`list_plot`.
EXAMPLES::
sage: yl = [2**k for k in range(12)]
sage: list_plot_semilogy(yl) # plot in semilogy scale, base 10
Graphics object consisting of 1 graphics primitive
.. PLOT::
yl = [2**k for k in range(12)]
g = list_plot_semilogy(yl) # plot in semilogy scale, base 10
sphinx_plot(g)
.. warning::
If ``plotjoined`` is ``False`` then the vertical axis must have all
points strictly positive. Otherwise the plot will come up empty.
        For instance, the following plot contains a point at `(1,0)`. Further,
matplotlib will display a user warning.
::
sage: xl = [2**k for k in range(12)]; yl = range(len(xl))
sage: list_plot_semilogy(list(zip(xl,yl))) # plot empty due to (1,0)
doctest:warning
...
Graphics object consisting of 1 graphics primitive
We remove `(1,0)` to fix this.::
sage: list_plot_semilogy(list(zip(xl[1:],yl[1:])))
Graphics object consisting of 1 graphics primitive
::
sage: list_plot_semilogy([2, 4, 6, 8, 16, 31], base=2) # with base 2
Graphics object consisting of 1 graphics primitive
.. PLOT::
g = list_plot_semilogy([2, 4, 6, 8, 16, 31], base=2) # with base 2
sphinx_plot(g)
"""
return list_plot(data, plotjoined=plotjoined, scale='semilogy', **kwds)
def to_float_list(v):
"""
Given a list or tuple or iterable v, coerce each element of v to a
float and make a list out of the result.
EXAMPLES::
sage: from sage.plot.plot import to_float_list
sage: to_float_list([1,1/2,3])
[1.0, 0.5, 3.0]
"""
return [float(x) for x in v]
def reshape(v, n, m):
"""
Helper function for creating graphics arrays.
    The input array is flattened and turned into an `n` x `m`
    array, padded with blank graphics objects at the end, if
    necessary.
INPUT:
- ``v`` - a list of lists or tuples
- ``n, m`` - integers
OUTPUT:
A list of lists of graphics objects
EXAMPLES::
sage: L = [plot(sin(k*x),(x,-pi,pi)) for k in range(10)]
sage: graphics_array(L,3,4) # long time (up to 4s on sage.math, 2012)
Graphics Array of size 3 x 4
::
sage: M = [[plot(sin(k*x),(x,-pi,pi)) for k in range(3)],[plot(cos(j*x),(x,-pi,pi)) for j in [3..5]]]
sage: graphics_array(M,6,1) # long time (up to 4s on sage.math, 2012)
Graphics Array of size 6 x 1
TESTS::
sage: L = [plot(sin(k*x),(x,-pi,pi)) for k in [1..3]]
sage: graphics_array(L,0,-1) # indirect doctest
Traceback (most recent call last):
...
ValueError: array sizes must be positive
"""
if not (n > 0 and m > 0):
raise ValueError('array sizes must be positive')
G = Graphics()
G.axes(False)
if len(v) == 0:
return [[G]*m]*n
if not isinstance(v[0], Graphics):
# a list of lists -- flatten it
v = sum([list(x) for x in v], [])
# Now v should be a single list.
# First, make it have the right length.
v = list(v) # do not mutate the argument
for i in range(n * m - len(v)):
v.append(G)
# Next, create a list of lists out of it.
L = []
k = 0
for i in range(n):
w = []
for j in range(m):
w.append(v[k])
k += 1
L.append(w)
return L
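# Illustrative sketch (hypothetical helper, not used elsewhere): the same
# flatten/pad/nest logic as ``reshape`` above, shown with plain strings in
# place of graphics objects so the row-major fill is easy to see.
def _reshape_sketch(flat, n, m, filler=""):
    # e.g. _reshape_sketch(["a", "b", "c"], 2, 2) == [["a", "b"], ["c", ""]]
    flat = list(flat) + [filler] * (n * m - len(flat))
    return [flat[i * m:(i + 1) * m] for i in range(n)]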
def graphics_array(array, nrows=None, ncols=None):
r"""
    ``graphics_array`` takes a list of lists (or tuples) of
    graphics objects and plots them all on one canvas (single plot).
INPUT:
- ``array`` -- a list of lists or tuples. The graphics objects to
combine into a graphics array.
- ``nrows, ncols`` -- (optional) integers. If both are given then
the input array is flattened and turned into an ``nrows`` x
``ncols`` array, with blank graphics objects padded at the end,
if necessary. If only one is specified, the other is chosen
automatically.
EXAMPLES: Make some plots of `\sin` functions::
sage: f(x) = sin(x)
sage: g(x) = sin(2*x)
sage: h(x) = sin(4*x)
sage: p1 = plot(f,(-2*pi,2*pi),color=hue(0.5)) # long time
sage: p2 = plot(g,(-2*pi,2*pi),color=hue(0.9)) # long time
sage: p3 = parametric_plot((f,g),(0,2*pi),color=hue(0.6)) # long time
sage: p4 = parametric_plot((f,h),(0,2*pi),color=hue(1.0)) # long time
Now make a graphics array out of the plots::
sage: graphics_array(((p1,p2),(p3,p4))) # long time
Graphics Array of size 2 x 2
.. PLOT::
def f(x): return sin(x)
def g(x): return sin(2*x)
def h(x): return sin(4*x)
p1 = plot(f,(-2*pi,2*pi),color=hue(0.5)) # long time
p2 = plot(g,(-2*pi,2*pi),color=hue(0.9)) # long time
p3 = parametric_plot((f,g),(0,2*pi),color=hue(0.6)) # long time
p4 = parametric_plot((f,h),(0,2*pi),color=hue(1.0)) # long time
g = graphics_array(((p1,p2),(p3,p4))) # long time
sphinx_plot(g)
One can also name the array, and then use :meth:`~sage.plot.graphics.GraphicsArray.show`
or :meth:`~sage.plot.graphics.GraphicsArray.save`::
sage: ga = graphics_array(((p1,p2),(p3,p4))) # long time
sage: ga.show() # long time
Here we give only one row::
sage: p1 = plot(sin,(-4,4))
sage: p2 = plot(cos,(-4,4))
sage: g = graphics_array([p1, p2]); print(g)
Graphics Array of size 1 x 2
sage: g.show()
.. PLOT::
p1 = plot(sin,(-4,4))
p2 = plot(cos,(-4,4))
g = graphics_array([p1, p2])
sphinx_plot(g)
It is possible to use ``figsize`` to change the size of the plot
as a whole::
sage: L = [plot(sin(k*x),(x,-pi,pi)) for k in [1..3]]
sage: G = graphics_array(L)
sage: G.show(figsize=[5,3]) # smallish and compact
sage: G.show(figsize=[10,20]) # bigger and tall and thin; long time (2s on sage.math, 2012)
sage: G.show(figsize=8) # figure as a whole is a square
Specifying only the number of rows or the number of columns
computes the other dimension automatically::
sage: ga = graphics_array([plot(sin)] * 10, nrows=3)
sage: ga.nrows(), ga.ncols()
(3, 4)
sage: ga = graphics_array([plot(sin)] * 10, ncols=3)
sage: ga.nrows(), ga.ncols()
(4, 3)
"""
# TODO: refactor the whole array flattening and reshaping into a class
if nrows is None and ncols is None:
pass
elif nrows is not None and ncols is not None:
nrows = int(nrows)
ncols = int(ncols)
array = reshape(array, nrows, ncols)
else:
# nrows is None xor ncols is None
if len(array) > 0 and isinstance(array[0], Graphics):
length = len(array)
else:
length = sum(map(len, array))
if nrows is None:
ncols = int(ncols)
nrows = length // ncols + 1
elif ncols is None:
nrows = int(nrows)
ncols = length // nrows + 1
else:
assert False
array = reshape(array, nrows, ncols)
return GraphicsArray(array)
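# Note on the automatic-dimension rule above (descriptive comment only):
# when exactly one of ``nrows``/``ncols`` is given, the other is computed as
# ``length // given + 1``.  For example, 10 plots with ``nrows=3`` yield
# ``ncols = 10 // 3 + 1 = 4``, matching the doctest in ``graphics_array``.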
def var_and_list_of_values(v, plot_points):
"""
INPUT:
    - ``v`` - (v0, v1) or (var, v0, v1); return the range of values
      between v0 and v1 sampled at plot_points evenly spaced points; if
      var is given, also return var.
    - ``plot_points`` - an integer >= 2 (the two endpoints are always
      included)
OUTPUT:
- ``var`` - a variable or None
- ``list`` - a list of floats
EXAMPLES::
sage: from sage.plot.plot import var_and_list_of_values
sage: var_and_list_of_values((var('theta'), 2, 5), 5)
doctest:...: DeprecationWarning: var_and_list_of_values is deprecated. Please use sage.plot.misc.setup_for_eval_on_grid; note that that function has slightly different calling and return conventions which make it more generally applicable
See http://trac.sagemath.org/7008 for details.
(theta, [2.0, 2.75, 3.5, 4.25, 5.0])
sage: var_and_list_of_values((2, 5), 5)
(None, [2.0, 2.75, 3.5, 4.25, 5.0])
sage: var_and_list_of_values((var('theta'), 2, 5), 2)
(theta, [2.0, 5.0])
sage: var_and_list_of_values((2, 5), 2)
(None, [2.0, 5.0])
"""
from sage.misc.superseded import deprecation
deprecation(7008, "var_and_list_of_values is deprecated. Please use sage.plot.misc.setup_for_eval_on_grid; note that that function has slightly different calling and return conventions which make it more generally applicable")
plot_points = int(plot_points)
if plot_points < 2:
raise ValueError("plot_points must be greater than 1")
if not isinstance(v, (tuple, list)):
raise TypeError("v must be a tuple or list")
if len(v) == 3:
var = v[0]
a, b = v[1], v[2]
elif len(v) == 2:
var = None
a, b = v
else:
raise ValueError("parametric value range must be a list or tuple of length 2 or 3.")
a = float(a)
b = float(b)
if plot_points == 2:
return var, [a, b]
else:
step = (b-a)/float(plot_points-1)
values = [a + step * i for i in range(plot_points)]
return var, values
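# Descriptive sketch of the computation above (illustration only): for
# plot_points > 2 the returned values are simply
#
#     [a + (b - a) * i / (plot_points - 1) for i in range(plot_points)]
#
# i.e. ``plot_points`` evenly spaced samples including both endpoints.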
def setup_for_eval_on_grid(v, xrange, yrange, plot_points):
"""
This function is deprecated. Please use
``sage.plot.misc.setup_for_eval_on_grid`` instead. Please note that
that function has slightly different calling and return
conventions which make it more generally applicable.
INPUT:
- ``v`` - a list of functions
- ``xrange`` - 2 or 3 tuple (if 3, first is a
variable)
- ``yrange`` - 2 or 3 tuple
- ``plot_points`` - a positive integer
OUTPUT:
- ``g`` - tuple of fast callable functions
- ``xstep`` - step size in xdirection
- ``ystep`` - step size in ydirection
- ``xrange`` - tuple of 2 floats
- ``yrange`` - tuple of 2 floats
EXAMPLES::
sage: x,y = var('x,y')
sage: sage.plot.plot.setup_for_eval_on_grid([x^2 + y^2], (x,0,5), (y,0,pi), 11)
doctest:...: DeprecationWarning: sage.plot.plot.setup_for_eval_on_grid is deprecated. Please use sage.plot.misc.setup_for_eval_on_grid; note that that function has slightly different calling and return conventions which make it more generally applicable
See http://trac.sagemath.org/7008 for details.
([<sage.ext... object at ...>],
0.5,
0.3141592653589793,
(0.0, 5.0),
(0.0, 3.141592653589793))
We always plot at least two points; one at the beginning and one at the end of the ranges.
::
sage: sage.plot.plot.setup_for_eval_on_grid([x^2+y^2], (x,0,1), (y,-1,1), 1)
([<sage.ext... object at ...>],
1.0,
2.0,
(0.0, 1.0),
(-1.0, 1.0))
"""
from sage.misc.superseded import deprecation
deprecation(7008, "sage.plot.plot.setup_for_eval_on_grid is deprecated. Please use sage.plot.misc.setup_for_eval_on_grid; note that that function has slightly different calling and return conventions which make it more generally applicable")
from sage.plot.misc import setup_for_eval_on_grid as setup
g, ranges=setup(v, [xrange, yrange], plot_points)
return list(g), ranges[0][2], ranges[1][2], ranges[0][:2], ranges[1][:2]
def minmax_data(xdata, ydata, dict=False):
"""
Returns the minimums and maximums of xdata and ydata.
If dict is False, then minmax_data returns the tuple (xmin, xmax,
ymin, ymax); otherwise, it returns a dictionary whose keys are
'xmin', 'xmax', 'ymin', and 'ymax' and whose values are the
corresponding values.
EXAMPLES::
sage: from sage.plot.plot import minmax_data
sage: minmax_data([], [])
(-1, 1, -1, 1)
sage: minmax_data([-1, 2], [4, -3])
(-1, 2, -3, 4)
sage: d = minmax_data([-1, 2], [4, -3], dict=True)
sage: list(sorted(d.items()))
[('xmax', 2), ('xmin', -1), ('ymax', 4), ('ymin', -3)]
"""
xmin = min(xdata) if len(xdata) > 0 else -1
xmax = max(xdata) if len(xdata) > 0 else 1
ymin = min(ydata) if len(ydata) > 0 else -1
ymax = max(ydata) if len(ydata) > 0 else 1
if dict:
return {'xmin':xmin, 'xmax':xmax,
'ymin':ymin, 'ymax':ymax}
else:
return xmin, xmax, ymin, ymax
def adaptive_refinement(f, p1, p2, adaptive_tolerance=0.01, adaptive_recursion=5, level=0):
r"""
The adaptive refinement algorithm for plotting a function ``f``. See
the docstring for plot for a description of the algorithm.
INPUT:
- ``f`` - a function of one variable
- ``p1, p2`` - two points to refine between
- ``adaptive_recursion`` - (default: 5) how many
levels of recursion to go before giving up when doing adaptive
refinement. Setting this to 0 disables adaptive refinement.
- ``adaptive_tolerance`` - (default: 0.01) how large
a relative difference should be before the adaptive refinement
code considers it significant; see documentation for generate_plot_points
for more information. See the documentation for :func:`plot` for more
information on how the adaptive refinement algorithm works.
OUTPUT:
- ``list`` - a list of points to insert between ``p1`` and
``p2`` to get a better linear approximation between them
TESTS::
sage: from sage.plot.plot import adaptive_refinement
sage: adaptive_refinement(sin, (0,0), (pi,0), adaptive_tolerance=0.01, adaptive_recursion=0)
[]
sage: adaptive_refinement(sin, (0,0), (pi,0), adaptive_tolerance=0.01)
[(0.125*pi, 0.3826834323650898), (0.1875*pi, 0.5555702330196022), (0.25*pi, 0.7071067811865475), (0.3125*pi, 0.8314696123025452), (0.375*pi, 0.9238795325112867), (0.4375*pi, 0.9807852804032304), (0.5*pi, 1.0), (0.5625*pi, 0.9807852804032304), (0.625*pi, 0.9238795325112867), (0.6875*pi, 0.8314696123025455), (0.75*pi, 0.7071067811865476), (0.8125*pi, 0.5555702330196022), (0.875*pi, 0.3826834323650899)]
This shows that lowering ``adaptive_tolerance`` and raising
``adaptive_recursion`` both increase the number of subdivision
points, though which one creates more points is heavily
dependent upon the function being plotted.
::
sage: x = var('x')
sage: f(x) = sin(1/x)
sage: n1 = len(adaptive_refinement(f, (0,0), (pi,0), adaptive_tolerance=0.01)); n1
15
sage: n2 = len(adaptive_refinement(f, (0,0), (pi,0), adaptive_recursion=10, adaptive_tolerance=0.01)); n2
79
sage: n3 = len(adaptive_refinement(f, (0,0), (pi,0), adaptive_tolerance=0.001)); n3
26
"""
if level >= adaptive_recursion:
return []
x = (p1[0] + p2[0])/2.0
msg = ''
try:
y = float(f(x))
if str(y) in ['nan', 'NaN', 'inf', '-inf']:
            sage.misc.misc.verbose("%s\nUnable to compute f(%s)" % (msg, x), 1)
# give up for this branch
return []
except (ZeroDivisionError, TypeError, ValueError, OverflowError) as msg:
sage.misc.misc.verbose("%s\nUnable to compute f(%s)"%(msg, x), 1)
# give up for this branch
return []
# this distance calculation is not perfect.
if abs((p1[1] + p2[1])/2.0 - y) > adaptive_tolerance:
return adaptive_refinement(f, p1, (x, y),
adaptive_tolerance=adaptive_tolerance,
adaptive_recursion=adaptive_recursion,
level=level+1) \
+ [(x, y)] + \
adaptive_refinement(f, (x, y), p2,
adaptive_tolerance=adaptive_tolerance,
adaptive_recursion=adaptive_recursion,
level=level+1)
else:
return []
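# Minimal usage sketch (hypothetical helper, kept out of the public API):
# refine between the two endpoints of sin on [0, pi] and report how many
# extra points the recursion inserts.  Only the standard library is used
# besides ``adaptive_refinement`` itself.
def _adaptive_refinement_demo():
    import math
    extra = adaptive_refinement(math.sin, (0.0, 0.0), (math.pi, 0.0),
                                adaptive_tolerance=0.01,
                                adaptive_recursion=5)
    return len(extra)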
def generate_plot_points(f, xrange, plot_points=5, adaptive_tolerance=0.01, adaptive_recursion=5, randomize=True, initial_points=None):
r"""
Calculate plot points for a function f in the interval xrange. The
adaptive refinement algorithm is also automatically invoked with a
*relative* adaptive tolerance of adaptive_tolerance; see below.
INPUT:
- ``f`` - a function of one variable
    - ``xrange`` - a 2-tuple ``(xmin, xmax)`` or a 3-tuple
      ``(var, xmin, xmax)`` giving the interval over which to plot
- ``plot_points`` - (default: 5) the minimal number of plot points. (Note
however that in any actual plot a number is passed to this, with default
value 200.)
- ``adaptive_recursion`` - (default: 5) how many levels of recursion to go
before giving up when doing adaptive refinement. Setting this to 0
disables adaptive refinement.
- ``adaptive_tolerance`` - (default: 0.01) how large the relative difference
should be before the adaptive refinement code considers it significant. If
the actual difference is greater than adaptive_tolerance*delta, where delta
is the initial subinterval size for the given xrange and plot_points, then
the algorithm will consider it significant.
- ``initial_points`` - (default: None) a list of points that should be evaluated.
OUTPUT:
- a list of points (x, f(x)) in the interval xrange, which approximate
the function f.
TESTS::
sage: from sage.plot.plot import generate_plot_points
sage: generate_plot_points(sin, (0, pi), plot_points=2, adaptive_recursion=0)
[(0.0, 0.0), (3.141592653589793, 1.2246...e-16)]
sage: from sage.plot.plot import generate_plot_points
sage: generate_plot_points(lambda x: x^2, (0, 6), plot_points=2, adaptive_recursion=0, initial_points=[1,2,3])
[(0.0, 0.0), (1.0, 1.0), (2.0, 4.0), (3.0, 9.0), (6.0, 36.0)]
sage: generate_plot_points(sin(x).function(x), (-pi, pi), randomize=False)
[(-3.141592653589793, -1.2246...e-16), (-2.748893571891069,
-0.3826834323650899), (-2.356194490192345, -0.707106781186547...),
(-2.1598449493429825, -0.831469612302545...), (-1.9634954084936207,
-0.9238795325112867), (-1.7671458676442586, -0.9807852804032304),
(-1.5707963267948966, -1.0), (-1.3744467859455345,
-0.9807852804032304), (-1.1780972450961724, -0.9238795325112867),
(-0.9817477042468103, -0.831469612302545...), (-0.7853981633974483,
-0.707106781186547...), (-0.39269908169872414, -0.3826834323650898),
(0.0, 0.0), (0.39269908169872414, 0.3826834323650898),
(0.7853981633974483, 0.707106781186547...), (0.9817477042468103,
0.831469612302545...), (1.1780972450961724, 0.9238795325112867),
(1.3744467859455345, 0.9807852804032304), (1.5707963267948966, 1.0),
(1.7671458676442586, 0.9807852804032304), (1.9634954084936207,
0.9238795325112867), (2.1598449493429825, 0.831469612302545...),
(2.356194490192345, 0.707106781186547...), (2.748893571891069,
0.3826834323650899), (3.141592653589793, 1.2246...e-16)]
This shows that lowering adaptive_tolerance and raising
adaptive_recursion both increase the number of subdivision points.
(Note that which creates more points is heavily dependent on the
particular function plotted.)
::
sage: x = var('x')
sage: f(x) = sin(1/x)
sage: [len(generate_plot_points(f, (-pi, pi), plot_points=16, adaptive_tolerance=i, randomize=False)) for i in [0.01, 0.001, 0.0001]]
[97, 161, 275]
sage: [len(generate_plot_points(f, (-pi, pi), plot_points=16, adaptive_recursion=i, randomize=False)) for i in [5, 10, 15]]
[97, 499, 2681]
"""
from sage.plot.misc import setup_for_eval_on_grid
ignore, ranges = setup_for_eval_on_grid([], [xrange], plot_points)
xmin, xmax, delta = ranges[0]
data = srange(*ranges[0], include_endpoint=True)
random = current_randstate().python_random().random
for i in range(len(data)):
xi = data[i]
# Slightly randomize the interior sample points if
# randomize is true
if randomize and i > 0 and i < plot_points-1:
xi += delta*(random() - 0.5)
data[i] = xi
# add initial points
if isinstance(initial_points, list):
data = sorted(data + initial_points)
exceptions = 0
exception_indices = []
for i in range(len(data)):
xi = data[i]
try:
data[i] = (float(xi), float(f(xi)))
if str(data[i][1]) in ['nan', 'NaN', 'inf', '-inf']:
msg = "Unable to compute f(%s)" % xi
sage.misc.misc.verbose(msg, 1)
exceptions += 1
exception_indices.append(i)
except (ArithmeticError, TypeError, ValueError) as m:
sage.misc.misc.verbose("%s\nUnable to compute f(%s)" % (m, xi), 1)
if i == 0: # Given an error for left endpoint, try to move it in slightly
for j in range(1, 99):
xj = xi + delta*j/100.0
try:
data[i] = (float(xj), float(f(xj)))
# nan != nan
if data[i][1] != data[i][1]:
continue
break
except (ArithmeticError, TypeError, ValueError):
pass
else:
msg = m
exceptions += 1
exception_indices.append(i)
elif i == plot_points-1: # Given an error for right endpoint, try to move it in slightly
for j in range(1, 99):
xj = xi - delta*j/100.0
try:
data[i] = (float(xj), float(f(xj)))
# nan != nan
if data[i][1] != data[i][1]:
continue
break
except (ArithmeticError, TypeError, ValueError):
pass
else:
msg = m
exceptions += 1
exception_indices.append(i)
else:
msg = m
exceptions += 1
exception_indices.append(i)
data = [data[i] for i in range(len(data)) if i not in exception_indices]
# calls adaptive refinement
i, j = 0, 0
adaptive_tolerance = delta * float(adaptive_tolerance)
adaptive_recursion = int(adaptive_recursion)
while i < len(data) - 1:
for p in adaptive_refinement(f, data[i], data[i+1],
adaptive_tolerance=adaptive_tolerance,
adaptive_recursion=adaptive_recursion):
data.insert(i+1, p)
i += 1
i += 1
if (len(data) == 0 and exceptions > 0) or exceptions > 10:
sage.misc.misc.verbose("WARNING: When plotting, failed to evaluate function at %s points." % exceptions, level=0)
sage.misc.misc.verbose("Last error message: '%s'" % msg, level=0)
return data
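# Descriptive note on the sampling above: interior sample points are
# jittered by up to half a step in either direction,
#
#     xi += delta * (random() - 0.5)
#
# which helps avoid sampling artifacts when ``f`` has structure commensurate
# with the grid; the two endpoints are never moved (except to dodge
# evaluation errors).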
| 34.461832
| 411
| 0.588917
|
5211f4da0aaad5f64aa281de472912921aa52214
| 2,558
|
py
|
Python
|
examples/kernel_lsq.py
|
joshuagornall/jax
|
c97cd0a526c12ad81988fd58c1c66df4ddd71813
|
[
"ECL-2.0",
"Apache-2.0"
] | 17,375
|
2018-11-18T02:15:55.000Z
|
2022-03-31T23:49:46.000Z
|
examples/kernel_lsq.py
|
joshuagornall/jax
|
c97cd0a526c12ad81988fd58c1c66df4ddd71813
|
[
"ECL-2.0",
"Apache-2.0"
] | 5,018
|
2018-11-22T17:04:07.000Z
|
2022-03-31T23:36:25.000Z
|
examples/kernel_lsq.py
|
joshuagornall/jax
|
c97cd0a526c12ad81988fd58c1c66df4ddd71813
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,805
|
2018-11-21T10:13:53.000Z
|
2022-03-31T23:49:19.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy.random as npr
import jax.numpy as jnp
from jax.experimental import optimizers
from jax import grad, jit, make_jaxpr, vmap, lax
def gram(kernel, xs):
'''Compute a Gram matrix from a kernel and an array of data points.
Args:
kernel: callable, maps pairs of data points to scalars.
xs: array of data points, stacked along the leading dimension.
Returns:
A 2d array `a` such that `a[i, j] = kernel(xs[i], xs[j])`.
'''
return vmap(lambda x: vmap(lambda y: kernel(x, y))(xs))(xs)
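# Loop-based reference for ``gram`` (a sketch for illustration only, not
# used below): the nested ``vmap`` above vectorizes exactly this double
# loop over the rows of ``xs``.
def _gram_reference(kernel, xs):
    return jnp.stack([jnp.stack([kernel(x, y) for y in xs]) for x in xs])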
def minimize(f, x, num_steps=10000, step_size=0.000001, mass=0.9):
opt_init, opt_update, get_params = optimizers.momentum(step_size, mass)
@jit
def update(i, opt_state):
x = get_params(opt_state)
return opt_update(i, grad(f)(x), opt_state)
opt_state = opt_init(x)
for i in range(num_steps):
opt_state = update(i, opt_state)
return get_params(opt_state)
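# Tiny usage sketch (hypothetical, not called anywhere): drive ``minimize``
# toward the minimizer of a simple quadratic, whose optimum is the vector of
# all threes.  The step size and iteration count here are illustrative.
def _minimize_demo():
    objective = lambda v: jnp.sum((v - 3.0) ** 2)
    return minimize(objective, jnp.zeros(3), num_steps=500, step_size=0.01)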
def train(kernel, xs, ys, regularization=0.01):
gram_ = jit(partial(gram, kernel))
gram_mat = gram_(xs)
n = xs.shape[0]
def objective(v):
risk = .5 * jnp.sum((jnp.dot(gram_mat, v) - ys) ** 2.0)
reg = regularization * jnp.sum(v ** 2.0)
return risk + reg
v = minimize(objective, jnp.zeros(n))
def predict(x):
prods = vmap(lambda x_: kernel(x, x_))(xs)
return jnp.sum(v * prods)
return jit(vmap(predict))
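# Cross-check sketch (illustration only): the ridge objective used in
# ``train`` has the closed-form stationary point
#     (K^T K + 2 * regularization * I) v = K^T y,
# so the gradient-descent coefficients can be compared against a direct
# linear solve.  The helper below is hypothetical and not called anywhere.
def _closed_form_coefficients(gram_mat, ys, regularization=0.01):
    n = gram_mat.shape[0]
    lhs = gram_mat.T @ gram_mat + 2.0 * regularization * jnp.eye(n)
    return jnp.linalg.solve(lhs, gram_mat.T @ ys)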
if __name__ == "__main__":
n = 100
d = 20
# linear kernel
linear_kernel = lambda x, y: jnp.dot(x, y, precision=lax.Precision.HIGH)
truth = npr.randn(d)
xs = npr.randn(n, d)
ys = jnp.dot(xs, truth)
predict = train(linear_kernel, xs, ys)
print('MSE:', jnp.sum((predict(xs) - ys) ** 2.))
def gram_jaxpr(kernel):
return make_jaxpr(partial(gram, kernel))(xs)
rbf_kernel = lambda x, y: jnp.exp(-jnp.sum((x - y) ** 2))
print()
print('jaxpr of gram(linear_kernel):')
print(gram_jaxpr(linear_kernel))
print()
print('jaxpr of gram(rbf_kernel):')
print(gram_jaxpr(rbf_kernel))
| 26.371134
| 74
| 0.685301
|
76ce937377293eda6b9e042c19c685b83a4560d9
| 3,047
|
py
|
Python
|
tests/afftc_test.py
|
huonw/sisfft-py
|
3cf048022c6641a9c5d60b6db01734fa414e6d7b
|
[
"MIT"
] | 3
|
2018-10-25T20:52:22.000Z
|
2020-07-30T03:19:10.000Z
|
tests/afftc_test.py
|
huonw/sisfft-py
|
3cf048022c6641a9c5d60b6db01734fa414e6d7b
|
[
"MIT"
] | null | null | null |
tests/afftc_test.py
|
huonw/sisfft-py
|
3cf048022c6641a9c5d60b6db01734fa414e6d7b
|
[
"MIT"
] | 1
|
2020-05-19T02:59:46.000Z
|
2020-05-19T02:59:46.000Z
|
import numpy as np
import unittest
from sisfft.timer import timer
import sisfft
afftc = sisfft._afftc
#logging.basicConfig(level = logging.INFO)
TEST_REPEATS = 20
TEST_LENGTH = 100
def afftc_(self, beta, delta):
np.random.seed(1)
for _ in range(0, TEST_REPEATS):
v1 = np.random.rand(TEST_LENGTH)
v1 /= v1.sum()
v2 = np.random.rand(TEST_LENGTH)
v2 /= v2.sum()
with timer('naive'):
real = np.convolve(v1, v2)
with timer('afftc'):
hopeful = np.exp(afftc.convolve(np.log(v1), np.log(v2), beta, delta))
lower = (1 - beta) * real - 2 * delta
upper = (1 + beta) * real
between = (lower <= hopeful) & (hopeful <= upper)
not_between = np.invert(between)
self.assertTrue(between.all(),
'%s\n%s' % (hopeful[not_between], real[not_between]))
def afftc_square_(self, beta, delta):
np.random.seed(1)
for _ in range(0, TEST_REPEATS):
v = np.random.rand(TEST_LENGTH)
v /= v.sum()
with timer('naive'):
real = np.convolve(v, v)
with timer('afftc'):
hopeful = np.exp(afftc.convolve_square(np.log(v), beta, delta))
lower = (1 - beta) * real - 2 * delta
upper = (1 + beta) * real
between = (lower <= hopeful) & (hopeful <= upper)
not_between = np.invert(between)
self.assertTrue(between.all(),
'%s\n%s' % (hopeful[not_between], real[not_between]))
def afftc_no_lower_bound_(self, beta):
np.random.seed(1)
for _ in range(0, TEST_REPEATS):
v1 = np.random.rand(TEST_LENGTH)
v1 /= v1.sum()
v2 = np.random.rand(TEST_LENGTH)
v2 /= v2.sum()
with timer('naive'):
real = np.convolve(v1, v2)
with timer('afftc'):
hopeful = np.exp(afftc.convolve(np.log(v1), np.log(v2), beta, None))
lower = (1 - beta) * real
upper = (1 + beta) * real
between = (lower <= hopeful) & (hopeful <= upper)
not_between = np.invert(between)
self.assertTrue(between.all(),
'%s\n%s' % (hopeful[not_between], real[not_between]))
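# Reference sketch (illustration only; assumes SciPy is available): a naive
# log-space convolution computed with logsumexp.  The afftc routines tested
# above approximate this same quantity, with the relative/absolute error
# bounds that the assertions check against np.convolve.
def _log_convolve_naive(logv1, logv2):
    from scipy.special import logsumexp
    n, m = len(logv1), len(logv2)
    out = np.empty(n + m - 1)
    for k in range(n + m - 1):
        terms = [logv1[i] + logv2[k - i]
                 for i in range(max(0, k - m + 1), min(k, n - 1) + 1)]
        out[k] = logsumexp(terms)
    return out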
class Afftc(unittest.TestCase):
pass
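# Note on the generation loop below: the ``beta=beta`` / ``delta=delta``
# default arguments bind the *current* loop values into each lambda.
# Without them, every generated test would close over the loop variables and
# see only their final values once the loop finishes.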
for log10_alpha in range(1, 5 + 1):
beta = 10**-log10_alpha
for delta in [0.0, 0.0001, 0.01, 0.1]:
test = lambda self, beta=beta, delta=delta: afftc_(self, beta, delta)
test_square = lambda self, beta=beta, delta=delta: afftc_square_(self, beta, delta)
name = 'test_%s_%f' % (beta, delta)
name_square = 'test_square_%s_%f' % (beta, delta)
test.__name__ = name
test_square.__name__ = name_square
setattr(Afftc, name, test)
setattr(Afftc, name_square, test_square)
del test, test_square
name_no_bound = 'test_no_bound_%s' % beta
test_no_bound = lambda self, beta=beta: afftc_no_lower_bound_(self, beta)
test_no_bound.__name__ = name_no_bound
setattr(Afftc, name_no_bound, test_no_bound)
del test_no_bound
| 32.763441
| 91
| 0.585822
|
f25b36d42f024b2e081de613c712e84097ae3ad5
| 2,528
|
py
|
Python
|
thermo/utils/amm.py
|
janosh/thermo
|
0202a47ec8abacfd49b065ddd13ad060b0b9a1a3
|
[
"MIT"
] | 9
|
2019-10-08T20:47:30.000Z
|
2021-11-20T07:51:25.000Z
|
thermo/utils/amm.py
|
janosh/thermo
|
0202a47ec8abacfd49b065ddd13ad060b0b9a1a3
|
[
"MIT"
] | 4
|
2021-12-10T12:42:20.000Z
|
2022-03-01T21:24:18.000Z
|
thermo/utils/amm.py
|
janosh/thermo
|
0202a47ec8abacfd49b065ddd13ad060b0b9a1a3
|
[
"MIT"
] | 3
|
2019-10-28T21:50:24.000Z
|
2021-11-10T18:41:20.000Z
|
import os
import automatminer as amm
import matminer as mm
from automatminer import MatPipe # make MatPipe importable from this file
from thermo.utils.decorators import interruptable
try:
os.remove(os.getcwd() + "/automatminer.log") # delete since not needed
except FileNotFoundError:
pass
featurizers = {
"composition": [mm.featurizers.composition.ElementProperty.from_preset("magpie")],
"structure": [],
}
# To save time and computation, we only retain regressors that were among the best in
# prior unrestricted benchmarking. See https://tinyurl.com/y27hajoy and "Customizing
# TPOT's operators and parameters" at https://epistasislab.github.io/tpot/using.
good_regressors = (
"ExtraTreesRegressor",
"GradientBoostingRegressor",
"RandomForestRegressor",
)
# Don't modify TPOT_REGRESSOR_CONFIG in place. Would throw: 'RuntimeError: There was an
# error in the TPOT optimization process. This could be because...'
tpot_config = amm.automl.config.tpot_configs.TPOT_REGRESSOR_CONFIG.copy()
for key in list(tpot_config):
if not key.endswith(good_regressors):
del tpot_config[key]
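# Note on the filter above: ``str.endswith`` accepts a tuple of suffixes, so
# a TPOT config key is kept whenever it ends with any of the names in
# ``good_regressors`` (e.g. a key such as
# "sklearn.ensemble.RandomForestRegressor"; the exact key is shown only as an
# illustration of the key format).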
# pipe_config needs to be a function rather than a dict. Else multiple pipes
# running concurrently will receive a handle to the same learner (pipe.learner)
# and overwrite each other's fitting.
def pipe_config(preset="express", **tpot_kwargs):
tpot_default = {"max_time_mins": 10} # "config_dict": tpot_config
tpot_default.update(tpot_kwargs)
return {
**amm.get_preset_config(preset),
"autofeaturizer": amm.AutoFeaturizer(
# preset="express",
featurizers=featurizers,
guess_oxistates=False,
),
"learner": amm.TPOTAdaptor(**tpot_default),
}
@interruptable
def fit_pred_pipe(train_df, test_df, target, **kwargs):
mat_pipe = MatPipe(**pipe_config(**kwargs))
mat_pipe.fit(train_df[["T", "composition", target]], target)
pred_df = mat_pipe.predict(
test_df[["T", "composition"]], output_col=target + "_pred"
)
return mat_pipe, pred_df
def featurize(pipe, df):
"""Use a fitted MatPipe, specifically its autofeaturizer, cleaner and
reducer components, to featurize a dataframe. Can be used in combination
with custom models that don't fit into the Automatminer API like
multi-output regressors.
"""
df = pipe.autofeaturizer.transform(df, pipe.target)
df = pipe.cleaner.transform(df, pipe.target)
df = pipe.reducer.transform(df, pipe.target)
return df
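# Hedged usage sketch (commented out; ``mat_pipe``, ``df`` and ``y_multi``
# are hypothetical objects standing in for a fitted MatPipe, a featurizable
# dataframe and a multi-target label array):
#
#     from sklearn.ensemble import RandomForestRegressor
#     from sklearn.multioutput import MultiOutputRegressor
#
#     feat_df = featurize(mat_pipe, df)
#     model = MultiOutputRegressor(RandomForestRegressor())
#     model.fit(feat_df, y_multi)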
| 33.706667
| 87
| 0.719146
|
925bf7375266f131aec790cdc8df5fbb5a19848a
| 22,121
|
py
|
Python
|
tests/tensorflow/test_transformations.py
|
generalova-kate/nncf
|
a2ec7bca7167318c5b1436199a4c7449a9c064e9
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow/test_transformations.py
|
generalova-kate/nncf
|
a2ec7bca7167318c5b1436199a4c7449a9c064e9
|
[
"Apache-2.0"
] | 1
|
2021-07-23T07:46:52.000Z
|
2021-07-23T07:46:52.000Z
|
tests/tensorflow/test_transformations.py
|
generalova-kate/nncf
|
a2ec7bca7167318c5b1436199a4c7449a9c064e9
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pytest
import networkx as nx
import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from nncf.common.graph.transformations.commands import TargetType
from nncf.common.graph.transformations.commands import TransformationPriority
from nncf.common.graph.transformations.commands import TransformationType
from nncf.common.quantization.structs import QuantizerConfig
from nncf.common.quantization.structs import QuantizationMode
from nncf.tensorflow.graph.converter import TFModelConverterFactory
from nncf.tensorflow.graph.model_transformer import TFModelTransformer
from nncf.tensorflow.graph.transformations import commands
from nncf.tensorflow.graph.transformations.commands import TFInsertionCommand
from nncf.tensorflow.graph.transformations.commands import TFLayer
from nncf.tensorflow.graph.transformations.commands import TFLayerWeight
from nncf.tensorflow.graph.transformations.commands import TFMultipleInsertionCommands
from nncf.tensorflow.graph.transformations.layout import TFTransformationLayout
from nncf.tensorflow.graph.utils import is_functional_model
from nncf.tensorflow.layers.custom_objects import NNCF_CUSTOM_OBJECTS
from nncf.tensorflow.layers.operation import NNCFOperation
from nncf.tensorflow.quantization.layers import FakeQuantize
from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec
from tests.tensorflow.test_compressed_graph import keras_model_to_tf_graph
from tests.tensorflow.test_compressed_graph import check_nx_graph
from tests.tensorflow.test_compressed_graph import get_nx_graph_from_tf_graph
def test_insertion_commands_union_invalid_input():
cmd_0 = commands.TFInsertionCommand(commands.TFBeforeLayer('layer_0'))
cmd_1 = commands.TFInsertionCommand(commands.TFAfterLayer('layer_0'))
with pytest.raises(Exception):
cmd_0.union(cmd_1)
priority_types = ["same", "different"]
@pytest.mark.parametrize("case", priority_types, ids=priority_types)
def test_insertion_command_priority(case):
def make_operation_fn(priority_value):
def operation_fn():
return priority_value
return operation_fn
cmds = []
if case == 'same':
for idx in range(3):
cmds.append(
commands.TFInsertionCommand(
commands.TFBeforeLayer('layer_0'),
make_operation_fn(idx)
))
else:
        priorities = sorted(list(TransformationPriority), key=lambda x: x.value, reverse=True)
        for priority in priorities:
cmds.append(
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
make_operation_fn(priority.value),
priority
))
res_cmd = cmds[0]
for cmd in cmds[1:]:
res_cmd = res_cmd + cmd
res = res_cmd.insertion_objects
assert len(res) == len(cmds)
assert all(res[i]() <= res[i + 1]() for i in range(len(res) - 1))
def test_removal_command_union():
cmd_0 = commands.TFRemovalCommand(commands.TFLayer('layer_0'))
cmd_1 = commands.TFRemovalCommand(commands.TFLayer('layer_1'))
with pytest.raises(Exception):
cmd_0.union(cmd_1)
def test_add_insertion_command_to_multiple_insertion_commands_same():
check_fn = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name
cmd_0 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_0')
cmd_1 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_1')
m_cmd = commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn
)
m_cmd.add_insertion_command(cmd_0)
m_cmd.add_insertion_command(cmd_1)
res_cmds = m_cmd.commands
assert len(res_cmds) == 1
res = res_cmds[0].insertion_objects
assert len(res) == 2
assert res[0]() == 'cmd_0'
assert res[1]() == 'cmd_1'
def test_add_insertion_command_to_multiple_insertion_commands_different():
check_fn = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name
cmd_0 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda:'cmd_0')
cmd_1 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_1'),
lambda:'cmd_1')
m_cmd = commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn
)
m_cmd.add_insertion_command(cmd_0)
m_cmd.add_insertion_command(cmd_1)
res_cmds = m_cmd.commands
assert len(res_cmds) == 2
res = res_cmds[0].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_0'
res = res_cmds[1].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_1'
def test_add_insertion_command_to_multiple_insertion_commands_invalid_input():
m_cmd = commands.TFMultipleInsertionCommands(commands.TFLayerWeight('layer_0', 'weights_0'))
cmd = commands.TFRemovalCommand(commands.TFLayer('layer_0'))
with pytest.raises(Exception):
m_cmd.add_insertion_command(cmd)
def test_multiple_insertion_commands_union_invalid_input():
cmd_0 = commands.TFMultipleInsertionCommands(commands.TFLayer('layer_0'))
cmd_1 = commands.TFMultipleInsertionCommands(commands.TFLayer('layer_1'))
with pytest.raises(Exception):
cmd_0.add_insertion_command(cmd_1)
def test_multiple_insertion_commands_union():
check_fn_0 = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name and \
dst.weights_attr_name == 'weight_0'
cmd_0 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_0')
m_cmd_0 = commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn_0,
commands=[cmd_0]
)
check_fn_1 = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name and \
dst.weights_attr_name == 'weight_1'
cmd_1 = commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_1'),
lambda:'cmd_1')
m_cmd_1 = commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn_1,
commands=[cmd_1]
)
m_cmd = m_cmd_0 + m_cmd_1
res_cmds = m_cmd.commands
assert len(res_cmds) == 2
res = res_cmds[0].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_0'
res = res_cmds[1].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_1'
def test_transformation_layout_insertion_case():
transformation_layout = TFTransformationLayout()
check_fn = lambda src, dst: \
dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
src.layer_name == dst.layer_name
command_list = [
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_0',
TransformationPriority.SPARSIFICATION_PRIORITY),
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_1'),
lambda: 'cmd_1',
TransformationPriority.SPARSIFICATION_PRIORITY),
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_1', 'weight_0'),
lambda: 'cmd_2',
TransformationPriority.SPARSIFICATION_PRIORITY),
commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_0'),
check_target_points_fn=check_fn,
commands=[
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'cmd_3',
TransformationPriority.PRUNING_PRIORITY)
]),
commands.TFMultipleInsertionCommands(
target_point=commands.TFLayer('layer_1'),
check_target_points_fn=check_fn,
commands=[
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_1', 'weight_0'),
lambda: 'cmd_4',
TransformationPriority.PRUNING_PRIORITY),
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_1', 'weight_1'),
lambda: 'cmd_5',
TransformationPriority.PRUNING_PRIORITY)
]),
]
for cmd in command_list:
transformation_layout.register(cmd)
res_transformations = transformation_layout.transformations
assert len(res_transformations) == 2
assert res_transformations[0].type == TransformationType.MULTI_INSERT
assert res_transformations[0].target_point.type == TargetType.LAYER
assert res_transformations[0].target_point.layer_name == 'layer_0'
assert res_transformations[1].type == TransformationType.MULTI_INSERT
assert res_transformations[1].target_point.type == TargetType.LAYER
assert res_transformations[1].target_point.layer_name == 'layer_1'
res_cmds = res_transformations[0].commands
assert len(res_cmds) == 2
res = res_cmds[0].insertion_objects
assert len(res) == 2
assert res[0]() == 'cmd_3' and res[1]() == 'cmd_0'
res = res_cmds[1].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_1'
res_cmds = res_transformations[1].commands
assert len(res_cmds) == 2
res = res_cmds[0].insertion_objects
assert len(res) == 2
assert res[0]() == 'cmd_4' and res[1]() == 'cmd_2'
res = res_cmds[1].insertion_objects
assert len(res) == 1
assert res[0]() == 'cmd_5'
def test_transformation_layout_removal_case():
transformation_layout = TFTransformationLayout()
command_list = [
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'sparsity_operation',
TransformationPriority.SPARSIFICATION_PRIORITY),
commands.TFRemovalCommand(commands.TFOperationWithWeights('layer_0', 'weight_0', 'sparsity_operation')),
commands.TFInsertionCommand(
commands.TFAfterLayer('layer_0'),
lambda: 'layer_1'
),
commands.TFRemovalCommand(commands.TFLayer('layer_1')),
commands.TFInsertionCommand(
commands.TFLayerWeight('layer_0', 'weight_0'),
lambda: 'pruning_operation',
TransformationPriority.PRUNING_PRIORITY
)
]
for cmd in command_list:
transformation_layout.register(cmd)
res_transformations = transformation_layout.transformations
assert len(res_transformations) == 5
assert res_transformations[0].type == TransformationType.INSERT
assert res_transformations[0].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
assert res_transformations[0].target_point.layer_name == 'layer_0'
assert res_transformations[0].target_point.weights_attr_name == 'weight_0'
assert res_transformations[1].type == TransformationType.REMOVE
assert res_transformations[1].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
assert res_transformations[1].target_point.layer_name == 'layer_0'
assert res_transformations[1].target_point.weights_attr_name == 'weight_0'
assert res_transformations[1].target_point.operation_name == 'sparsity_operation'
assert res_transformations[2].type == TransformationType.INSERT
assert res_transformations[2].target_point.type == TargetType.AFTER_LAYER
assert res_transformations[2].target_point.layer_name == 'layer_0'
assert res_transformations[3].type == TransformationType.REMOVE
assert res_transformations[3].target_point.type == TargetType.LAYER
assert res_transformations[3].target_point.layer_name == 'layer_1'
assert res_transformations[4].type == TransformationType.INSERT
assert res_transformations[4].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
assert res_transformations[4].target_point.layer_name == 'layer_0'
assert res_transformations[4].target_point.weights_attr_name == 'weight_0'
CUSTOM_LAYER_NAME = "custom_layer_for_test"
class TwoWeightCustomLayerForTest(tf.keras.layers.Layer):
WEIGHT_1_NAME = 'w1'
WEIGHT_2_NAME = 'w2'
def __init__(self, name=CUSTOM_LAYER_NAME, trainable=True, dtype='float32'):
super().__init__(name=name, trainable=trainable, dtype=dtype)
self.w1 = self.add_weight(shape=(3, 1, 1, 3), name=self.WEIGHT_1_NAME)
self.w2 = self.add_weight(shape=(3, 1, 1, 3), name=self.WEIGHT_2_NAME)
def call(self, inputs, **kwargs):
x = tf.nn.conv2d(inputs, self.w1, strides=[1, 1, 1, 1], padding='SAME')
x = tf.nn.conv2d(x, self.w2, strides=[1, 1, 1, 1], padding='SAME')
return x
def ModelWithTwoWeightCustomLayer():
input_shape = (None, None, 3)
img_input = layers.Input(name='input', shape=input_shape)
x = img_input
x = TwoWeightCustomLayerForTest()(x) # custom!
model = models.Model(img_input, x, name='ModelForCustomLayerTest')
model.build([16, 16, 3])
return model
def create_transformed_model(transformation_layout: TFTransformationLayout):
model = ModelWithTwoWeightCustomLayer()
transformer = TFModelTransformer(model)
model = transformer.transform(transformation_layout)
return model
@NNCF_CUSTOM_OBJECTS.register()
class MockIdentityOp(NNCFOperation):
def build(self, input_shape, input_type, name, layer):
return {}
def call(self, inputs, weights, _):
return inputs
def test_multiple_insertion_command_has_same_effect_as_multiple_single_insertions():
check_fn = lambda src, dst: dst.type == TargetType.OPERATION_WITH_WEIGHTS
insertion_command_1 = TFInsertionCommand(
TFLayerWeight(CUSTOM_LAYER_NAME,
TwoWeightCustomLayerForTest.WEIGHT_1_NAME),
MockIdentityOp('mock_nncf_op_1'),
TransformationPriority.PRUNING_PRIORITY)
insertion_command_2 = TFInsertionCommand(
TFLayerWeight(CUSTOM_LAYER_NAME,
TwoWeightCustomLayerForTest.WEIGHT_2_NAME),
MockIdentityOp('mock_nncf_op_2'),
TransformationPriority.PRUNING_PRIORITY)
multiple_insertion_command = TFMultipleInsertionCommands(
target_point=TFLayer(CUSTOM_LAYER_NAME),
commands=[insertion_command_1, insertion_command_2],
check_target_points_fn=check_fn)
transformation_layout_multi = TFTransformationLayout()
transformation_layout_multi.register(multiple_insertion_command)
transformation_layout_two_single = TFTransformationLayout()
transformation_layout_two_single.register(insertion_command_1)
transformation_layout_two_single.register(insertion_command_2)
model_with_multi = create_transformed_model(transformation_layout_multi)
model_with_two_single = create_transformed_model(transformation_layout_two_single)
multi_config = model_with_multi.get_config()
two_single_config = model_with_two_single.get_config()
assert multi_config == two_single_config
def create_functional_model():
img_input = layers.Input(name='input', shape=(None, None, 3), dtype='float32')
x = layers.Conv2D(filters=16,
kernel_size=(3, 3),
strides=1,
padding="same",
activation='relu')(img_input)
residual = layers.Conv2D(filters=64,
kernel_size=(1, 1),
strides=2)(x)
residual = layers.BatchNormalization()(residual)
x = layers.Conv2D(filters=64,
kernel_size=(3, 3),
strides=2,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2D(filters=64,
kernel_size=(3, 3),
strides=1,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = tf.keras.layers.Add()([residual, x])
x = layers.Dense(units=10, activation='softmax')(x)
model = models.Model(img_input, x, name='ResnetBlockTest')
return model
def create_sequential_model():
model = tf.keras.Sequential()
model.add(layers.Input(shape=(None, None, 3)))
model.add(layers.Conv2D(filters=64,
kernel_size=(1, 1),
strides=2,
activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dense(2, activation="relu"))
return model
def apply_insert_after(model):
converter = TFModelConverterFactory.create(model)
transformations = TFTransformationLayout()
qconfig = QuantizerConfig(num_bits=8,
mode=QuantizationMode.SYMMETRIC,
signedness_to_force=None,
per_channel=False)
functional_model = is_functional_model(model)
for i, layer in enumerate(model.layers):
original_node_name = layer.name
if functional_model:
_, layer_info = converter.get_layer_info_for_node(original_node_name)
instance_idx = layer_info.instance_idx
else:
instance_idx = 0
fake_quantize_name = f'FakeQuantize_{i}/{original_node_name}'
fake_quantize_layer = FakeQuantize(
TFQuantizerSpec.from_config(qconfig, narrow_range=False, half_range=False),
name=fake_quantize_name)
transformations.register(
TFInsertionCommand(
target_point=commands.TFAfterLayer(original_node_name,
instance_idx=instance_idx,
output_port_id=0),
callable_object=fake_quantize_layer,
priority=TransformationPriority.QUANTIZATION_PRIORITY))
transformer = TFModelTransformer(model)
transformed_model = transformer.transform(transformations)
return transformed_model
def apply_insert_before(model):
converter = TFModelConverterFactory.create(model)
transformations = TFTransformationLayout()
qconfig = QuantizerConfig(num_bits=8,
mode=QuantizationMode.SYMMETRIC,
signedness_to_force=None,
per_channel=False)
functional_model = is_functional_model(model)
for i, layer in enumerate(model.layers):
# Insertion before input layer is not supported
if isinstance(layer, layers.InputLayer):
continue
original_node_name = layer.name
if functional_model:
_, layer_info = converter.get_layer_info_for_node(original_node_name)
instance_idx = layer_info.instance_idx
else:
instance_idx = 0
inputs = [layer.input] if isinstance(layer.input, tf.Tensor) else layer.input
for port, _ in enumerate(inputs):
fake_quantize_name = f'FakeQuantize_{i}.{port}/{original_node_name}'
fake_quantize_layer = FakeQuantize(
TFQuantizerSpec.from_config(qconfig, narrow_range=False, half_range=False),
name=fake_quantize_name)
transformations.register(
TFInsertionCommand(
target_point=commands.TFBeforeLayer(original_node_name,
instance_idx=instance_idx,
input_port_id=port),
callable_object=fake_quantize_layer,
priority=TransformationPriority.QUANTIZATION_PRIORITY))
transformer = TFModelTransformer(model)
transformed_model = transformer.transform(transformations)
return transformed_model
def check_graphs(model, ref_graph_filename):
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'model_transormer')
ref_graph_path = os.path.abspath(os.path.join(data_dir, ref_graph_filename))
graph, graph_to_layer_var_names_map = keras_model_to_tf_graph(model)
nx_graph = get_nx_graph_from_tf_graph(graph, graph_to_layer_var_names_map)
if not os.path.exists(ref_graph_path) and os.getenv("NNCF_TEST_REGEN_DOT") is not None:
nx.drawing.nx_pydot.write_dot(nx_graph, ref_graph_path)
check_nx_graph(nx_graph, ref_graph_path)
def test_functional_insert_after():
model = create_functional_model()
transformed_model = apply_insert_after(model)
check_graphs(transformed_model, 'functional_insert_after.dot')
def test_functional_insert_before():
model = create_functional_model()
transformed_model = apply_insert_before(model)
check_graphs(transformed_model, 'functional_insert_before.dot')
def test_sequential_insert_after():
model = create_sequential_model()
transformed_model = apply_insert_after(model)
check_graphs(transformed_model, 'sequential_block_insert_after.dot')
def test_sequential_insert_before():
model = create_sequential_model()
transformed_model = apply_insert_before(model)
check_graphs(transformed_model, 'sequential_block_insert_before.dot')
| 38.139655
| 112
| 0.692012
|
c3d9f0f2a63c243e086caa7277f1ace625824093
| 3,285
|
py
|
Python
|
experiment_Bayesian_CNN/demo/refactor_debug_dataset.py
|
slds-lmu/paper_2019_variationalResampleDistributionShift
|
3664eea4d243eb828d13ba69112308630d80d244
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-07-12T02:54:57.000Z
|
2021-07-12T02:54:57.000Z
|
experiment_Bayesian_CNN/demo/refactor_debug_dataset.py
|
compstat-lmu/paper_2019_variationalResampleDistributionShift
|
3664eea4d243eb828d13ba69112308630d80d244
|
[
"Apache-2.0",
"MIT"
] | 20
|
2020-01-28T22:18:55.000Z
|
2021-09-08T01:21:52.000Z
|
experiment_Bayesian_CNN/demo/refactor_debug_dataset.py
|
slds-lmu/paper_2019_variationalResampleDistributionShift
|
3664eea4d243eb828d13ba69112308630d80d244
|
[
"Apache-2.0",
"MIT"
] | 2
|
2019-12-14T09:17:47.000Z
|
2020-02-24T16:55:07.000Z
|
import torchvision
import torchvision.transforms as transforms
import torch
import torch.utils.data as data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import refactor_dataset_class
import Bayesian_config as cf
resize = cf.resize
import argparse
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--lr', default=0.0001, type=float, help='learning_rate')
parser.add_argument('--net_type', default='3conv3fc', type=str, help='model')
#parser.add_argument('--depth', default=28, type=int, help='depth of model')
#parser.add_argument('--widen_factor', default=10, type=int, help='width of model')
parser.add_argument('--num_samples', default=10, type=int, help='Number of samples')
parser.add_argument('--beta_type', default="Blundell", type=str, help='Beta type')
parser.add_argument('--p_logvar_init', default=0, type=int, help='p_logvar_init')
parser.add_argument('--q_logvar_init', default=-10, type=int, help='q_logvar_init')
parser.add_argument('--weight_decay', default=0.0005, type=float, help='weight_decay')
parser.add_argument('--dataset', default='mnist', type=str, help='dataset = [mnist/cifar10/cifar100]')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--testOnly', '-t', action='store_true', help='Test mode with the saved model')
#args = parser.parse_args()
args = parser.parse_args(["--dataset", "mnist"])
args
transform_train = transforms.Compose([
transforms.Resize((resize, resize)),
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
]) # meanstd transformation
transform_test = transforms.Compose([
transforms.Resize((resize, resize)),
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
start_epoch, num_epochs, batch_size, optim_type = cf.start_epoch, cf.num_epochs, cf.batch_size, cf.optim_type
trainset_org = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform_train)
trainset_refactor = refactor_dataset_class.VGMMDataset(transform = transform_train)
trainloader_org = torch.utils.data.DataLoader(trainset_org, batch_size=batch_size, shuffle=True, num_workers=4)
trainloader_refactor = torch.utils.data.DataLoader(trainset_refactor, batch_size=batch_size, shuffle=False, num_workers=4)
# num_workers: how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. (default: 0)# Return network & file name
import utils_parent
X, y = utils_parent.load_mnist("fashion-mnist")
type(y)
y.shape
ynew = y.argmax(axis = 1)
ynew
type(ynew)
for batch_idx, (inputs_value, targets) in enumerate(trainloader_org):
print(inputs_value)
print(targets)
print("......")
print(targets.type)
print(targets.shape)
break
for batch_idx, (inputs_value, targets) in enumerate(trainloader_refactor):
print("refactor")
print(inputs_value)
print(targets)
print("......")
print(targets.type)
print(targets.shape)
break
for i in range(len(trainset_refactor)):
sample = trainset_refactor[i]
break
trainset_refactor[8000][1]
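# A minimal sketch (editor's addition, not from the original script): summarize
# label coverage over the first few batches of each loader instead of printing
# a single batch. It only uses objects defined above; `max_batches` is an
# arbitrary cap chosen for speed.
from collections import Counter
def label_histogram(loader, max_batches=10):
    """Count how often each label appears in the first `max_batches` batches."""
    counts = Counter()
    for batch_idx, (_, targets) in enumerate(loader):
        counts.update(int(t) for t in targets)
        if batch_idx + 1 >= max_batches:
            break
    return counts
# Example usage (commented out to keep the debug script's behaviour unchanged):
# print(label_histogram(trainloader_org))
# print(label_histogram(trainloader_refactor))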
| 38.197674
| 160
| 0.755251
|
f427efc3a5218b4f9de2b1ee3fb1882dddc4edaa
| 5,878
|
py
|
Python
|
docs/source/conf.py
|
keipertk/Utilities
|
5b0ab5e20f374bc7f790523a8548fdcfddcfa311
|
[
"Apache-2.0"
] | 1
|
2019-09-29T16:52:01.000Z
|
2019-09-29T16:52:01.000Z
|
docs/source/conf.py
|
keipertk/Utilities
|
5b0ab5e20f374bc7f790523a8548fdcfddcfa311
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
keipertk/Utilities
|
5b0ab5e20f374bc7f790523a8548fdcfddcfa311
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
# -- Project information -----------------------------------------------------
project = u'Utilities'
copyright = u'2019, NWChemEx Team'
author = u'NWChemEx Team'
src_dir = u'utilities'
# The short X.Y version
version = u'1.0.0'
# The full version, including alpha/beta/rc tags
release = u'1.0.0alpha'
##############################################################################
# Shouldn't need to change anything below this point #
##############################################################################
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'breathe',
'exhale'
]
dir_path = os.path.dirname(os.path.realpath(__file__))
doc_path = os.path.dirname(dir_path)
root_path = os.path.dirname(doc_path)
breathe_default_project = project
breathe_projects = { project : os.path.join(doc_path, "doxyoutput", "xml")}
exhale_args = {
"containmentFolder" : "./api",
"rootFileName" : "library_root.rst",
"rootFileTitle" : "Library API",
"doxygenStripFromPath" : "..",
"createTreeView" : True,
"exhaleExecutesDoxygen" : True,
"exhaleDoxygenStdin" : "INPUT = " + os.path.join(root_path, project,
"detail_")
}
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, project + '.tex',project + ' Documentation', author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project.lower(), project + ' Documentation', [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, project + ' Documentation',
author, project, 'One line description of project.', 'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 31.265957
| 80
| 0.625383
|
c056a8f48224599a4183ee559856fc7501519f5d
| 1,296
|
py
|
Python
|
backend/images/migrations/0001_initial.py
|
sophiedophie/djangoproject
|
bad22a6eef032d86cbfe16886bebd4bac948e60c
|
[
"MIT"
] | null | null | null |
backend/images/migrations/0001_initial.py
|
sophiedophie/djangoproject
|
bad22a6eef032d86cbfe16886bebd4bac948e60c
|
[
"MIT"
] | null | null | null |
backend/images/migrations/0001_initial.py
|
sophiedophie/djangoproject
|
bad22a6eef032d86cbfe16886bebd4bac948e60c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.4 on 2018-04-25 05:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('file', models.ImageField(upload_to='')),
('location', models.CharField(max_length=140)),
('caption', models.TextField()),
],
options={
'abstract': False,
},
),
]
| 31.609756
| 114
| 0.525463
|
08aceca45ffae227bcd37d63cac9e326c2cf2c59
| 1,973
|
py
|
Python
|
classes/contrib/mypy/validation/validate_typeclass_def.py
|
ftaebi/classes
|
28d25861ce476adc8259b491492d53c627095d0b
|
[
"BSD-2-Clause"
] | 475
|
2019-11-03T07:48:43.000Z
|
2022-03-29T12:22:44.000Z
|
classes/contrib/mypy/validation/validate_typeclass_def.py
|
AVIPAGHADAR1729/classes
|
ab5ea9f1b3cdbcf87d1f6f4115ed8f42a6cb8700
|
[
"BSD-2-Clause"
] | 305
|
2019-11-18T11:52:16.000Z
|
2022-03-31T20:11:51.000Z
|
classes/contrib/mypy/validation/validate_typeclass_def.py
|
AVIPAGHADAR1729/classes
|
ab5ea9f1b3cdbcf87d1f6f4115ed8f42a6cb8700
|
[
"BSD-2-Clause"
] | 24
|
2020-02-15T18:57:54.000Z
|
2022-03-27T22:46:08.000Z
|
from typing import Union
from mypy.nodes import (
ARG_POS,
EllipsisExpr,
ExpressionStmt,
FuncDef,
PassStmt,
StrExpr,
)
from mypy.plugin import FunctionContext, MethodContext
from mypy.types import CallableType, Instance
from typing_extensions import Final
_Contexts = Union[MethodContext, FunctionContext]
# Messages:
_AT_LEAST_ONE_ARG_MSG: Final = (
'Typeclass definition must have at least one positional argument'
)
_FIRST_ARG_KIND_MSG: Final = (
'First argument in typeclass definition must be positional'
)
_REDUNDANT_BODY_MSG: Final = 'Typeclass definitions must not have bodies'
def check_type(
typeclass: Instance,
ctx: _Contexts,
) -> bool:
"""Checks typeclass definition."""
return all([
_check_first_arg(typeclass, ctx),
_check_body(typeclass, ctx),
])
def _check_first_arg(
typeclass: Instance,
ctx: _Contexts,
) -> bool:
sig = typeclass.args[1]
assert isinstance(sig, CallableType)
if not len(sig.arg_kinds):
ctx.api.fail(_AT_LEAST_ONE_ARG_MSG, ctx.context)
return False
if sig.arg_kinds[0] != ARG_POS:
ctx.api.fail(_FIRST_ARG_KIND_MSG, ctx.context)
return False
return True
def _check_body(
typeclass: Instance,
ctx: _Contexts,
) -> bool:
sig = typeclass.args[1]
assert isinstance(sig, CallableType)
assert isinstance(sig.definition, FuncDef)
body = sig.definition.body.body
if body:
is_useless_body = (
len(body) == 1 and
(isinstance(body[0], PassStmt) or (
isinstance(body[0], ExpressionStmt) and
isinstance(body[0].expr, (EllipsisExpr, StrExpr))
))
)
if is_useless_body:
            # We allow a single ellipsis in a function body.
# We also allow just a docstring.
return True
ctx.api.fail(_REDUNDANT_BODY_MSG, ctx.context)
return False
return True
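# A hedged, illustrative snippet (not used by the plugin): the shape of
# definition that the checks above accept. It assumes the library's public
# `from classes import typeclass` entry point; the `__main__` guard keeps it
# inert when mypy loads this module.
if __name__ == '__main__':
    from classes import typeclass
    @typeclass
    def to_json(instance) -> str:
        """Serialize a value to JSON."""
    # One positional argument satisfies _check_first_arg; a docstring-only body
    # satisfies _check_body. A body that does real work (e.g. ``return
    # str(instance)``) would be reported with _REDUNDANT_BODY_MSG.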
| 24.358025
| 73
| 0.657375
|
e3e462e7d14b9bec6530a1b2b4614e2028881032
| 8,095
|
py
|
Python
|
darwinpush/messagefactories/xml/ScheduleXMLMessageFactory.py
|
fasteroute/darwinpush
|
c919049e076cbdf61007fc9cc1c5a0271cde7929
|
[
"Apache-2.0"
] | 3
|
2015-08-15T15:38:06.000Z
|
2019-08-06T11:09:32.000Z
|
darwinpush/messagefactories/xml/ScheduleXMLMessageFactory.py
|
grundleborg/darwinpush
|
c919049e076cbdf61007fc9cc1c5a0271cde7929
|
[
"Apache-2.0"
] | 34
|
2015-07-22T13:47:16.000Z
|
2015-08-12T17:40:23.000Z
|
darwinpush/messagefactories/xml/ScheduleXMLMessageFactory.py
|
grundleborg/darwinpush
|
c919049e076cbdf61007fc9cc1c5a0271cde7929
|
[
"Apache-2.0"
] | 1
|
2015-08-30T15:26:24.000Z
|
2015-08-30T15:26:24.000Z
|
from darwinpush.messages.ScheduleMessage import *
from dateutil.parser import *
from darwinpush.utils import timezone_for_date_and_time
from darwinpush.xb.raw.sch import OR, OPOR, IP, OPIP, PP, DT, OPDT
from darwinpush.xb.raw.ct import DisruptionReasonType
from datetime import datetime, timedelta
from dateutil.parser import *
import pytz
class ScheduleXMLMessageFactory:
@staticmethod
def build(raw, containing_message, xml):
m = ScheduleMessage()
m.uid = raw.uid
m.rid = raw.rid
m.headcode = raw.trainId
m.start_date = raw.ssd.date()
m.toc_code = raw.toc
m.passenger_service = bool(raw.isPassengerSvc)
m.status = raw.status
m.category = raw.trainCat
m.active = bool(raw.isActive)
m.deleted = bool(raw.deleted)
try:
m.charter = bool(raw.charter)
except AttributeError:
m.charter = False
if raw.cancelReason is not None:
m.cancel_reason_code = raw.cancelReason.value()
m.cancel_reason_tiploc = raw.cancelReason.tiploc
m.cancel_reason_near = bool(raw.cancelReason.near)
else:
m.cancel_reason_code = None
m.cancel_reason_tiploc = None
m.cancel_reason_near = None
# Loop through all the points in the order they appear in the XML, instantiate the
# appropriate object for them, and add them to the appropriate list.
m.origins = []
m.operational_origins = []
m.intermediate_points = []
m.operational_intermediate_points = []
m.passing_points = []
m.destinations = []
m.operational_destinations = []
m.all_points = []
for r in raw.orderedContent():
v = r.value
if type(v) == DisruptionReasonType:
                # Disruption reasons are not calling points; skip them.
                continue
elif type(v) == OR:
p = f.build_point(v, Origin)
m.origins.append(p)
elif type(v) == OPOR:
p = f.build_point(v, OperationalOrigin)
m.operational_origins.append(p)
elif type(v) == IP:
p = f.build_point(v, IntermediatePoint)
m.intermediate_points.append(p)
elif type(v) == OPIP:
p = f.build_point(v, OperationalIntermediatePoint)
m.operational_intermediate_points.append(p)
elif type(v) == PP:
p = f.build_point(v, PassingPoint)
m.passing_points.append(p)
elif type(v) == DT:
p = f.build_point(v, Destination)
m.destinations.append(p)
elif type(v) == OPDT:
p = f.build_point(v, OperationalDestination)
m.operational_destinations.append(p)
else:
raise Exception("Type of point is {}.".format(type(v)))
m.all_points.append(p)
first_point = m.all_points[0]
if first_point.raw_working_arrival_time is not None:
t = first_point.raw_working_arrival_time
elif first_point.raw_working_pass_time is not None:
t = first_point.raw_working_pass_time
elif first_point.raw_working_departure_time is not None:
t = first_point.raw_working_departure_time
else:
raise Exception()
tz = timezone_for_date_and_time(m.start_date, t)
day_incrementor = 0
o = None
for p in m.all_points:
day_incrementor = f.build_times(day_incrementor, o, p, m.start_date, tz)
o = p
m._raw = raw
m._containing_message = containing_message
m._xml = xml
return m
@staticmethod
def build_point(raw, t):
r = t()
r.tiploc = raw.tpl
r.activity_codes = raw.act
r.planned_activity_codes = raw.planAct
r.cancelled = bool(raw.can)
try:
r.false_tiploc = raw.fd
except AttributeError:
r.false_tiploc = None
try:
r.route_delay = raw.rdelay
except:
r.route_delay = None
try:
r.raw_working_arrival_time = parse(raw.wta).time()
except AttributeError:
r.raw_working_arrival_time = None
try:
r.raw_working_pass_time = parse(raw.wtp).time()
except AttributeError:
r.raw_working_pass_time = None
try:
r.raw_working_departure_time = parse(raw.wtd).time()
except AttributeError:
r.raw_working_departure_time = None
try:
r.raw_public_arrival_time = parse(raw.pta).time()
except AttributeError:
r.raw_public_arrival_time = None
try:
r.raw_public_departure_time = parse(raw.ptd).time()
except AttributeError:
r.raw_public_departure_time = None
return r
@staticmethod
def build_times(day_incrementor, last_location, this_location, start_date, tz):
# Construct all the date/time objects iteratively, checking for back-in-time movement of
# any of them.
last_time = None
if last_location is not None:
last_time = f.get_last_time(last_location)
if this_location.raw_working_arrival_time is not None:
t = this_location.raw_working_arrival_time
if last_time is not None:
if last_time > t:
day_incrementor += 1
d = start_date + timedelta(days=day_incrementor)
this_location.working_arrival_time = tz.localize(datetime.combine(d, t)).astimezone(pytz.utc)
else:
this_location.working_arrival_time = None
if this_location.raw_public_arrival_time is not None:
t = this_location.raw_public_arrival_time
if last_time is not None:
if last_time > t:
day_incrementor += 1
d = start_date + timedelta(days=day_incrementor)
this_location.public_arrival_time = tz.localize(datetime.combine(d, t)).astimezone(pytz.utc)
else:
this_location.public_arrival_time = None
if this_location.raw_working_pass_time is not None:
t = this_location.raw_working_pass_time
if last_time is not None:
if last_time > t:
day_incrementor += 1
d = start_date + timedelta(days=day_incrementor)
this_location.working_pass_time = tz.localize(datetime.combine(d, t)).astimezone(pytz.utc)
else:
this_location.working_pass_time = None
if this_location.raw_public_departure_time is not None:
t = this_location.raw_public_departure_time
if last_time is not None:
if last_time > t:
day_incrementor += 1
d = start_date + timedelta(days=day_incrementor)
this_location.public_departure_time = tz.localize(datetime.combine(d, t)).astimezone(pytz.utc)
else:
this_location.public_departure_time = None
if this_location.raw_working_departure_time is not None:
t = this_location.raw_working_departure_time
if last_time is not None:
if last_time > t:
day_incrementor += 1
d = start_date + timedelta(days=day_incrementor)
this_location.working_departure_time = tz.localize(datetime.combine(d, t)).astimezone(pytz.utc)
else:
this_location.working_departure_time = None
# Return the new day_incrementor value.
return day_incrementor
@staticmethod
def get_last_time(l):
if l.raw_working_departure_time is not None:
return l.raw_working_departure_time
elif l.raw_working_pass_time is not None:
return l.raw_working_pass_time
elif l.raw_working_arrival_time is not None:
return l.raw_working_arrival_time
else:
raise Exception()
f = ScheduleXMLMessageFactory
| 35.977778
| 107
| 0.604818
|
0c4396f1cbe19ed6a3c4d95eaadb364573e181be
| 981
|
py
|
Python
|
tests/test_non_neg_omp.py
|
swasun/RFOMT
|
64ba63c01bd04f4f959d18aff27e245d8fff3403
|
[
"MIT"
] | 2
|
2021-12-09T23:44:37.000Z
|
2021-12-10T10:09:20.000Z
|
tests/test_non_neg_omp.py
|
swasun/RFOMT
|
64ba63c01bd04f4f959d18aff27e245d8fff3403
|
[
"MIT"
] | null | null | null |
tests/test_non_neg_omp.py
|
swasun/RFOMT
|
64ba63c01bd04f4f959d18aff27e245d8fff3403
|
[
"MIT"
] | null | null | null |
from bolsonaro.models.nn_omp import NonNegativeOrthogonalMatchingPursuit
import numpy as np
def test_binary_classif_omp():
N = 1000
L = 100
T = np.random.rand(N, L)
w_star = np.zeros(L)
w_star[:L//2] = np.abs(np.random.rand(L//2))
T /= np.linalg.norm(T, axis=0)
y = T @ w_star
requested_solutions = list(range(10, L, 10))
print()
print(len(requested_solutions))
print(L//2)
nn_omp = NonNegativeOrthogonalMatchingPursuit(max_iter=L, intermediate_solutions_sizes=requested_solutions, fill_with_final_solution=False)
nn_omp.fit(T, y)
lst_predict = nn_omp.predict(T)
print(len(lst_predict))
# solutions = nn_omp(T, y, L, requested_solutions)
#
# for idx_sol, w in enumerate(solutions):
# solution = T @ w
# non_zero = w.astype(bool)
# print(requested_solutions[idx_sol], np.sum(non_zero), np.linalg.norm(solution - y)/np.linalg.norm(y))
# assert isinstance(results, np.ndarray)
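# A hedged baseline (editor's sketch, not part of the original test): the same
# kind of synthetic problem solved with scikit-learn's unconstrained OMP. It
# assumes scikit-learn is installed; the 50-atom budget mirrors one of the
# requested solution sizes above, and the error bound is only a loose smoke check.
def test_sklearn_omp_baseline():
    from sklearn.linear_model import OrthogonalMatchingPursuit
    N, L = 1000, 100
    T = np.random.rand(N, L)
    T /= np.linalg.norm(T, axis=0)
    w_star = np.zeros(L)
    w_star[:L // 2] = np.abs(np.random.rand(L // 2))
    y = T @ w_star
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=50)
    omp.fit(T, y)
    relative_error = np.linalg.norm(omp.predict(T) - y) / np.linalg.norm(y)
    assert relative_error < 1.0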
| 29.727273
| 143
| 0.673802
|
b35322a47724dbeaae13e70107f86ad52ee1111c
| 731
|
py
|
Python
|
other/TMP.py
|
RichardLeeK/MachineLearning
|
5d4194dc54e5c25beeaf0d16c249bb07cf622364
|
[
"MIT"
] | 1
|
2017-08-17T11:30:26.000Z
|
2017-08-17T11:30:26.000Z
|
other/TMP.py
|
RichardLeeK/MachineLearning
|
5d4194dc54e5c25beeaf0d16c249bb07cf622364
|
[
"MIT"
] | null | null | null |
other/TMP.py
|
RichardLeeK/MachineLearning
|
5d4194dc54e5c25beeaf0d16c249bb07cf622364
|
[
"MIT"
] | null | null | null |
file = open('new_sample_0703.csv')
lines = file.readlines()
file.close()
pen = open('features.csv', 'w')
sl = lines[0].split(',')
for s in sl:
if 'SpectralAnalysis' in s:
if 'ABP' in s:
pen.write('Spectral,ABP,'+s+'\n')
elif 'ICP' in s:
pen.write('Spectral,ICP,'+s+'\n')
else:
pen.write('Spectral,OTH,'+s+'\n')
elif 'HRV' in s:
pen.write('HRV,,'+s+'\n')
  elif 'MorphologyAnalysis' in s:
    if 'Abp' in s:
      pen.write('Morphology,ABP,'+s+'\n')
    elif 'Icp' in s:
      pen.write('Morphology,ICP,'+s+'\n')
    else:
      pen.write('Morphology,OTH,'+s+'\n')
elif 'BasicInfo' in s:
pen.write('Basic,,'+s+'\n')
elif 'BRS' in s:
pen.write('BRS,,'+s+'\n')
pen.close()
print('fin')
| 22.151515
| 39
| 0.549932
|
5dcf170248a0bc35c0cd973c7ae6599d22576515
| 1,966
|
py
|
Python
|
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/ListBriefSkillGroupsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/ListBriefSkillGroupsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/ListBriefSkillGroupsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class ListBriefSkillGroupsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2020-07-01', 'ListBriefSkillGroups')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_SearchPattern(self):
return self.get_query_params().get('SearchPattern')
def set_SearchPattern(self,SearchPattern):
self.add_query_param('SearchPattern',SearchPattern)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
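# A hedged usage sketch (editor's addition, not part of the generated SDK
# file): sending this request with the core SDK client. It assumes
# `aliyunsdkcore` is installed; the credentials, region and instance id below
# are placeholders.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
    request = ListBriefSkillGroupsRequest()
    request.set_InstanceId('<ccc-instance-id>')
    request.set_PageNumber(1)
    request.set_PageSize(10)
    response = client.do_action_with_exception(request)
    print(response)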
| 35.107143
| 74
| 0.767548
|
fa2e3d618a07f2d50df51f3916b51f975ee09372
| 3,410
|
py
|
Python
|
Python/Encrypt credentials/Encryption sample/utils.py
|
AdamRiddick/PowerBI-Developer-Samples
|
4032ffd63f7e3133c2320d23dbdb0e7fbb782a37
|
[
"MIT"
] | 611
|
2019-05-09T04:56:36.000Z
|
2022-03-28T07:48:57.000Z
|
Python/Encrypt credentials/Encryption sample/utils.py
|
AdamRiddick/PowerBI-Developer-Samples
|
4032ffd63f7e3133c2320d23dbdb0e7fbb782a37
|
[
"MIT"
] | 56
|
2017-03-06T12:05:26.000Z
|
2019-04-29T22:52:29.000Z
|
Python/Encrypt credentials/Encryption sample/utils.py
|
AdamRiddick/PowerBI-Developer-Samples
|
4032ffd63f7e3133c2320d23dbdb0e7fbb782a37
|
[
"MIT"
] | 599
|
2019-05-08T07:01:04.000Z
|
2022-03-29T21:45:55.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
cred_types = {
'KEY': 'Key',
'WINDOWS': 'Windows',
'OAUTH2': 'OAuth2',
'BASIC': 'Basic'
}
class Utils:
    @staticmethod
    def validate_config(app):
''' Returns a message to user for missing configuration
Args:
app (Flask): Flask app object
Returns:
string: Error info
'''
authenticate_mode = app.config['AUTHENTICATION_MODE']
tenant_id = app.config['TENANT_ID']
client_id = app.config['CLIENT_ID']
username = app.config['POWER_BI_USER']
password = app.config['POWER_BI_PASS']
client_secret = app.config['CLIENT_SECRET']
scope = app.config['SCOPE']
authority = app.config['AUTHORITY']
if authenticate_mode == '':
return 'Please specify one of the two authentication modes in config.py file'
if authenticate_mode.lower() != 'masteruser' and authenticate_mode.lower() != 'serviceprincipal':
return 'Please specify one of the two authentication modes [serviceprincipal or masteruser] in config.py file'
if authenticate_mode.lower() == 'serviceprincipal' and tenant_id == '':
return 'Tenant ID is not provided in config.py file'
if client_id == '':
return 'Client ID is not provided in config.py file'
if authenticate_mode.lower() == 'masteruser':
if username == '':
return 'Master account username is not provided in config.py file'
elif password == '':
return 'Master account password is not provided in config.py file'
if authenticate_mode.lower() == 'serviceprincipal' and client_secret == '':
return 'Client secret is not provided in config.py file'
if scope == '':
return 'Scope is not provided in config.py file'
if authority == '':
return 'Authority URL is not provided in config.py file'
    @staticmethod
    def serialize_credentials(credentials_arr, cred_type):
''' Returns serialized credentials
Args:
credentials_arr (dict): Credentials based on the user input of the credentials type
cred_type (string): Credentials type (i.e. Basic, Windows)
Returns:
string: Serialized credentials
'''
serialized_credentials = ''
if cred_type == cred_types['KEY']:
serialized_credentials = '{\'credentialData\':[{\'name\':\'key\',\'value\':\'' + credentials_arr[0] + '\'}]}'
elif cred_type == cred_types['WINDOWS']:
serialized_credentials = '{\'credentialData\':[{\'name\':\'username\',\'value\':\'' + \
credentials_arr[0] + '\'},{\'name\':\'password\',\'value\':\'' + \
credentials_arr[1] + '\'}]}'
elif cred_type == cred_types['OAUTH2']:
serialized_credentials = '{\'credentialData\':[{\'name\':\'accessToken\',\'value\':\'' + credentials_arr[0] + '\'}]}'
elif cred_type == cred_types['BASIC']:
serialized_credentials = '{\'credentialData\':[{\'name\':\'username\',\'value\':\'' + \
credentials_arr[0] + '\'},{\'name\':\'password\',\'value\':\'' + \
credentials_arr[1] + '\'}]}'
else:
raise Exception('Invalid credentials type')
return serialized_credentials
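# A hedged usage example (editor's addition, not part of the sample): calling
# the helper above directly. The username and password values are placeholders.
if __name__ == '__main__':
    example = Utils.serialize_credentials(['demo-user', 'demo-pass'], cred_types['BASIC'])
    print(example)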
| 39.651163
| 129
| 0.585924
|
77c82bc5f3adb6e0bae8a57e6d62f8f9c689df71
| 22,045
|
py
|
Python
|
hummingbird/ml/convert.py
|
rathijit/hummingbird
|
b634a4e5152757af2ff7a41059b3e52c8140fc04
|
[
"MIT"
] | 1
|
2021-05-13T19:14:04.000Z
|
2021-05-13T19:14:04.000Z
|
hummingbird/ml/convert.py
|
hsaputra/hummingbird
|
0ebd6be58880615bc86eab3648056682b40de614
|
[
"MIT"
] | null | null | null |
hummingbird/ml/convert.py
|
hsaputra/hummingbird
|
0ebd6be58880615bc86eab3648056682b40de614
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Hummingbird main (converters) API.
"""
from copy import deepcopy
import psutil
import numpy as np
from .operator_converters import constants
from ._parse import parse_sklearn_api_model, parse_onnx_api_model, parse_sparkml_api_model
from ._topology import convert as topology_converter
from ._utils import (
torch_installed,
lightgbm_installed,
xgboost_installed,
pandas_installed,
sparkml_installed,
is_pandas_dataframe,
is_spark_dataframe,
tvm_installed,
)
from .exceptions import MissingConverter, MissingBackend
from .supported import backends
# Invoke the registration of all our converters.
from . import operator_converters # noqa
# Set up the converter dispatcher.
from .supported import xgb_operator_list # noqa
from .supported import lgbm_operator_list # noqa
def _is_onnx_model(model):
"""
Function returning whether the input model is an ONNX model or not.
"""
return type(model).__name__ == "ModelProto"
def _is_sparkml_model(model):
"""
Function returning whether the input model is a Spark-ML model or not.
"""
if sparkml_installed():
from pyspark.ml import Model, Transformer
from pyspark.ml.pipeline import PipelineModel
return isinstance(model, Model) or isinstance(model, PipelineModel) or isinstance(model, Transformer)
else:
return False
def _supported_backend_check(backend_formatted, backend_original):
"""
Function used to check whether the specified backend is supported or not.
"""
if backend_formatted is None:
raise MissingBackend("Backend: formatted {}, original {}".format(backend_formatted, backend_original))
def _supported_backend_check_config(model, backend, extra_config):
"""
Function used to check whether the specified backend and configuration pair is supported or not.
"""
assert torch_installed(), "To use Hummingbird you need to install torch."
import onnx
import torch
tvm_backend = None
if tvm_installed():
import tvm
tvm_backend = tvm.__name__
if (
(backend == torch.jit.__name__ and not _is_onnx_model(model))
or backend == tvm_backend
or (backend == onnx.__name__ and not _is_onnx_model(model))
) and constants.TEST_INPUT not in extra_config:
raise RuntimeError("Backend {} requires test inputs. Please pass some test input to the convert.".format(backend))
def _convert_sklearn(model, backend, test_input, device, extra_config={}):
"""
This function converts the specified *scikit-learn* (API) model into its *backend* counterpart.
The supported operators and backends can be found at `hummingbird.ml.supported`.
"""
assert model is not None
assert torch_installed(), "To use Hummingbird you need to install torch."
# Parse scikit-learn model as our internal data structure (i.e., Topology)
# We modify the scikit learn model during translation.
model = deepcopy(model)
topology = parse_sklearn_api_model(model, extra_config)
# Convert the Topology object into a PyTorch model.
hb_model = topology_converter(topology, backend, test_input, device, extra_config=extra_config)
return hb_model
def _convert_lightgbm(model, backend, test_input, device, extra_config={}):
"""
This function is used to generate a *backend* model from a given input [LightGBM] model.
[LightGBM]: https://lightgbm.readthedocs.io/
"""
assert (
lightgbm_installed()
), "To convert LightGBM models you need to install LightGBM (or `pip install hummingbird-ml[extra]`)."
return _convert_sklearn(model, backend, test_input, device, extra_config)
def _convert_xgboost(model, backend, test_input, device, extra_config={}):
"""
This function is used to generate a *backend* model from a given input [XGBoost] model.
[XGBoost]: https://xgboost.readthedocs.io/
"""
assert (
xgboost_installed()
), "To convert XGboost models you need to instal XGBoost (or `pip install hummingbird-ml[extra]`)."
# XGBoostRegressor and Classifier have different APIs for extracting the number of features.
# In the former case we need to infer them from the test_input.
if "_features_count" in dir(model):
extra_config[constants.N_FEATURES] = model._features_count
elif test_input is not None:
if type(test_input) is np.ndarray and len(test_input.shape) == 2:
extra_config[constants.N_FEATURES] = test_input.shape[1]
else:
raise RuntimeError(
"XGBoost converter is not able to infer the number of input features.\
Please pass a test_input to convert or \
                    file an issue at https://github.com/microsoft/hummingbird/."
)
else:
raise RuntimeError(
"XGBoost converter is not able to infer the number of input features.\
Please pass some test_input to the converter."
)
return _convert_sklearn(model, backend, test_input, device, extra_config)
def _convert_onnxml(model, backend, test_input, device, extra_config={}):
"""
This function converts the specified [ONNX-ML] model into its *backend* counterpart.
The supported operators can be found at `hummingbird.ml.supported`.
"""
assert model is not None
assert torch_installed(), "To use Hummingbird you need to install torch."
import onnx
# The conversion requires some test input for tracing.
# Test inputs can be either provided or generate from the input schema of the model.
# Generate some test input if necessary.
if test_input is None:
import torch
from onnxconverter_common.data_types import FloatTensorType, DoubleTensorType, Int32TensorType, Int64TensorType
tvm_backend = None
if tvm_installed():
import tvm
tvm_backend = tvm.__name__
# Get the input information from the ONNX schema.
initial_types = []
for input in model.graph.input:
name = input.name if hasattr(input, "name") else None
data_type = (
input.type.tensor_type.elem_type
if hasattr(input, "type")
and hasattr(input.type, "tensor_type")
and hasattr(input.type.tensor_type, "elem_type")
else None
)
if name is None:
raise RuntimeError(
"Cannot fetch input name or data_type from the ONNX schema. Please provide some test input."
)
if data_type is None:
raise RuntimeError(
"Cannot fetch input data_type from the ONNX schema, or data type is not tensor_type. Please provide some test input."
)
if not hasattr(input.type.tensor_type, "shape"):
raise RuntimeError("Cannot fetch input shape from ONNX schema. Please provide some test input.")
shape = [dim.dim_value for dim in input.type.tensor_type.shape.dim]
if len(shape) == 1:
shape = [1, shape[0]]
assert len(shape) == 2
# In ONNX dynamic dimensions will have a shape of 0. Fix the 0-shape in the batch dimension if they exist.
if shape[0] == 0:
shape[0] = 1
if data_type == 1:
initial_types.append((name, FloatTensorType(shape)))
elif data_type == 11:
initial_types.append((name, DoubleTensorType(shape)))
elif data_type == 6:
initial_types.append((name, Int32TensorType(shape)))
elif data_type == 7:
initial_types.append((name, Int64TensorType(shape)))
else:
raise RuntimeError(
"Input data type {} not supported. Please fill an issue at https://github.com/microsoft/hummingbird/, or pass some test_input".format(
data_type
)
)
first_shape = initial_types[0][1].shape
assert all(
map(lambda x: x[1].shape == first_shape, initial_types)
), "Hummingbird currently supports only inputs with same shape."
extra_config[constants.N_INPUTS] = len(initial_types)
extra_config[constants.N_FEATURES] = extra_config[constants.N_INPUTS] * first_shape[1]
# Generate some random input data if necessary for the model conversion.
if backend == onnx.__name__ or backend == tvm_backend or backend == torch.jit.__name__:
test_input = []
for i, it in enumerate(initial_types):
if type(it[1]) is FloatTensorType:
test_input.append(np.array(np.random.rand(first_shape[0], first_shape[1]), dtype=np.float32))
elif type(it[1]) is DoubleTensorType:
test_input.append(np.random.rand(first_shape[0], first_shape[1]))
elif type(it[1]) is Int32TensorType:
test_input.append(np.array(np.random.randint(100, size=first_shape), dtype=np.int32))
elif type(it[1]) is Int64TensorType:
test_input.append(np.random.randint(100, size=first_shape))
else:
raise RuntimeError(
"Type {} not supported. Please fill an issue on https://github.com/microsoft/hummingbird/.".format(
type(it[1])
)
)
if extra_config[constants.N_INPUTS] == 1:
test_input = test_input[0]
else:
test_input = tuple(test_input)
extra_config[constants.TEST_INPUT] = test_input
# Set the number of features. Some converter requires to know in advance the number of features.
if constants.N_FEATURES not in extra_config and test_input is not None:
if len(test_input.shape) < 2:
extra_config[constants.N_FEATURES] = 1
else:
extra_config[constants.N_FEATURES] = test_input.shape[1]
# Set the initializers. Some converter requires the access to initializers.
initializers = {} if model.graph.initializer is None else {in_.name: in_ for in_ in model.graph.initializer}
extra_config[constants.ONNX_INITIALIZERS] = initializers
# Parse ONNX model as our internal data structure (i.e., Topology).
topology = parse_onnx_api_model(model)
# Convert the Topology object into a PyTorch model.
hb_model = topology_converter(topology, backend, test_input, device, extra_config=extra_config)
return hb_model
def _convert_sparkml(model, backend, test_input, device, extra_config={}):
"""
This function converts the specified *Spark-ML* (API) model into its *backend* counterpart.
The supported operators and backends can be found at `hummingbird.ml.supported`.
"""
assert model is not None
assert torch_installed(), "To use Hummingbird you need to install torch."
# Parse Spark-ML model as our internal data structure (i.e., Topology)
# We modify the Spark-ML model during translation.
model = model.copy()
topology = parse_sparkml_api_model(model, extra_config)
# Convert the Topology object into a PyTorch model.
hb_model = topology_converter(topology, backend, test_input, device, extra_config=extra_config)
return hb_model
def _convert_common(model, backend, test_input=None, device="cpu", extra_config={}):
"""
A common function called by convert(...) and convert_batch(...) below.
"""
assert model is not None
# We destroy extra_config during conversion, we create a copy here.
extra_config = deepcopy(extra_config)
# Set some default configurations.
# Add test input as extra configuration for conversion.
if (
test_input is not None
and constants.TEST_INPUT not in extra_config
and (is_spark_dataframe(test_input) or len(test_input) > 0)
):
extra_config[constants.TEST_INPUT] = test_input
# By default we return the converted model wrapped into a `hummingbird.ml._container.SklearnContainer` object.
if constants.CONTAINER not in extra_config:
extra_config[constants.CONTAINER] = True
# By default we set num of intra-op parallelism to be the number of physical cores available
if constants.N_THREADS not in extra_config:
extra_config[constants.N_THREADS] = psutil.cpu_count(logical=False)
# Fix the test_input type
if constants.TEST_INPUT in extra_config:
if type(extra_config[constants.TEST_INPUT]) == list:
extra_config[constants.TEST_INPUT] = np.array(extra_config[constants.TEST_INPUT])
elif type(extra_config[constants.TEST_INPUT]) == tuple:
# We are passing multiple datasets.
assert all(
[type(input) == np.ndarray for input in extra_config[constants.TEST_INPUT]]
), "When passing multiple inputs only ndarrays are supported."
assert all([len(input.shape) == 2 for input in extra_config[constants.TEST_INPUT]])
extra_config[constants.N_FEATURES] = sum([input.shape[1] for input in extra_config[constants.TEST_INPUT]])
extra_config[constants.N_INPUTS] = len(extra_config[constants.TEST_INPUT])
elif pandas_installed() and is_pandas_dataframe(extra_config[constants.TEST_INPUT]):
# We split the input dataframe into columnar ndarrays
extra_config[constants.N_INPUTS] = len(extra_config[constants.TEST_INPUT].columns)
extra_config[constants.N_FEATURES] = extra_config[constants.N_INPUTS]
input_names = list(extra_config[constants.TEST_INPUT].columns)
splits = [extra_config[constants.TEST_INPUT][input_names[idx]] for idx in range(extra_config[constants.N_INPUTS])]
splits = [df.to_numpy().reshape(-1, 1) for df in splits]
extra_config[constants.TEST_INPUT] = tuple(splits) if len(splits) > 1 else splits[0]
extra_config[constants.INPUT_NAMES] = input_names
elif sparkml_installed() and is_spark_dataframe(extra_config[constants.TEST_INPUT]):
from pyspark.ml.linalg import DenseVector, SparseVector, VectorUDT
from pyspark.sql.types import ArrayType, FloatType, DoubleType, IntegerType, LongType
df = extra_config[constants.TEST_INPUT]
input_names = [field.name for field in df.schema.fields]
extra_config[constants.N_INPUTS] = len(input_names)
extra_config[constants.N_FEATURES] = extra_config[constants.N_INPUTS]
size = df.count()
row_dict = df.take(1)[0].asDict()
splits = []
for field in df.schema.fields:
data_col = row_dict[field.name]
spark_dtype = type(field.dataType)
shape = 1
if spark_dtype in [DenseVector, VectorUDT]:
np_dtype = np.float64
shape = data_col.array.shape[0]
elif spark_dtype == SparseVector:
np_dtype = np.float64
shape = data_col.size
elif spark_dtype == ArrayType:
np_dtype = np.float64
shape = len(data_col)
elif spark_dtype == IntegerType:
np_dtype = np.int32
elif spark_dtype == FloatType:
np_dtype = np.float32
elif spark_dtype == DoubleType:
np_dtype = np.float64
elif spark_dtype == LongType:
np_dtype = np.int64
else:
raise ValueError("Unrecognized data type: {}".format(spark_dtype))
splits.append(np.zeros((size, shape), np_dtype))
extra_config[constants.TEST_INPUT] = tuple(splits) if len(splits) > 1 else splits[0]
extra_config[constants.INPUT_NAMES] = input_names
test_input = extra_config[constants.TEST_INPUT]
# We do some normalization on backends.
if type(backend) != str:
raise ValueError("Backend must be a string: {}".format(backend))
backend_formatted = backend.lower()
backend_formatted = backends[backend_formatted]
# Check whether we actually support the backend.
_supported_backend_check(backend_formatted, backend)
_supported_backend_check_config(model, backend_formatted, extra_config)
if type(model) in xgb_operator_list:
return _convert_xgboost(model, backend_formatted, test_input, device, extra_config)
if type(model) in lgbm_operator_list:
return _convert_lightgbm(model, backend_formatted, test_input, device, extra_config)
if _is_onnx_model(model):
return _convert_onnxml(model, backend_formatted, test_input, device, extra_config)
if _is_sparkml_model(model):
return _convert_sparkml(model, backend_formatted, test_input, device, extra_config)
return _convert_sklearn(model, backend_formatted, test_input, device, extra_config)
def convert(model, backend, test_input=None, device="cpu", extra_config={}):
"""
This function converts the specified input *model* into an implementation targeting *backend*.
*Convert* supports [Sklearn], [LightGBM], [XGBoost], [ONNX], and [SparkML] models.
For *LightGBM* and *XGBoost* currently only the Sklearn API is supported.
The detailed list of models and backends can be found at `hummingbird.ml.supported`.
    The *onnx* backend requires either a test_input or the initial types set through the extra_config parameter.
    The *torch.jit* and *tvm* backends require a test_input.
    For the *tvm* backend, the output container can run prediction only on test data of the same size as test_input.
[Sklearn]: https://scikit-learn.org/
[LightGBM]: https://lightgbm.readthedocs.io/
[XGBoost]: https://xgboost.readthedocs.io/
[ONNX]: https://onnx.ai/
[ONNX-ML]: https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md
[ONNX operators]: https://github.com/onnx/onnx/blob/master/docs/Operators.md
[Spark-ML]: https://spark.apache.org/docs/latest/api/python/pyspark.ml.html
Args:
model: An input model
backend: The target for the conversion
test_input: Some input data used to trace the model execution.
Multiple inputs can be passed as `tuple` objects or pandas Dataframes.
When possible, (`numpy`)`arrays` are suggested.
The number of rows becomes the batch size when tracing PyTorch models and compiling with TVM.
        device: The target device the model should be run on. This parameter is only used by the *torch** backends and *tvm*, and
            the devices supported are the ones supported by PyTorch, i.e., 'cpu' or 'cuda'.
extra_config: Extra configurations to be used by the individual operator converters.
The set of supported extra configurations can be found at `hummingbird.ml.supported`
Examples:
>>> pytorch_model = convert(sklearn_model,`torch`)
Returns:
A model implemented in *backend*, which is equivalent to the input model
"""
assert constants.REMAINDER_SIZE not in extra_config
return _convert_common(model, backend, test_input, device, extra_config)
def convert_batch(model, backend, test_input, remainder_size=0, device="cpu", extra_config={}):
"""
A convert function for batch by batch prediction use cases.
For some backends such as TVM, a container returned by `convert(...)` function above has a strict requirement on the
allowable input shape.
The container returned by this function is more flexible in that it can predict on the input of size
`test_input.shape[0] * k + remainder_size`, where `k` is any integer.
`test_input.shape[0]`, the number of rows in the `test_input`, is interpreted as a batch size, and at test time
prediction proceeds in a batch by batch fashion.
See the documentation for *convert(...)* above for more information.
Args:
model: An input model
backend: The target for the conversion
test_input: Some input data used to trace the model execution.
Multiple inputs can be passed as `tuple` objects or pandas Dataframes.
When possible, (`numpy`)`arrays` are suggested.
The number of rows becomes the batch size when tracing PyTorch models and compiling with TVM.
remainder_size: An integer that together with test_input determines the size of test data that can be predicted.
The input to the returned container can be of size `test_input.shape[0] * k + remainder_size`, where `k`
is any integer.
        device: The target device the model should be run on. This parameter is only used by the *torch** backends and *tvm*, and
            the devices supported are the ones supported by PyTorch, i.e., 'cpu' or 'cuda'.
extra_config: Extra configurations to be used by the individual operator converters.
The set of supported extra configurations can be found at `hummingbird.ml.supported`
Examples:
>>> tvm_model = convert_batch(sklearn_model,`tvm`, X)
>>> tvm_model = convert_batch(sklearn_model,`tvm`, X, remainder_size=50)
Returns:
A `BatchContainer` object that wraps one or two containers created by `convert(...)` function above.
"""
extra_config[constants.REMAINDER_SIZE] = remainder_size
return _convert_common(model, backend, test_input, device, extra_config)
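# A hedged end-to-end sketch (editor's addition, not part of the library):
# converting a scikit-learn model as described in the docstring of `convert`.
# It assumes scikit-learn is installed; the RandomForestClassifier and the
# random data below are placeholders.
if __name__ == "__main__":
    from sklearn.ensemble import RandomForestClassifier
    X = np.random.rand(200, 10).astype(np.float32)
    y = np.random.randint(2, size=200)
    skl_model = RandomForestClassifier(n_estimators=10).fit(X, y)
    hb_model = convert(skl_model, "torch", X)
    print(hb_model.predict(X[:5]))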
| 46.804671
| 154
| 0.664096
|
a8ffff137f86ceca07334836a572d1025a1574c0
| 874
|
py
|
Python
|
build/lib/swaglyrics/spotify.py
|
MatejMecka/SwagLyrics-For-Spotify
|
74e329ce675f977ade490846f7faadd064c96380
|
[
"MIT"
] | 1
|
2019-05-23T20:24:05.000Z
|
2019-05-23T20:24:05.000Z
|
swaglyrics/spotify.py
|
MatejMecka/SwagLyrics-For-Spotify
|
74e329ce675f977ade490846f7faadd064c96380
|
[
"MIT"
] | null | null | null |
swaglyrics/spotify.py
|
MatejMecka/SwagLyrics-For-Spotify
|
74e329ce675f977ade490846f7faadd064c96380
|
[
"MIT"
] | null | null | null |
import win32gui
def get_info_windows():
windows = []
# Older Spotify versions - simply FindWindow for "SpotifyMainWindow"
windows.append(win32gui.GetWindowText(win32gui.FindWindow("SpotifyMainWindow", None)))
# Newer Spotify versions - create an EnumHandler for EnumWindows and flood the list with Chrome_WidgetWin_0s
def find_spotify_uwp(hwnd, windows):
text = win32gui.GetWindowText(hwnd)
if win32gui.GetClassName(hwnd) == "Chrome_WidgetWin_0" and len(text) > 0:
windows.append(text)
win32gui.EnumWindows(find_spotify_uwp, windows)
    # Pop window titles until one looks like "artist - track".
    while windows:
        text = windows.pop()
        try:
            artist, track = text.split(" - ", 1)
            return artist, track
        except ValueError:
            # Title does not contain " - "; try the next window.
            pass
    return None
def artist():
    try:
        return get_info_windows()[0]
    except Exception:
        # get_info_windows() may return None when no Spotify window is found.
        return None
def song():
    try:
        return get_info_windows()[1]
    except Exception:
        return None
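# A small usage sketch (editor's addition, not part of the original module):
# the helpers above are meant to be polled by the caller.
if __name__ == '__main__':
    current_artist, current_song = artist(), song()
    if current_artist and current_song:
        print('Now playing: {} - {}'.format(current_artist, current_song))
    else:
        print('Spotify window not found or nothing is playing.')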
| 21.317073
| 109
| 0.726545
|
6c359d5f59caabc6b12bd5ba1cc82e28b9aec304
| 8,183
|
py
|
Python
|
mapry/cpp/generate/jsoncpp_header.py
|
Parquery/mapry
|
93515307f9eba8447fe64b0ac7cc68b2d07205a7
|
[
"MIT"
] | 11
|
2019-06-26T05:56:41.000Z
|
2021-03-28T16:44:16.000Z
|
mapry/cpp/generate/jsoncpp_header.py
|
Parquery/mapry
|
93515307f9eba8447fe64b0ac7cc68b2d07205a7
|
[
"MIT"
] | 4
|
2019-10-18T14:43:59.000Z
|
2020-04-02T19:12:07.000Z
|
mapry/cpp/generate/jsoncpp_header.py
|
Parquery/mapry
|
93515307f9eba8447fe64b0ac7cc68b2d07205a7
|
[
"MIT"
] | 3
|
2019-06-17T07:39:03.000Z
|
2020-04-01T14:01:23.000Z
|
"""Generate the header for de/serialization from/to Jsoncpp values."""
from typing import List, Sequence, Set, Union # pylint: disable=unused-import
from icontract import ensure
import mapry
import mapry.cpp.generate
import mapry.cpp.jinja2_env
import mapry.cpp.naming
import mapry.indention
@ensure(lambda result: not result.endswith('\n'))
def _includes(
graph: mapry.Graph, cpp: mapry.Cpp, types_header_path: str,
parse_header_path: str) -> str:
"""
Generate the include directives of the header file.
:param graph: definition of the object graph
:param cpp: C++ settings
:param types_header_path:
path to the header file that defines the types of the object graph
:param parse_header_path:
path to the header file that defines the general parsing structures
:return: generated code
"""
# pylint: disable=too-many-branches
stl_block = set() # type: Set[str]
third_party_block = {"#include <json/json.h> // jsoncpp"}
# yapf: disable
first_party_block = {
'#include "{}"'.format(pth)
for pth in [types_header_path, parse_header_path]}
# yapf: enable
if mapry.needs_type(a_type=graph, query=mapry.String):
stl_block.add("#include <string>")
if mapry.needs_type(a_type=graph, query=mapry.Path):
if cpp.path_as == 'std::filesystem::path':
stl_block.add("#include <filesystem>")
elif cpp.path_as == "boost::filesystem::path":
third_party_block.add("#include <boost/filesystem/path.hpp>")
else:
raise NotImplementedError(
"Unhandled schema.cpp.path_as: {!r}".format(cpp.path_as))
if graph.classes:
stl_block.add("#include <map>")
stl_block.add("#include <string>")
# Check for optional fields
has_optional = False
for prop in graph.properties.values():
if prop.optional:
has_optional = True
break
if not has_optional:
for cls in graph.classes.values():
for prop in cls.properties.values():
if prop.optional:
has_optional = True
break
if not has_optional:
for embed in graph.embeds.values():
for prop in embed.properties.values():
if prop.optional:
has_optional = True
break
if has_optional:
if cpp.optional_as == "boost::optional":
third_party_block.add("#include <boost/optional.hpp>")
elif cpp.optional_as == "std::optional":
stl_block.add("#include <optional>")
elif cpp.optional_as == "std::experimental::optional":
third_party_block.add("#include <optional.hpp>")
else:
raise NotImplementedError(
"Unhandled schema.cpp.optional_as: {!r}".format(
cpp.optional_as))
block_strs = [
'\n'.join(sorted(third_party_block)), '\n'.join(sorted(stl_block)),
'\n'.join(sorted(first_party_block))
]
assert all(block_str == block_str.strip() for block_str in block_strs), \
"No empty blocks in include blocks."
return '\n\n'.join(block_strs)
_PARSE_JSONCPP_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
/**
* parses {{graph.name|as_composite}} from a JSON value.
*
* @param [in] value to be parsed
* @param [in] ref reference to the value (e.g., a reference path)
* @param [out] target parsed {{ graph.name|as_composite }}
* @param [out] errors encountered during parsing
*/
void {{graph.name|as_variable}}_from(
const Json::Value& value,
std::string ref,
{{ graph.name|as_composite }}* target,
parse::Errors* errors);
{% if nongraph_composites %}
{% for composite in nongraph_composites %}
/**
* parses {{ composite.name|as_composite }} from a JSON value.
*
* @param [in] value to be parsed
{% for ref_cls in references[composite] %}
* @param {{
ref_cls.plural|as_variable }}_registry registry of the {{
ref_cls.name|as_composite }} instances
{% endfor %}
* @param ref reference to the value (e.g., a reference path)
* @param [out] target parsed data
* @param [out] errors encountered during parsing
*/
void {{ composite.name|as_variable }}_from(
const Json::Value& value,
{% for ref_cls in references[composite] %}
const std::map<std::string, std::unique_ptr<{{
ref_cls.name|as_composite }}>>& {{
ref_cls.plural|as_variable }}_registry,
{% endfor %}
std::string ref,
{{ composite.name|as_composite }}* target,
parse::Errors* errors);
{% endfor %}
{% endif %}''')
@ensure(lambda result: not result.endswith('\n'))
def _parse_definitions(graph: mapry.Graph) -> str:
"""
Generate the code that defines the parsing functions of Jsoncpp.
:param graph: mapry definition of the object graph
:return: generated code
"""
nongraph_composites = [] # type: List[Union[mapry.Embed, mapry.Class]]
nongraph_composites.extend(graph.embeds.values())
nongraph_composites.extend(graph.classes.values())
# yapf: disable
references = {
composite: mapry.references(a_type=composite)
for composite in nongraph_composites}
# yapf: enable
return _PARSE_JSONCPP_TPL.render(
graph=graph,
nongraph_composites=nongraph_composites,
references=references).rstrip()
_SERIALIZE_DEFINITIONS_TPL = mapry.cpp.jinja2_env.ENV.from_string(
'''\
{% for composite in composites %}
/**
* serializes {{ composite.name|as_composite }} to a JSON value.
*
* @param {{ composite.name|as_variable }} to be serialized
* @return JSON value
*/
Json::Value serialize_{{ composite.name|as_variable }}(
const {{ composite.name|as_composite }}& {{ composite.name|as_variable }});
{% endfor %}''')
@ensure(lambda result: not result.endswith('\n'))
def _serialize_definitions(composites: Sequence[mapry.Composite]) -> str:
"""
Generate the definitions of functions that serialize the composite object.
:param composites:
all composites (graph, classes and embeds) defined in the graph
:return: generated code
"""
return _SERIALIZE_DEFINITIONS_TPL.render(composites=composites).rstrip()
@ensure(lambda result: result.endswith('\n'))
def generate(
graph: mapry.Graph, cpp: mapry.Cpp, types_header_path: str,
parse_header_path: str) -> str:
"""
Generate the header file for de/serialization from/to Jsoncpp.
:param graph: definition of the object graph
:param cpp: C++ settings
:param types_header_path:
path to the header file that defines the types of the object graph
:param parse_header_path:
path to the header file that defines the general parsing structures
:return: content of the header file
"""
blocks = [
"#pragma once", mapry.cpp.generate.WARNING,
_includes(
graph=graph,
cpp=cpp,
types_header_path=types_header_path,
parse_header_path=parse_header_path)
]
namespace_parts = cpp.namespace.split('::')
if namespace_parts:
# yapf: disable
namespace_opening = '\n'.join(
['namespace {} {{'.format(namespace_part)
for namespace_part in namespace_parts])
# yapf: enable
blocks.append(namespace_opening)
blocks.append('namespace jsoncpp {')
blocks.append(_parse_definitions(graph=graph))
composites = [] # type: List[mapry.Composite]
composites.append(graph)
composites.extend(graph.classes.values())
composites.extend(graph.embeds.values())
blocks.append(_serialize_definitions(composites=composites))
blocks.append('} // namespace jsoncpp')
if namespace_parts:
# yapf: disable
namespace_closing = '\n'.join(
['}} // namespace {}'.format(namespace_part)
for namespace_part in reversed(namespace_parts)])
# yapf: enable
blocks.append(namespace_closing)
blocks.append(mapry.cpp.generate.WARNING)
return mapry.indention.reindent(
text='\n\n'.join(blocks) + '\n', indention=cpp.indention)
| 32.472222
| 79
| 0.649884
|
50f6eafaa84195f1646bdcd734976dd3705c091c
| 641
|
py
|
Python
|
beginner/02-list-overlap.py
|
chaudha4/python-projects
|
baba3235069b7d6b084f28904f0662c043762175
|
[
"MIT"
] | null | null | null |
beginner/02-list-overlap.py
|
chaudha4/python-projects
|
baba3235069b7d6b084f28904f0662c043762175
|
[
"MIT"
] | 3
|
2021-11-23T22:19:19.000Z
|
2022-03-12T00:52:34.000Z
|
beginner/02-list-overlap.py
|
chaudha4/python-projects
|
baba3235069b7d6b084f28904f0662c043762175
|
[
"MIT"
] | null | null | null |
"""
Take two lists, say for example these two:
a = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
and write a program that returns a list that contains only the elements that are common between the lists
"""
a = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# Using a loop
c = []
for aa in a:
for bb in b:
if aa == bb:
c.append(aa)
print("Using Loop - ", c)
# Using set
c = list(set(a) & set(b))
print("Using set() - ", c)
# Using list comprehensions
c = [aa for aa in a for bb in b if aa == bb]
print("Using list comprehensions - ", c)
| 22.103448 | 105 | 0.555382 |
f7f2dfdf55f1fde84f99c713b689479454f6e33f | 46,209 | py | Python | salt/modules/win_service.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | ["Apache-2.0"] | null | null | null | salt/modules/win_service.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | ["Apache-2.0"] | null | null | null | salt/modules/win_service.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Windows Service module.
.. versionchanged:: 2016.11.0 - Rewritten to use PyWin32
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import fnmatch
import logging
import re
import time
# Import Salt libs
import salt.utils.path
import salt.utils.platform
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import win32security
import win32service
import win32serviceutil
import pywintypes
HAS_WIN32_MODS = True
except ImportError:
HAS_WIN32_MODS = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "service"
SERVICE_TYPE = {
1: "Kernel Driver",
2: "File System Driver",
4: "Adapter Driver",
8: "Recognizer Driver",
16: "Win32 Own Process",
32: "Win32 Share Process",
256: "Interactive",
"kernel": 1,
"filesystem": 2,
"adapter": 4,
"recognizer": 8,
"own": 16,
"share": 32,
}
SERVICE_CONTROLS = {
1: "Stop",
2: "Pause/Continue",
4: "Shutdown",
8: "Change Parameters",
16: "Netbind Change",
32: "Hardware Profile Change",
64: "Power Event",
128: "Session Change",
256: "Pre-Shutdown",
512: "Time Change",
1024: "Trigger Event",
}
SERVICE_STATE = {
1: "Stopped",
2: "Start Pending",
3: "Stop Pending",
4: "Running",
5: "Continue Pending",
6: "Pause Pending",
7: "Paused",
}
SERVICE_ERRORS = {0: "No Error", 1066: "Service Specific Error"}
SERVICE_START_TYPE = {
"boot": 0,
"system": 1,
"auto": 2,
"manual": 3,
"disabled": 4,
0: "Boot",
1: "System",
2: "Auto",
3: "Manual",
4: "Disabled",
}
SERVICE_ERROR_CONTROL = {
0: "Ignore",
1: "Normal",
2: "Severe",
3: "Critical",
"ignore": 0,
"normal": 1,
"severe": 2,
"critical": 3,
}
def __virtual__():
"""
Only works on Windows systems with PyWin32 installed
"""
if not salt.utils.platform.is_windows():
return False, "Module win_service: module only works on Windows."
if not HAS_WIN32_MODS:
return False, "Module win_service: failed to load win32 modules"
return __virtualname__
class ServiceDependencies(object):
'''
Helper class which provides functionality to get all dependencies and
parents of a Windows service
Args:
name (str): The name of the service. This is not the display name.
Use ``get_service_name`` to find the service name.
all_services (callback): The name of the method which
provides a list of all available service names as done by
the ``win_service.get_all()`` method.
service_info (callback): The name of the method which
allows to pass the service name and returns a dict with meets
the requirements ``{service_name: {'Dependencies': []}}`` as
done by the ``win_service.info(name)`` method
'''
def __init__(self, name, all_services, service_info):
# Sort for predictable behavior
self._all_services = sorted(all_services())
self._name = self._normalize_name(self._all_services, name)
self._service_info = self._populate_service_info(self._all_services, service_info)
def _populate_service_info(self, all_services, service_info):
ret = {}
for name in all_services:
dependencies = service_info(name).get('Dependencies', [])
# Sort for predictable behavior
ret[name] = sorted(self._normalize_multiple_name(all_services, *dependencies))
log.trace("Added dependencies of %s: %s", name, ret[name])
return ret
def _dependencies(self, name):
dependencies = self._service_info.get(name, [])
# Sort for predictable behavior
ret = sorted(self._normalize_multiple_name(self._all_services, *dependencies))
log.trace("Added dependencies of %s: %s", name, ret)
return ret
def _dependencies_recursion(self, name):
# Using a list here to maintain order
ret = list()
try:
dependencies = self._dependencies(name)
for dependency in dependencies:
indirect_dependencies = self._dependencies_recursion(dependency)
for indirect_dependency in indirect_dependencies:
if indirect_dependency not in ret:
ret.append(indirect_dependency)
for dependency in dependencies:
if dependency not in ret:
ret.append(dependency)
except Exception as e:
log.debug(e)
ret = list()
return ret
def _normalize_name(self, references, difference):
# Normalize Input
normalized = self._normalize_multiple_name(references, difference)
if not normalized:
raise ValueError("The provided name '{}' does not exist".format(difference))
return normalized[0]
def _normalize_multiple_name(self, references, *differences):
# Normalize Input
ret = list()
for difference in differences:
difference_str = str(difference)
for reference in references:
reference_str = str(reference)
if reference_str.lower() == difference_str.lower() and reference_str not in ret:
ret.append(reference_str)
break
return ret
def dependencies(self, with_indirect=False):
normalized = self._normalize_name(self._all_services, self._name)
if bool(with_indirect):
ret = self._dependencies_recursion(normalized)
else:
ret = self._dependencies(normalized)
log.trace("Dependencies of '%s': '%s'", normalized, ret)
return ret
def _parents(self, name):
# Using a list here to maintain order
ret = list()
try:
# Sort for predictable behavior
for service, dependencies in sorted(self._service_info.items()):
if name in dependencies:
if service in ret:
ret.remove(service)
ret.append(service)
except Exception as e:
log.debug(e)
ret = list()
return ret
def _parents_recursion(self, name):
# Using a list here to maintain order
ret = list()
try:
parents = self._parents(name)
for parent in parents:
if parent not in ret:
ret.append(parent)
for parent in parents:
indirect_parents = self._parents_recursion(parent)
for indirect_parent in indirect_parents:
if indirect_parent in ret:
ret.remove(indirect_parent)
ret.append(indirect_parent)
except Exception as e:
log.debug(e)
ret = list()
return ret
def parents(self, with_indirect=False):
normalized = self._normalize_name(self._all_services, self._name)
if bool(with_indirect):
ret = self._parents_recursion(normalized)
else:
ret = self._parents(normalized)
log.trace("Parents of '%s': '%s'", normalized, ret)
return ret
def start_order(self, with_deps=False, with_parents=False):
ret = []
if with_deps:
ret.extend(self.dependencies(with_indirect=True))
normalized = self._normalize_name(self._all_services, self._name)
ret.append(normalized)
if with_parents:
ret.extend(self.parents(with_indirect=True))
return ret
def stop_order(self, with_deps=False, with_parents=False):
order = self.start_order(with_deps=with_deps, with_parents=with_parents)
order.reverse()
return order
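# --- Hedged illustration (editor's addition, not part of the salt module) ---
# How ServiceDependencies resolves ordering, using hypothetical service names and
# in-memory stand-ins for the get_all()/info() callbacks, which normally need a
# live Windows SCM:
#
#     fake_services = ["svcA", "svcB", "svcC"]
#     fake_info = {
#         "svcA": {"Dependencies": []},
#         "svcB": {"Dependencies": ["svcA"]},
#         "svcC": {"Dependencies": ["svcB"]},
#     }
#     deps = ServiceDependencies(
#         "svcC", lambda: fake_services, lambda n: fake_info[n])
#     deps.start_order(with_deps=True)   # -> ["svcA", "svcB", "svcC"]
#
#     deps = ServiceDependencies(
#         "svcA", lambda: fake_services, lambda n: fake_info[n])
#     deps.stop_order(with_parents=True) # -> ["svcC", "svcB", "svcA"]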
def _status_wait(service_name, end_time, service_states):
"""
Helper function that will wait for the status of the service to match the
provided status before an end time expires. Used for service stop and start
.. versionadded:: 2017.7.9,2018.3.4
Args:
service_name (str):
The name of the service
end_time (float):
A future time. e.g. time.time() + 10
service_states (list):
Services statuses to wait for as returned by info()
Returns:
dict: A dictionary containing information about the service.
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
"""
info_results = info(service_name)
while info_results["Status"] in service_states and time.time() < end_time:
# From Microsoft: Do not wait longer than the wait hint. A good interval
# is one-tenth of the wait hint but not less than 1 second and not more
# than 10 seconds.
# https://docs.microsoft.com/en-us/windows/desktop/services/starting-a-service
# https://docs.microsoft.com/en-us/windows/desktop/services/stopping-a-service
# Wait hint is in ms
wait_time = info_results["Status_WaitHint"]
# Convert to seconds or 0
wait_time = wait_time / 1000 if wait_time else 0
if wait_time < 1:
wait_time = 1
elif wait_time > 10:
wait_time = 10
time.sleep(wait_time)
info_results = info(service_name)
return info_results
def _cmd_quote(cmd):
r"""
Helper function to properly format the path to the binary for the service
Must be wrapped in double quotes to account for paths that have spaces. For
example:
``"C:\Program Files\Path\to\bin.exe"``
Args:
cmd (str): Full path to the binary
Returns:
str: Properly quoted path to the binary
"""
# Remove all single and double quotes from the beginning and the end
pattern = re.compile("^(\\\"|').*|.*(\\\"|')$")
while pattern.match(cmd) is not None:
cmd = cmd.strip('"').strip("'")
# Ensure the path to the binary is wrapped in double quotes to account for
# spaces in the path
cmd = '"{0}"'.format(cmd)
return cmd
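# Hedged illustration (editor's addition): the quoting behaviour above, shown on
# a hypothetical path.
#
#     _cmd_quote('C:\\Program Files\\salt\\bin\\python.exe')
#     # -> '"C:\\Program Files\\salt\\bin\\python.exe"'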
def get_enabled():
"""
Return a list of enabled services. Enabled is defined as a service that is
marked to Auto Start.
Returns:
list: A list of enabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
"""
raw_services = _get_services()
services = set()
for service in raw_services:
if info(service["ServiceName"])["StartType"] in ["Auto"]:
services.add(service["ServiceName"])
return sorted(services)
def get_disabled():
"""
Return a list of disabled services. Disabled is defined as a service that is
marked 'Disabled' or 'Manual'.
Returns:
list: A list of disabled services.
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
"""
raw_services = _get_services()
services = set()
for service in raw_services:
if info(service["ServiceName"])["StartType"] in ["Manual", "Disabled"]:
services.add(service["ServiceName"])
return sorted(services)
def available(name):
"""
Check if a service is available on the system.
Args:
name (str): The name of the service to check
Returns:
bool: ``True`` if the service is available, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.available <service name>
"""
for service in get_all():
if name.lower() == service.lower():
return True
return False
def missing(name):
"""
The inverse of service.available.
Args:
name (str): The name of the service to check
Returns:
bool: ``True`` if the service is missing, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.missing <service name>
"""
return name not in get_all()
def _get_services():
"""
Returns a list of all services on the system.
"""
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_ENUMERATE_SERVICE
)
try:
services = win32service.EnumServicesStatusEx(handle_scm)
except AttributeError:
services = win32service.EnumServicesStatus(handle_scm)
finally:
win32service.CloseServiceHandle(handle_scm)
return services
def get_all():
"""
Return all installed services
Returns:
list: Returns a list of all services on the system.
CLI Example:
.. code-block:: bash
salt '*' service.get_all
"""
services = _get_services()
ret = set()
for service in services:
ret.add(service["ServiceName"])
return sorted(ret)
def get_service_name(*args):
"""
The Display Name is what is displayed in Windows when services.msc is
executed. Each Display Name has an associated Service Name which is the
actual name of the service. This function allows you to discover the
Service Name by returning a dictionary of Display Names and Service Names,
or filter by adding arguments of Display Names.
If no args are passed, return a dict of all services where the keys are the
service Display Names and the values are the Service Names.
If arguments are passed, create a dict of Display Names and Service Names
Returns:
dict: A dictionary of display names and service names
CLI Examples:
.. code-block:: bash
salt '*' service.get_service_name
salt '*' service.get_service_name 'Google Update Service (gupdate)' 'DHCP Client'
"""
raw_services = _get_services()
services = dict()
for raw_service in raw_services:
if args:
if (
raw_service["DisplayName"] in args
or raw_service["ServiceName"] in args
or raw_service["ServiceName"].lower() in args
):
services[raw_service["DisplayName"]] = raw_service["ServiceName"]
else:
services[raw_service["DisplayName"]] = raw_service["ServiceName"]
return services
def info(name):
"""
Get information about a service on the system
Args:
name (str): The name of the service. This is not the display name. Use
``get_service_name`` to find the service name.
Returns:
dict: A dictionary containing information about the service.
CLI Example:
.. code-block:: bash
salt '*' service.info spooler
"""
try:
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_CONNECT
)
except pywintypes.error as exc:
raise CommandExecutionError(
"Failed to connect to the SCM: {0}".format(exc.strerror)
)
try:
handle_svc = win32service.OpenService(
handle_scm,
name,
win32service.SERVICE_ENUMERATE_DEPENDENTS
| win32service.SERVICE_INTERROGATE
| win32service.SERVICE_QUERY_CONFIG
| win32service.SERVICE_QUERY_STATUS,
)
except pywintypes.error as exc:
raise CommandExecutionError(
"Failed To Open {0}: {1}".format(name, exc.strerror)
)
try:
config_info = win32service.QueryServiceConfig(handle_svc)
status_info = win32service.QueryServiceStatusEx(handle_svc)
try:
description = win32service.QueryServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION
)
except pywintypes.error:
description = "Failed to get description"
delayed_start = win32service.QueryServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO
)
finally:
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
ret = dict()
try:
sid = win32security.LookupAccountName("", "NT Service\\{0}".format(name))[0]
ret["sid"] = win32security.ConvertSidToStringSid(sid)
except pywintypes.error:
ret["sid"] = "Failed to get SID"
ret["BinaryPath"] = config_info[3]
ret["LoadOrderGroup"] = config_info[4]
ret["TagID"] = config_info[5]
ret["Dependencies"] = config_info[6]
ret["ServiceAccount"] = config_info[7]
ret["DisplayName"] = config_info[8]
ret["Description"] = description
ret["Status_ServiceCode"] = status_info["ServiceSpecificExitCode"]
ret["Status_CheckPoint"] = status_info["CheckPoint"]
ret["Status_WaitHint"] = status_info["WaitHint"]
ret["StartTypeDelayed"] = delayed_start
flags = list()
for bit in SERVICE_TYPE:
if isinstance(bit, int):
if config_info[0] & bit:
flags.append(SERVICE_TYPE[bit])
ret["ServiceType"] = flags if flags else config_info[0]
flags = list()
for bit in SERVICE_CONTROLS:
if status_info["ControlsAccepted"] & bit:
flags.append(SERVICE_CONTROLS[bit])
ret["ControlsAccepted"] = flags if flags else status_info["ControlsAccepted"]
try:
ret["Status_ExitCode"] = SERVICE_ERRORS[status_info["Win32ExitCode"]]
except KeyError:
ret["Status_ExitCode"] = status_info["Win32ExitCode"]
try:
ret["StartType"] = SERVICE_START_TYPE[config_info[1]]
except KeyError:
ret["StartType"] = config_info[1]
try:
ret["ErrorControl"] = SERVICE_ERROR_CONTROL[config_info[2]]
except KeyError:
ret["ErrorControl"] = config_info[2]
try:
ret["Status"] = SERVICE_STATE[status_info["CurrentState"]]
except KeyError:
ret["Status"] = status_info["CurrentState"]
return ret
def start(name, timeout=90, with_deps=False, with_parents=False):
    """
Start the specified service.
.. warning::
You cannot start a disabled service in Windows. If the service is
disabled, it will be changed to ``Manual`` start.
Args:
name (str): The name of the service to start
timeout (int):
The time in seconds to wait for the service to start before
returning. Default is 90 seconds
.. versionadded:: 2017.7.9,2018.3.4
with_deps (bool):
If enabled start the given service and the services the current
service depends on.
with_parents (bool):
If enabled and in case other running services depend on the to be start
service, this flag indicates that those other services will be started
as well.
Returns:
bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
if the service is already started
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
"""
# Set the service to manual if disabled
if disabled(name):
modify(name, start_type="Manual")
ret = set()
# Using a list here to maintain order
services = ServiceDependencies(name, get_all, info)
start = services.start_order(with_deps=with_deps, with_parents=with_parents)
log.debug("Starting services %s", start)
for name in start:
try:
win32serviceutil.StartService(name)
except pywintypes.error as exc:
if exc.winerror != 1056:
raise CommandExecutionError(
'Failed To Start {0}: {1}'.format(name, exc.strerror))
log.debug('Service "%s" is running', name)
srv_status = _status_wait(service_name=name,
end_time=time.time() + int(timeout),
service_states=['Start Pending', 'Stopped'])
ret.add(srv_status['Status'] == 'Running')
return False not in ret
def stop(name, timeout=90, with_deps=False, with_parents=False):
    """
Stop the specified service
Args:
name (str): The name of the service to stop
timeout (int):
The time in seconds to wait for the service to stop before
returning. Default is 90 seconds
.. versionadded:: 2017.7.9,2018.3.4
with_deps (bool):
If enabled stop the given service and the services
the current service depends on.
with_parents (bool):
If enabled and in case other running services depend on the to be stopped
service, this flag indicates that those other services will be stopped
as well.
If disabled, the service stop will fail in case other running services
depend on the to be stopped service.
Returns:
bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
if the service is already stopped
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
    """
ret = set()
services = ServiceDependencies(name, get_all, info)
stop = services.stop_order(with_deps=with_deps, with_parents=with_parents)
log.debug("Stopping services %s", stop)
for name in stop:
try:
win32serviceutil.StopService(name)
except pywintypes.error as exc:
if exc.winerror != 1062:
raise CommandExecutionError(
'Failed To Stop {0}: {1}'.format(name, exc.strerror))
log.debug('Service "%s" is not running', name)
srv_status = _status_wait(service_name=name,
end_time=time.time() + int(timeout),
service_states=['Running', 'Stop Pending'])
ret.add(srv_status['Status'] == 'Stopped')
return False not in ret
def restart(name, timeout=90, with_deps=False, with_parents=False):
    """
Restart the named service. This issues a stop command followed by a start.
Args:
name: The name of the service to restart.
.. note::
If the name passed is ``salt-minion`` a scheduled task is
created and executed to restart the salt-minion service.
timeout (int):
The time in seconds to wait for the service to stop and start before
returning. Default is 90 seconds
.. note::
The timeout is cumulative meaning it is applied to the stop and
then to the start command. A timeout of 90 could take up to 180
seconds if the service is long in stopping and starting
.. versionadded:: 2017.7.9,2018.3.4
with_deps (bool):
If enabled restart the given service and the services
the current service depends on.
with_parents (bool):
If enabled and in case other running services depend on the to be
restarted service, this flag indicates that those other services
will be restarted as well.
If disabled, the service restart will fail in case other running
services depend on the to be restarted service.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
"""
if "salt-minion" in name:
create_win_salt_restart_task()
return execute_salt_restart_task()
ret = set()
ret.add(stop(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents))
ret.add(start(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents))
return False not in ret
def create_win_salt_restart_task():
"""
Create a task in Windows task scheduler to enable restarting the salt-minion
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' service.create_win_salt_restart_task()
    """
# Updated to use full name for Nessus agent
cmd = salt.utils.path.which('cmd')
args = '/c ping -n 3 127.0.0.1 && net stop salt-minion && net start ' \
'salt-minion'
return __salt__['task.create_task'](name='restart-salt-minion',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd,
arguments=args,
trigger_type='Once',
start_date='1975-01-01',
start_time='01:00')
def execute_salt_restart_task():
"""
Run the Windows Salt restart task
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' service.execute_salt_restart_task()
"""
return __salt__["task.run"](name="restart-salt-minion")
def status(name, *args, **kwargs):
"""
Return the status for a service.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
"""
results = {}
all_services = get_all()
contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
if contains_globbing:
services = fnmatch.filter(all_services, name)
else:
services = [name]
for service in services:
results[service] = info(service)["Status"] in ["Running", "Stop Pending"]
if contains_globbing:
return results
return results[name]
def getsid(name):
"""
Return the SID for this windows service
Args:
name (str): The name of the service for which to return the SID
Returns:
str: A string representing the SID for the service
CLI Example:
.. code-block:: bash
salt '*' service.getsid <service name>
"""
return info(name)["sid"]
def modify(
name,
bin_path=None,
exe_args=None,
display_name=None,
description=None,
service_type=None,
start_type=None,
start_delayed=None,
error_control=None,
load_order_group=None,
dependencies=None,
account_name=None,
account_password=None,
run_interactive=None,
):
# pylint: disable=anomalous-backslash-in-string
"""
Modify a service's parameters. Changes will not be made for parameters that
are not passed.
.. versionadded:: 2016.11.0
Args:
name (str):
The name of the service. Can be found using the
``service.get_service_name`` function
bin_path (str):
The path to the service executable. Backslashes must be escaped, eg:
``C:\\path\\to\\binary.exe``
exe_args (str):
Any arguments required by the service executable
display_name (str):
The name to display in the service manager
description (str):
The description to display for the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set.
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
- normal: Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: a dictionary of changes made
CLI Example:
.. code-block:: bash
salt '*' service.modify spooler start_type=disabled
"""
# pylint: enable=anomalous-backslash-in-string
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms681987(v=vs.85).aspx
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms681988(v-vs.85).aspx
handle_scm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_CONNECT)
try:
handle_svc = win32service.OpenService(
handle_scm,
name,
win32service.SERVICE_CHANGE_CONFIG | win32service.SERVICE_QUERY_CONFIG,
)
except pywintypes.error as exc:
raise CommandExecutionError(
"Failed To Open {0}: {1}".format(name, exc.strerror)
)
config_info = win32service.QueryServiceConfig(handle_svc)
changes = dict()
# Input Validation
if bin_path is not None:
# shlex.quote the path to the binary
bin_path = _cmd_quote(bin_path)
if exe_args is not None:
bin_path = "{0} {1}".format(bin_path, exe_args)
changes["BinaryPath"] = bin_path
if service_type is not None:
if service_type.lower() in SERVICE_TYPE:
service_type = SERVICE_TYPE[service_type.lower()]
if run_interactive:
service_type = service_type | \
win32service.SERVICE_INTERACTIVE_PROCESS
else:
raise CommandExecutionError(
"Invalid Service Type: {0}".format(service_type)
)
else:
if run_interactive is True:
service_type = config_info[0] | \
win32service.SERVICE_INTERACTIVE_PROCESS
elif run_interactive is False:
service_type = config_info[0] ^ \
win32service.SERVICE_INTERACTIVE_PROCESS
else:
service_type = win32service.SERVICE_NO_CHANGE
if service_type is not win32service.SERVICE_NO_CHANGE:
flags = list()
for bit in SERVICE_TYPE:
if isinstance(bit, int) and service_type & bit:
flags.append(SERVICE_TYPE[bit])
changes["ServiceType"] = flags if flags else service_type
if start_type is not None:
if start_type.lower() in SERVICE_START_TYPE:
start_type = SERVICE_START_TYPE[start_type.lower()]
else:
raise CommandExecutionError("Invalid Start Type: {0}".format(start_type))
changes["StartType"] = SERVICE_START_TYPE[start_type]
else:
start_type = win32service.SERVICE_NO_CHANGE
if error_control is not None:
if error_control.lower() in SERVICE_ERROR_CONTROL:
error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
else:
raise CommandExecutionError(
"Invalid Error Control: {0}".format(error_control)
)
changes["ErrorControl"] = SERVICE_ERROR_CONTROL[error_control]
else:
error_control = win32service.SERVICE_NO_CHANGE
if account_name is not None:
changes["ServiceAccount"] = account_name
if account_name in ["LocalSystem", "LocalService", "NetworkService"]:
account_password = ""
if account_password is not None:
changes["ServiceAccountPassword"] = "XXX-REDACTED-XXX"
if load_order_group is not None:
changes["LoadOrderGroup"] = load_order_group
if dependencies is not None:
changes["Dependencies"] = dependencies
if display_name is not None:
changes["DisplayName"] = display_name
win32service.ChangeServiceConfig(
handle_svc,
service_type,
start_type,
error_control,
bin_path,
load_order_group,
0,
dependencies,
account_name,
account_password,
display_name,
)
if description is not None:
win32service.ChangeServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description
)
changes["Description"] = description
if start_delayed is not None:
        # You can only set delayed start for services that are set to auto start
        # Start type 2 is Auto
        # SERVICE_NO_CHANGE means the start type was not modified above
        if (
            start_type == win32service.SERVICE_NO_CHANGE and config_info[1] == 2
        ) or start_type == 2:
win32service.ChangeServiceConfig2(
handle_svc,
win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
start_delayed,
)
changes["StartTypeDelayed"] = start_delayed
else:
changes["Warning"] = 'start_delayed: Requires start_type "auto"'
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
return changes
def enable(name, start_type="auto", start_delayed=False, **kwargs):
"""
Enable the named service to start at boot
Args:
name (str): The name of the service to enable.
start_type (str): Specifies the service start type. Valid options are as
follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual: Service must be started manually
- disabled: Service cannot be started
start_delayed (bool): Set the service to Auto(Delayed Start). Only valid
if the start_type is set to ``Auto``. If service_type is not passed,
but the service is already set to ``Auto``, then the flag will be
set.
Returns:
bool: ``True`` if successful, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
"""
modify(name, start_type=start_type, start_delayed=start_delayed)
svcstat = info(name)
if start_type.lower() == "auto":
return (
svcstat["StartType"].lower() == start_type.lower()
and svcstat["StartTypeDelayed"] == start_delayed
)
else:
return svcstat["StartType"].lower() == start_type.lower()
def disable(name, **kwargs):
"""
Disable the named service to start at boot
Args:
name (str): The name of the service to disable
Returns:
bool: ``True`` if disabled, ``False`` otherwise
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
"""
modify(name, start_type="Disabled")
return info(name)["StartType"] == "Disabled"
def enabled(name, **kwargs):
"""
Check to see if the named service is enabled to start on boot
Args:
name (str): The name of the service to check
Returns:
bool: True if the service is set to start
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
"""
return info(name)["StartType"] == "Auto"
def disabled(name):
"""
Check to see if the named service is disabled to start on boot
Args:
name (str): The name of the service to check
Returns:
bool: True if the service is disabled
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
"""
return not enabled(name)
def create(
name,
bin_path,
exe_args=None,
display_name=None,
description=None,
service_type="own",
start_type="manual",
start_delayed=False,
error_control="normal",
load_order_group=None,
dependencies=None,
account_name=".\\LocalSystem",
account_password=None,
run_interactive=False,
**kwargs
):
"""
Create the named service.
.. versionadded:: 2015.8.0
Args:
name (str):
Specifies the service name. This is not the display_name
bin_path (str):
Specifies the path to the service binary file. Backslashes must be
escaped, eg: ``C:\\path\\to\\binary.exe``
exe_args (str):
Any additional arguments required by the service binary.
display_name (str):
The name to be displayed in the service manager. If not passed, the
``name`` will be used
description (str):
A description of the service
service_type (str):
Specifies the service type. Default is ``own``. Valid options are as
follows:
- kernel: Driver service
- filesystem: File system driver service
- adapter: Adapter driver service (reserved)
- recognizer: Recognizer driver service (reserved)
- own (default): Service runs in its own process
- share: Service shares a process with one or more other services
start_type (str):
Specifies the service start type. Valid options are as follows:
- boot: Device driver that is loaded by the boot loader
- system: Device driver that is started during kernel initialization
- auto: Service that automatically starts
- manual (default): Service must be started manually
- disabled: Service cannot be started
start_delayed (bool):
Set the service to Auto(Delayed Start). Only valid if the start_type
is set to ``Auto``. If service_type is not passed, but the service
is already set to ``Auto``, then the flag will be set. Default is
``False``
error_control (str):
The severity of the error, and action taken, if this service fails
to start. Valid options are as follows:
            - normal (default): Error is logged and a message box is displayed
- severe: Error is logged and computer attempts a restart with the
last known good configuration
- critical: Error is logged, computer attempts to restart with the
last known good configuration, system halts on failure
- ignore: Error is logged and startup continues, no notification is
given to the user
load_order_group (str):
The name of the load order group to which this service belongs
dependencies (list):
A list of services or load ordering groups that must start before
this service
account_name (str):
The name of the account under which the service should run. For
``own`` type services this should be in the ``domain\\username``
format. The following are examples of valid built-in service
accounts:
- NT Authority\\LocalService
- NT Authority\\NetworkService
- NT Authority\\LocalSystem
- .\\LocalSystem
account_password (str):
The password for the account name specified in ``account_name``. For
the above built-in accounts, this can be None. Otherwise a password
must be specified.
run_interactive (bool):
If this setting is True, the service will be allowed to interact
with the user. Not recommended for services that run with elevated
privileges.
Returns:
dict: A dictionary containing information about the new service
CLI Example:
.. code-block:: bash
salt '*' service.create <service name> <path to exe> display_name='<display name>'
"""
if display_name is None:
display_name = name
# Test if the service already exists
if name in get_all():
raise CommandExecutionError("Service Already Exists: {0}".format(name))
# shlex.quote the path to the binary
bin_path = _cmd_quote(bin_path)
if exe_args is not None:
bin_path = "{0} {1}".format(bin_path, exe_args)
if service_type.lower() in SERVICE_TYPE:
service_type = SERVICE_TYPE[service_type.lower()]
if run_interactive:
service_type = service_type | \
win32service.SERVICE_INTERACTIVE_PROCESS
else:
raise CommandExecutionError("Invalid Service Type: {0}".format(service_type))
if start_type.lower() in SERVICE_START_TYPE:
start_type = SERVICE_START_TYPE[start_type.lower()]
else:
raise CommandExecutionError("Invalid Start Type: {0}".format(start_type))
if error_control.lower() in SERVICE_ERROR_CONTROL:
error_control = SERVICE_ERROR_CONTROL[error_control.lower()]
else:
raise CommandExecutionError("Invalid Error Control: {0}".format(error_control))
if start_delayed:
if start_type != 2:
raise CommandExecutionError(
'Invalid Parameter: start_delayed requires start_type "auto"'
)
if account_name in [
"LocalSystem",
".\\LocalSystem",
"LocalService",
".\\LocalService",
"NetworkService",
".\\NetworkService",
]:
account_password = ""
# Connect to Service Control Manager
handle_scm = win32service.OpenSCManager(
None, None, win32service.SC_MANAGER_ALL_ACCESS
)
# Create the service
handle_svc = win32service.CreateService(
handle_scm,
name,
display_name,
win32service.SERVICE_ALL_ACCESS,
service_type,
start_type,
error_control,
bin_path,
load_order_group,
0,
dependencies,
account_name,
account_password,
)
if description is not None:
win32service.ChangeServiceConfig2(
handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION, description
)
if start_delayed is not None:
# You can only set delayed start for services that are set to auto start
# Start type 2 is Auto
if start_type == 2:
win32service.ChangeServiceConfig2(
handle_svc,
win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
start_delayed,
)
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
return info(name)
def delete(name, timeout=90):
"""
Delete the named service
Args:
name (str): The name of the service to delete
timeout (int):
The time in seconds to wait for the service to be deleted before
returning. This is necessary because a service must be stopped
before it can be deleted. Default is 90 seconds
.. versionadded:: 2017.7.9,2018.3.4
Returns:
bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
if the service is not present
CLI Example:
.. code-block:: bash
salt '*' service.delete <service name>
"""
handle_scm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_CONNECT)
try:
handle_svc = win32service.OpenService(
handle_scm, name, win32service.SERVICE_ALL_ACCESS
)
except pywintypes.error as exc:
win32service.CloseServiceHandle(handle_scm)
if exc.winerror != 1060:
raise CommandExecutionError(
'Failed to open {0}. {1}'.format(name, exc.strerror))
log.debug('Service "%s" is not present', name)
return True
try:
win32service.DeleteService(handle_svc)
except pywintypes.error as exc:
raise CommandExecutionError(
"Failed to delete {0}. {1}".format(name, exc.strerror)
)
finally:
log.debug("Cleaning up")
win32service.CloseServiceHandle(handle_scm)
win32service.CloseServiceHandle(handle_svc)
end_time = time.time() + int(timeout)
while name in get_all() and time.time() < end_time:
time.sleep(1)
return name not in get_all()
| 30.90903 | 96 | 0.618819 |
7ca5e303d6e6b0a70f58133627907a26479fa67d | 637 | py | Python | sdapis/migrations/0021_auto_20211023_2104.py | mmtahir-dev/cmput404-socialdistribution | a5a4749c8d9c27ccd062e33be5a4fb2f76697394 | ["MIT"] | null | null | null | sdapis/migrations/0021_auto_20211023_2104.py | mmtahir-dev/cmput404-socialdistribution | a5a4749c8d9c27ccd062e33be5a4fb2f76697394 | ["MIT"] | 49 | 2021-09-28T21:52:40.000Z | 2021-10-03T22:35:49.000Z | sdapis/migrations/0021_auto_20211023_2104.py | mmtahir-dev/cmput404-socialdistribution | a5a4749c8d9c27ccd062e33be5a4fb2f76697394 | ["MIT"] | 1 | 2021-09-27T03:20:36.000Z | 2021-09-27T03:20:36.000Z |
# Generated by Django 3.2.8 on 2021-10-24 03:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sdapis', '0020_post_post_id'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='author_id',
),
migrations.AddField(
model_name='post',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post', to=settings.AUTH_USER_MODEL),
),
]
| 25.48 | 142 | 0.627943 |
30ef77d4215d0bcf60c119a08389ff59a69bafec | 1,870 | py | Python | loss.py | dropinky/GroupProject | dcf22adffb6e7767ba30ab227bbf2c8b1ede770e | ["MIT"] | null | null | null | loss.py | dropinky/GroupProject | dcf22adffb6e7767ba30ab227bbf2c8b1ede770e | ["MIT"] | null | null | null | loss.py | dropinky/GroupProject | dcf22adffb6e7767ba30ab227bbf2c8b1ede770e | ["MIT"] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import models
class VGG16FeatureExtractor(nn.Module):
def __init__(self):
super().__init__()
vgg16 = models.vgg16(pretrained=True)
self.enc_1 = nn.Sequential(*vgg16.features[:5])
self.enc_2 = nn.Sequential(*vgg16.features[5:10])
self.enc_3 = nn.Sequential(*vgg16.features[10:17])
self.enc_4 = nn.Sequential(*vgg16.features[17:23])
# print(self.enc_1)
# print(self.enc_2)
# print(self.enc_3)
# print(self.enc_4)
# fix the encoder
for i in range(4):
for param in getattr(self, 'enc_{:d}'.format(i + 1)).parameters():
param.requires_grad = False
def forward(self, image):
results = [image]
for i in range(4):
func = getattr(self, 'enc_{:d}'.format(i + 1))
results.append(func(results[-1]))
return results[1:]
class ConsistencyLoss(nn.Module):
def __init__(self):
super().__init__()
self.vgg = VGG16FeatureExtractor()
self.l2 = nn.MSELoss()
def forward(self, csa, csa_d, target, mask):
vgg_gt = self.vgg(target)
vgg_gt = vgg_gt[-1]
mask_r = F.interpolate(mask, size=csa.size()[2:])
lossvalue = self.l2(csa*mask_r, vgg_gt*mask_r) + self.l2(csa_d*mask_r, vgg_gt*mask_r)
return lossvalue
def calc_gan_loss(discriminator, output, target):
y_pred_fake = discriminator(output, target)
y_pred = discriminator(target, output)
g_loss = (torch.mean((y_pred - torch.mean(y_pred_fake) + 1.) ** 2) + torch.mean((y_pred_fake - torch.mean(y_pred) - 1.) ** 2))/2
d_loss = (torch.mean((y_pred - torch.mean(y_pred_fake) - 1.) ** 2) + torch.mean((y_pred_fake - torch.mean(y_pred) + 1.) ** 2))/2
return g_loss, d_loss
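# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# The tensor shapes below are assumptions chosen only to exercise the interfaces;
# note that ConsistencyLoss() downloads pretrained VGG16 weights via torchvision
# on first use.
if __name__ == "__main__":
    target = torch.rand(1, 3, 64, 64)  # stand-in ground-truth image batch
    mask = torch.ones(1, 1, 64, 64)    # all-ones inpainting mask
    csa = torch.rand(1, 512, 8, 8)     # stand-ins for the CSA feature maps
    csa_d = torch.rand(1, 512, 8, 8)
    criterion = ConsistencyLoss()
    print("consistency loss:", criterion(csa, csa_d, target, mask).item())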
| 30.655738 | 132 | 0.611765 |
e7ce425f8e8fc0a2a5cd159ade471d27999bb459 | 728 | py | Python | uw-plugins/php/uwsgiplugin.py | jyotikamboj/container | a2abba963b06429c82025f7479b2ae41e8d3a56f | ["MIT", "BSD-3-Clause"] | 1 | 2019-01-14T10:58:43.000Z | 2019-01-14T10:58:43.000Z | uw-plugins/php/uwsgiplugin.py | jyotikamboj/container | a2abba963b06429c82025f7479b2ae41e8d3a56f | ["MIT", "BSD-3-Clause"] | null | null | null | uw-plugins/php/uwsgiplugin.py | jyotikamboj/container | a2abba963b06429c82025f7479b2ae41e8d3a56f | ["MIT", "BSD-3-Clause"] | null | null | null |
import os
NAME='php'
ld_run_path = None
PHPPATH = 'php-config'
phpdir = os.environ.get('UWSGICONFIG_PHPDIR')
if phpdir:
ld_run_path = "%s/lib" % phpdir
PHPPATH = "%s/bin/php-config" % phpdir
PHPPATH = os.environ.get('UWSGICONFIG_PHPPATH', PHPPATH)
CFLAGS = [os.popen(PHPPATH + ' --includes').read().rstrip(), '-Wno-sign-compare']
LDFLAGS = os.popen(PHPPATH + ' --ldflags').read().rstrip().split()
if ld_run_path:
LDFLAGS.append('-L%s' % ld_run_path)
os.environ['LD_RUN_PATH'] = ld_run_path
LIBS = [os.popen(PHPPATH + ' --libs').read().rstrip(), '-lphp5']
phplibdir = os.environ.get('UWSGICONFIG_PHPLIBDIR')
if phplibdir:
LIBS.append('-Wl,-rpath=%s' % phplibdir)
GCC_LIST = ['php_plugin', 'session']
| 25.103448 | 81 | 0.671703 |
76415fb86d287dcc4cf3ecce5eea95627b632b9c | 2,981 | py | Python | demo/BERT/helpers/convert_weights.py | kevinch-nv/TensorRT | 73225b9acef41426ec7ef00578f8de985899726b | ["Apache-2.0"] | 3 | 2019-11-18T14:04:41.000Z | 2020-06-19T07:35:19.000Z | demo/BERT/helpers/convert_weights.py | kevinch-nv/TensorRT | 73225b9acef41426ec7ef00578f8de985899726b | ["Apache-2.0"] | null | null | null | demo/BERT/helpers/convert_weights.py | kevinch-nv/TensorRT | 73225b9acef41426ec7ef00578f8de985899726b | ["Apache-2.0"] | 1 | 2019-12-11T01:43:05.000Z | 2019-12-11T01:43:05.000Z |
#!/usr/bin/env python3
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import struct
import argparse
import re
try:
from tensorflow.python import pywrap_tensorflow as pyTF
except ImportError as err:
sys.stderr.write("""Error: Failed to import tensorflow module ({})""".format(err))
sys.exit()
parser = argparse.ArgumentParser(description='TensorFlow to TensorRT Weight Dumper')
parser.add_argument('-m', '--model', required=True, help='The checkpoint file basename, example basename(model.ckpt-766908.data-00000-of-00001) -> model.ckpt-766908')
parser.add_argument('-o', '--output', required=True, help='The weight file to dump all the weights to.')
opt = parser.parse_args()
print( "Outputting the trained weights in TensorRT's in the following format:")
print( "Line 0: <number of buffers N in the file>")
print( "Line 1-N: [buffer name] [buffer type] [number of dims D] [space-sep. shape list: d_1 d_2 ... d_D] <d_1 * d_2 * ... * d_D * sizeof(type) bytes of data>")
print( "Buffer type is hard-coded to 0 (float), i.e. 4 bytes per coefficient")
inputbase = opt.model
outputbase = opt.output
reader = pyTF.NewCheckpointReader(inputbase)
tensor_dict = reader.get_variable_to_shape_map()
out_fn = outputbase + ".weights"
with open(out_fn, 'wb') as output_file:
# there might be training-related variables in the checkpoint that can be discarded
exclude_list = ["adam", "global_step", "pooler", "bad_steps", "good_steps", "loss_scale"]
param_names = [key for key in sorted(tensor_dict) if all([exclude not in key for exclude in exclude_list])]
count = len(param_names)
print(count)
output_file.write('{}\n'.format(count).encode('ASCII'))
for pn in param_names:
toks = pn.lower().split('/')
if 'encoder' in pn:
assert('layer' in pn)
            l = re.findall(r'\d+', pn)[0]
outname = 'l{}_'.format(l) + '_'.join(toks[3:])
else:
outname = '_'.join(toks)
tensor = reader.get_tensor(pn)
shape = tensor.shape
flat_tensor = tensor.flatten()
shape_str = '{} '.format(len(shape)) + ' '.join([str(d) for d in shape])
output_file.write('{} 0 {} '.format(outname, shape_str).encode('ASCII'))
output_file.write(flat_tensor.tobytes())
        output_file.write('\n'.encode('ASCII'))
print('Orig.name:', pn,'TRT name:', outname, 'shape:' , shape_str)
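# --- Hedged sketch (editor's addition, not part of the original script) ---
# One way to read the dumped file back, assuming every buffer was written as
# little-endian float32 (the buffer type is hard-coded to 0 above, 4 bytes per
# coefficient).
def _read_token(stream):
    """Read one space-delimited ASCII token from a binary stream."""
    tok = b''
    ch = stream.read(1)
    while ch not in (b' ', b''):
        tok += ch
        ch = stream.read(1)
    return tok.decode('ASCII')
def read_trt_weights(path):
    """Load buffers written above into a {name: (shape, values)} dictionary."""
    weights = {}
    with open(path, 'rb') as stream:
        count = int(stream.readline().decode('ASCII').strip())
        for _ in range(count):
            name = _read_token(stream)
            _buffer_type = _read_token(stream)  # always "0" (float) in this dump
            ndims = int(_read_token(stream))
            shape = [int(_read_token(stream)) for _ in range(ndims)]
            numel = 1
            for dim in shape:
                numel *= dim
            values = struct.unpack('<{}f'.format(numel), stream.read(4 * numel))
            stream.read(1)  # consume the trailing newline written per buffer
            weights[name] = (shape, values)
    return weights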
| 40.283784 | 166 | 0.684334 |
cd3f77985d426e29e36091e06b448642a9d381a3 | 2,922 | py | Python | ancestries.py | shendi-project/shendi | 30a1c0dd29f123269a5fb31caccaf113a94f6ddf | ["MIT"] | 3 | 2020-09-02T16:57:16.000Z | 2021-08-22T06:23:39.000Z | ancestries.py | shendi-project/shendi | 30a1c0dd29f123269a5fb31caccaf113a94f6ddf | ["MIT"] | 29 | 2020-09-13T13:34:16.000Z | 2020-09-22T11:42:06.000Z | ancestries.py | shendi-project/shendi | 30a1c0dd29f123269a5fb31caccaf113a94f6ddf | ["MIT"] | null | null | null |
import json
import nethysdb
# I don't know if this is useful, but for now I am leaving it here just in case
class Ancestry(nethysdb.NethysDB):
def __init__(self, link, SourceBook, Page, name, description, YouMight, OthersProbably, PhysicalDescription, Society, AlignmentAndReligion, Adventurers, Names, Hitpoints, Size, Speed, AbilityBoost1, AbilityBoost2, AbilityFlaw):
super().__init__(link, SourceBook, Page)
self.name = name
self.description = description
self.YouMight = YouMight
self.OthersProbably = OthersProbably
self.PhysicalDescription = PhysicalDescription
self.Society = Society
self.AlignmentAndReligion = AlignmentAndReligion
self.Adventurers = Adventurers
self.Names = Names
self.Hitpoints = Hitpoints
self.Size = Size
self.Speed = Speed
self.AbilityBoost1 = AbilityBoost1
self.AbilityBoost2 = AbilityBoost2
self.AbilityFlaw = AbilityFlaw
with open('data/ancestries.json') as f:
data = json.load(f)
list_of_ancestries = []
# grab data from JSON, create an Ancestry instance for each entry and append the ancestry's name to the list
for ancestry in data['ancestries']:
name = ancestry['race']
link = ancestry['NethysUrl']
SourceBook = ancestry['Source']
Page = ancestry['Page']
description = ancestry['Description']
YouMight = ancestry['YouMight']
OthersProbably = ancestry['OthersProbably']
PhysicalDescription = ancestry['Physical Description']
Society = ancestry['Society']
AlignmentAndReligion = ancestry['Alignment and Religion']
Adventurers = ancestry['Adventurers']
Names = ancestry['Names']
Hitpoints = ancestry['Hit Points']
Size = ancestry['Size']
Speed = ancestry['Speed']
AbilityBoost1 = ancestry['Ability Boosts'][0]
AbilityBoost2 = ancestry['Ability Boosts'][1]
AbilityFlaw = ancestry['Ability Flaw'][0]
name = Ancestry(link, SourceBook, Page, name, description, YouMight, OthersProbably, PhysicalDescription, Society, AlignmentAndReligion, Adventurers, Names, Hitpoints, Size, Speed, AbilityBoost1, AbilityBoost2, AbilityFlaw)
list_of_ancestries.append(ancestry['race'])
def get_boosts(ancestry):
if ancestry in list_of_ancestries:
index = list_of_ancestries.index(ancestry)
boosts = []
boosts.append(data['ancestries'][index]['Ability Boosts'][0])
boosts.append(data['ancestries'][index]['Ability Boosts'][1])
return boosts
else:
raise Exception("Ancestry not found in the list_of_ancestries")
def get_flaw(ancestry, n):
if ancestry in list_of_ancestries:
index = list_of_ancestries.index(ancestry)
return data['ancestries'][index]['Ability Flaw'][n]
else:
raise Exception("Ancestry not found in the list_of_ancestries")
def main():
print(list_of_ancestries)
if __name__ == '__main__':
main()
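# --- Hedged usage sketch (editor's addition) ---
# "Elf" is a hypothetical ancestry name; whether it exists depends entirely on
# the contents of data/ancestries.json, which is not shown here.
#
#     if "Elf" in list_of_ancestries:
#         print(get_boosts("Elf"))   # the two ability boosts from the JSON entry
#         print(get_flaw("Elf", 0))  # the first listed ability flaw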
| 36.987342 | 231 | 0.697125 |
633eed290af5325eec448b70aa18483dc95e05f8 | 263,640 | py | Python | autoprotocol/protocol.py | sreetejreddy/autoprotocol-python | ce32b1c86b0e0e2719931bad39c522ea6ba34a63 | ["BSD-3-Clause"] | null | null | null | autoprotocol/protocol.py | sreetejreddy/autoprotocol-python | ce32b1c86b0e0e2719931bad39c522ea6ba34a63 | ["BSD-3-Clause"] | null | null | null | autoprotocol/protocol.py | sreetejreddy/autoprotocol-python | ce32b1c86b0e0e2719931bad39c522ea6ba34a63 | ["BSD-3-Clause"] | null | null | null |
"""
Module containing the main `Protocol` object and associated functions
:copyright: 2020 by The Autoprotocol Development Team, see AUTHORS
for more details.
:license: BSD, see LICENSE for more details
"""
import warnings
from .constants import AGAR_CLLD_THRESHOLD, SPREAD_PATH
from .container import COVER_TYPES, SEAL_TYPES, Container, Well
from .container_type import _CONTAINER_TYPES, ContainerType
from .instruction import * # pylint: disable=unused-wildcard-import
from .liquid_handle import LiquidClass, Mix, Transfer
from .unit import Unit, UnitError
from .util import _check_container_type_with_shape, _validate_as_instance
class Ref(object):
"""
Link a ref name (string) to a Container instance.
"""
def __init__(self, name, opts, container):
self.name = name
self.opts = opts
self.container = container
def __repr__(self):
return f"Ref({self.name}, {self.container}, {self.opts})"
# noinspection PyCompatibility
class Protocol(object):
"""
A Protocol is a sequence of instructions to be executed, and a set of
containers on which those instructions act.
Parameters
----------
refs : list(Ref)
Pre-existing refs that the protocol should be populated with.
instructions : list(Instruction)
Pre-existing instructions that the protocol should be populated with.
propagate_properties : bool, optional
Whether liquid handling operations should propagate aliquot properties
from source to destination wells.
time_constraints : List(time_constraints)
Pre-existing time_constraints that the protocol should be populated with.
Examples
--------
Initially, a Protocol has an empty sequence of instructions and no
referenced containers. To add a reference to a container, use the ref()
method, which returns a Container.
.. code-block:: python
p = Protocol()
my_plate = p.ref("my_plate", id="ct1xae8jabbe6",
cont_type="96-pcr", storage="cold_4")
To add instructions to the protocol, use the helper methods in this class
.. code-block:: python
p.transfer(source=my_plate.well("A1"),
dest=my_plate.well("B4"),
volume="50:microliter")
p.thermocycle(my_plate, groups=[
{ "cycles": 1,
"steps": [
{ "temperature": "95:celsius",
"duration": "1:hour"
}]
}])
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"my_plate": {
"id": "ct1xae8jabbe6",
"store": {
"where": "cold_4"
}
}
},
"instructions": [
{
"groups": [
{
"transfer": [
{
"volume": "50.0:microliter",
"to": "my_plate/15",
"from": "my_plate/0"
}
]
}
],
"op": "pipette"
},
{
"volume": "10:microliter",
"dataref": null,
"object": "my_plate",
"groups": [
{
"cycles": 1,
"steps": [
{
"duration": "1:hour",
"temperature": "95:celsius"
}
]
}
],
"op": "thermocycle"
}
]
}
"""
def __init__(
self,
refs=None,
instructions=None,
propagate_properties=False,
time_constraints=None,
):
super(Protocol, self).__init__()
self.refs = refs or {}
self.instructions = instructions or []
self.propagate_properties = propagate_properties
self.time_constraints = time_constraints or []
def __repr__(self):
return f"Protocol({self.__dict__})"
def container_type(self, shortname):
"""
Convert a ContainerType shortname into a ContainerType object.
Parameters
----------
shortname : str
String representing one of the ContainerTypes in the
_CONTAINER_TYPES dictionary.
Returns
-------
ContainerType
Returns a Container type object corresponding to the shortname
passed to the function. If a ContainerType object is passed,
that same ContainerType is returned.
Raises
------
ValueError
If an unknown ContainerType shortname is passed as a parameter.
"""
if isinstance(shortname, ContainerType):
return shortname
elif shortname in _CONTAINER_TYPES:
return _CONTAINER_TYPES[shortname]
else:
raise ValueError(
f"Unknown container type {shortname}"
f"(known types={str(_CONTAINER_TYPES.keys())})"
)
# pragma pylint: disable=redefined-builtin
def ref(
self, name, id=None, cont_type=None, storage=None, discard=None, cover=None
):
"""
Add a Ref object to the dictionary of Refs associated with this protocol
and return a Container with the id, container type and storage or
discard conditions specified.
Example Usage:
.. code-block:: python
p = Protocol()
# ref a new container (no id specified)
sample_ref_1 = p.ref("sample_plate_1",
cont_type="96-pcr",
discard=True)
# ref an existing container with a known id
sample_ref_2 = p.ref("sample_plate_2",
id="ct1cxae33lkj",
cont_type="96-pcr",
storage="ambient")
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"sample_plate_1": {
"new": "96-pcr",
"discard": true
},
"sample_plate_2": {
"id": "ct1cxae33lkj",
"store": {
"where": "ambient"
}
}
},
"instructions": []
}
Parameters
----------
name : str
name of the container/ref being created.
id : str, optional
id of the container being created, from your organization's
inventory on http://secure.transcriptic.com. Strings representing
ids begin with "ct".
cont_type : str or ContainerType
container type of the Container object that will be generated.
storage : Enum({"ambient", "cold_20", "cold_4", "warm_37"}), optional
temperature the container being referenced should be stored at
after a run is completed. Either a storage condition must be
specified or discard must be set to True.
discard : bool, optional
if no storage condition is specified and discard is set to True,
the container being referenced will be discarded after a run.
cover: str, optional
name of the cover which will be on the container/ref
Returns
-------
Container
Container object generated from the id and container type provided
Raises
------
RuntimeError
If a container previously referenced in this protocol (existent
in refs section) has the same name as the one specified.
RuntimeError
If no container type is specified.
RuntimeError
If no valid storage or discard condition is specified.
"""
if name in self.refs.keys():
raise RuntimeError(
"Two containers within the same protocol cannot have the same " "name."
)
opts = {}
# Check container type
try:
cont_type = self.container_type(cont_type)
if id and cont_type:
opts["id"] = id
elif cont_type:
opts["new"] = cont_type.shortname
except ValueError:
raise RuntimeError(f"{cont_type} is not a recognized container type.")
if storage:
opts["store"] = {"where": storage}
elif discard and not storage:
opts["discard"] = discard
else:
raise RuntimeError(
"You must specify either a valid storage condition or set "
"discard=True for a Ref."
)
if cover:
opts["cover"] = cover
container = Container(
id,
cont_type,
name=name,
storage=storage if storage else None,
cover=cover if cover else None,
)
self.refs[name] = Ref(name, opts, container)
return container
# pragma pylint: enable=redefined-builtin
def add_time_constraint(
self,
from_dict,
to_dict,
less_than=None,
more_than=None,
mirror=False,
ideal=None,
optimization_cost=None,
):
"""Constraint the time between two instructions
Add time constraints from `from_dict` to `to_dict`. Time constraints
guarantee that the time from the `from_dict` to the `to_dict` is less
than or greater than some specified duration. Care should be taken when
applying time constraints as constraints may make some protocols
impossible to schedule or run.
Though autoprotocol orders instructions in a list, instructions do
not need to be run in the order they are listed and instead depend on
the preceding dependencies. Time constraints should be added with such
limitations in mind.
Constraints are directional; use `mirror=True` if the time constraint
should be added in both directions. Note that mirroring is only applied
to the less_than constraint, as the more_than constraint implies both a
minimum delay between two timing points and also an explicit ordering
between the two timing points.
Ideal time constraints are sometimes helpful for ensuring that a certain
set of operations happen within some specified time. This can be specified
by using the `ideal` parameter. There is an optional `optimization_cost`
parameter associated with `ideal` time constraints for specifying the
penalization system used for calculating deviations from the `ideal` time.
When left unspecified, the `optimization_cost` function defaults to linear.
Please refer to the ASC for more details on how this is implemented.
Example Usage:
.. code-block:: python
plate_1 = protocol.ref("plate_1", id=None, cont_type="96-flat",
discard=True)
plate_2 = protocol.ref("plate_2", id=None, cont_type="96-flat",
discard=True)
protocol.cover(plate_1)
time_point_1 = protocol.get_instruction_index()
protocol.cover(plate_2)
time_point_2 = protocol.get_instruction_index()
protocol.add_time_constraint(
{"mark": plate_1, "state": "start"},
{"mark": time_point_1, "state": "end"},
less_than = "1:minute")
protocol.add_time_constraint(
{"mark": time_point_2, "state": "start"},
{"mark": time_point_1, "state": "start"},
less_than = "1:minute", mirror=True)
# Ideal time constraint
protocol.add_time_constraint(
{"mark": time_point_1, "state": "start"},
{"mark": time_point_2, "state": "end"},
ideal = "30:second",
optimization_cost = "squared")
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"plate_1": {
"new": "96-flat",
"discard": true
},
"plate_2": {
"new": "96-flat",
"discard": true
}
},
"time_constraints": [
{
"to": {
"instruction_end": 0
},
"less_than": "1.0:minute",
"from": {
"ref_start": "plate_1"
}
},
{
"to": {
"instruction_start": 0
},
"less_than": "1.0:minute",
"from": {
"instruction_start": 1
}
},
{
"to": {
"instruction_start": 1
},
"less_than": "1.0:minute",
"from": {
"instruction_start": 0
}
},
{
"from": {
"instruction_start": 0
},
"to": {
"instruction_end": 1
},
"ideal": {
"value": "5:minute",
"optimization_cost": "squared"
}
}
],
"instructions": [
{
"lid": "standard",
"object": "plate_1",
"op": "cover"
},
{
"lid": "standard",
"object": "plate_2",
"op": "cover"
}
]
}
Parameters
----------
from_dict: dict
Dictionary defining the initial time constraint condition.
Composed of keys: "mark" and "state"
mark: int or Container
instruction index of container
state: "start" or "end"
specifies either the start or end of the "mark" point
to_dict: dict
Dictionary defining the end time constraint condition.
Specified in the same format as from_dict
less_than: str or Unit, optional
max time between from_dict and to_dict
more_than: str or Unit, optional
min time between from_dict and to_dict
mirror: bool, optional
choice to mirror the from and to positions when time constraints
should be added in both directions
(only applies to the less_than constraint)
ideal: str or Unit, optional
ideal time between from_dict and to_dict
optimization_cost: Enum({"linear", "squared", "exponential"}), optional
cost function used for calculating the penalty for missing the
`ideal` timing
Raises
------
ValueError
If an instruction mark is less than 0
TypeError
If mark is not container or integer
TypeError
If state not in ['start', 'end']
TypeError
If any of `ideal`, `more_than`, `less_than` is not a
Unit of the 'time' dimension
KeyError
If `to_dict` or `from_dict` does not contain 'mark'
KeyError
If `to_dict` or `from_dict` does not contain 'state'
ValueError
If time is less than '0:second'
ValueError
If `optimization_cost` is specified but `ideal` is not
ValueError
If `more_than` is greater than `less_than`
ValueError
If `ideal` is smaller than `more_than` or greater than
`less_than`
RuntimeError
If `from_dict` and `to_dict` are equal
RuntimeError
If from_dict["marker"] and to_dict["marker"] are equal and
from_dict["state"] = "end"
"""
inst_string = "instruction_"
cont_string = "ref_"
state_strings = ["start", "end"]
keys = []
# Move the 4th param to mirror if the caller used the syntax
# add_time_constraint(a, b, 1:minute, True)
if isinstance(more_than, bool):
mirror = more_than
more_than = None
# Validate input types
def validate_timing(constraint):
if constraint is not None:
constraint = parse_unit(constraint, "minute")
if constraint < Unit(0, "second"):
raise ValueError(
f"The timing constraint {constraint} cannot be "
"less than '0:second'"
)
return constraint
more_than = validate_timing(more_than)
less_than = validate_timing(less_than)
ideal = validate_timing(ideal)
if ideal and optimization_cost is None:
optimization_cost = "linear"
if optimization_cost is not None:
if ideal is None:
raise ValueError(
"'optimization_cost' can only be specified if 'ideal'"
"is also specified"
)
ACCEPTED_COST_FUNCTIONS = ["linear", "squared", "exponential"]
if optimization_cost not in ACCEPTED_COST_FUNCTIONS:
raise ValueError(
f"'optimization_cost': {optimization_cost} has to be a "
f"member of {ACCEPTED_COST_FUNCTIONS}"
)
if more_than and less_than and more_than > less_than:
raise ValueError(
f"'more_than': {more_than} cannot be greater than 'less_than': "
f"{less_than}"
)
if ideal and more_than and ideal < more_than:
raise ValueError(
f"'ideal': {ideal} cannot be smaller than 'more_than': " f"{more_than}"
)
if ideal and less_than and ideal > less_than:
raise ValueError(
f"'ideal': {ideal} cannot be greater than 'less_than': " f"{less_than}"
)
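# Build the ASC timing-point key for each endpoint: "ref_" when the 'mark'
# is a Container or "instruction_" when it is an instruction index,
# suffixed with the "start"/"end" state (e.g. "instruction_start", "ref_end").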
for m in [from_dict, to_dict]:
if "mark" in m:
if isinstance(m["mark"], Container):
k = cont_string
elif isinstance(m["mark"], int):
k = inst_string
if m["mark"] < 0:
raise ValueError(
f"The instruction 'mark' in {m} must be greater "
f"than and equal to 0"
)
else:
raise TypeError(f"The 'mark' in {m} must be Container or Integer")
else:
raise KeyError(f"The {m} dict must contain `mark`")
if "state" in m:
if m["state"] in state_strings:
k += m["state"]
else:
raise TypeError(
f"The 'state' in {m} must be in " f"{', '.join(state_strings)}"
)
else:
raise KeyError(f"The {m} dict must contain 'state'")
keys.append(k)
if from_dict["mark"] == to_dict["mark"]:
if from_dict["state"] == to_dict["state"]:
raise RuntimeError(
f"The from_dict: {from_dict} and to_dict: {to_dict} are "
f"the same"
)
if from_dict["state"] == "end":
raise RuntimeError(
f"The from_dict: {from_dict} cannot come before the "
f"to_dict {to_dict}"
)
from_time_point = {keys[0]: from_dict["mark"]}
to_time_point = {keys[1]: to_dict["mark"]}
if less_than is not None:
self.time_constraints += [
{"from": from_time_point, "to": to_time_point, "less_than": less_than}
]
if mirror:
self.add_time_constraint(to_dict, from_dict, less_than, mirror=False)
if more_than is not None:
self.time_constraints += [
{"from": from_time_point, "to": to_time_point, "more_than": more_than}
]
if ideal is not None:
ideal_dict = dict(value=ideal)
if optimization_cost is not None:
ideal_dict["optimization_cost"] = optimization_cost
self.time_constraints += [
{"from": from_time_point, "to": to_time_point, "ideal": ideal_dict}
]
def get_instruction_index(self):
"""Get index of the last appended instruction
Example Usage:
.. code-block:: python
p = Protocol()
plate_1 = p.ref("plate_1", id=None, cont_type="96-flat",
discard=True)
p.cover(plate_1)
time_point_1 = p.get_instruction_index() # time_point_1 = 0
Raises
------
ValueError
If an instruction index is less than 0
Returns
-------
int
Index of the preceding instruction
"""
instruction_index = len(self.instructions) - 1
if instruction_index < 0:
raise ValueError("Instruction index less than 0")
return instruction_index
def _append_and_return(self, instructions):
"""
Append instruction(s) to the Protocol list and returns the
Instruction(s).
The other functions on Protocol() should be used
in lieu of doing this directly.
Example Usage:
.. code-block:: python
p = Protocol()
p._append_and_return(
Incubate("sample_plate", "ambient", "1:hour")
)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"duration": "1:hour",
"where": "ambient",
"object": "sample_plate",
"shaking": false,
"op": "incubate"
}
]
Parameters
----------
instructions : Instruction or list(Instruction)
Instruction object(s) to be appended.
Returns
-------
Instruction or list(Instruction)
Instruction object(s) to be appended and returned
"""
if type(instructions) is list:
self.instructions.extend(instructions)
else:
self.instructions.append(instructions)
return instructions
def batch_containers(self, containers, batch_in=True, batch_out=False):
"""
Batch containers such that they all enter or exit together.
Example Usage:
.. code-block:: python
plate_1 = protocol.ref("p1", None, "96-pcr", storage="cold_4")
plate_2 = protocol.ref("p2", None, "96-pcr", storage="cold_4")
protocol.batch_containers([plate_1, plate_2])
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"p1": {
"new": "96-pcr",
"store": {
"where": "cold_4"
}
},
"p2": {
"new": "96-pcr",
"store": {
"where": "cold_4"
}
}
},
"time_constraints": [
{
"from": {
"ref_start": "p1"
},
"less_than": "0:second",
"to": {
"ref_start": "p2"
}
},
{
"from": {
"ref_start": "p1"
},
"more_than": "0:second",
"to": {
"ref_start": "p2"
}
}
]
}
Parameters
----------
containers : list(Container)
Containers to batch
batch_in : bool, optional
Batch the entry of containers, default True
batch_out: bool, optional
Batch the exit of containers, default False
Raises
------
TypeError
If containers is not a list
TypeError
If containers is not a list of Container object
"""
time = Unit(0, "second")
if not isinstance(containers, list):
raise TypeError("batch_containers containers must be a list")
if not all(isinstance(cont, Container) for cont in containers):
raise TypeError("batch_containers containers must be a list of containers.")
if (not batch_in and not batch_out) or len(containers) < 2:
warnings.warn("batch_containers is used but has no effect")
reference_container = containers[0]
remainder_containers = containers[1:]
states = []
if batch_in:
states.append("start")
if batch_out:
states.append("end")
for container in remainder_containers:
for state in states:
from_dict = {"mark": reference_container, "state": state}
to_dict = {"mark": container, "state": state}
self.add_time_constraint(
from_dict=from_dict, to_dict=to_dict, less_than=time, more_than=time
)
def as_dict(self):
"""
Return the entire protocol as a dictionary.
Example Usage:
.. code-block:: python
from autoprotocol.protocol import Protocol
import json
p = Protocol()
sample_ref_2 = p.ref("sample_plate_2",
id="ct1cxae33lkj",
cont_type="96-pcr",
storage="ambient")
p.seal(sample_ref_2)
p.incubate(sample_ref_2, "warm_37", "20:minute")
print(json.dumps(p.as_dict(), indent=2))
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"sample_plate_2": {
"id": "ct1cxae33lkj",
"store": {
"where": "ambient"
}
}
},
"instructions": [
{
"object": "sample_plate_2",
"op": "seal"
},
{
"duration": "20:minute",
"where": "warm_37",
"object": "sample_plate_2",
"shaking": false,
"op": "incubate"
}
]
}
Returns
-------
dict
dict with keys "refs" and "instructions" and optionally
"time_constraints" and "outs", each of which contain the
"refified" contents of their corresponding Protocol attribute.
Raises
------
RuntimeError
If either refs or instructions attribute is empty
"""
outs = {}
# pragma pylint: disable=protected-access
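# Collect per-aliquot names and properties into the "outs" mapping,
# keyed by ref name and then by well index.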
for n, ref in self.refs.items():
for well in ref.container._wells:
if well.name or len(well.properties) > 0:
if n not in outs.keys():
outs[n] = {}
outs[n][str(well.index)] = {}
if well.name:
outs[n][str(well.index)]["name"] = well.name
if len(well.properties) > 0:
outs[n][str(well.index)]["properties"] = well.properties
# assign any storage or discard condition changes to ref
if "store" in ref.opts:
ref.opts["store"] = {"where": ref.container.storage}
if ref.container.storage is None and "discard" not in ref.opts:
ref.opts["discard"] = True
del ref.opts["store"]
elif ref.container.storage is not None and "discard" in ref.opts:
ref.opts["store"] = {"where": ref.container.storage}
del ref.opts["discard"]
# pragma pylint: enable=protected-access
if outs:
setattr(self, "outs", outs)
prop_list = [
a
for a in dir(self)
if not a.startswith("__") and not callable(getattr(self, a))
]
explicit_props = ["outs", "refs", "instructions"]
# attributes that are always serialized.
optional_props = ["time_constraints"]
# optional attributes that are serialized only when there are values
for prop in optional_props:
if getattr(self, prop):
explicit_props.append(prop)
return {
attr: self._refify(getattr(self, attr))
for attr in prop_list
if attr in explicit_props
}
def store(self, container, condition):
"""
Manually adjust the storage destiny for a container used within
this protocol.
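Example Usage:
.. code-block:: python
# minimal illustrative sketch; assumes `plate` was ref'd earlier in
# this protocol with a storage condition (e.g. storage="cold_4")
p.store(plate, "cold_20")   # change the storage destiny
p.store(plate, "discard")   # or mark the container for discard instead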
Parameters
----------
container : Container
Container used within this protocol
condition : str
New storage destiny for the specified Container
Raises
------
TypeError
If container argument is not a Container object
RuntimeError
If the container passed is not already present in self.refs
"""
if not condition or condition == "discard":
condition = None
if not isinstance(container, Container):
raise TypeError("Protocol.store() can only be used on a Container object.")
container.storage = condition
r = self.refs.get(container.name)
if not r:
raise RuntimeError(
"That container does not exist in the refs for this protocol."
)
if "discard" in r.opts:
r.opts.pop("discard")
if condition:
r.opts["store"] = {"where": str(condition)}
else:
r.opts.pop("store")
r.opts["discard"] = True
def acoustic_transfer(
self, source, dest, volume, one_source=False, droplet_size="25:nanoliter"
):
"""
Specify source and destination wells for transferring liquid via an
acoustic liquid handler. Droplet size is usually device-specific.
Example Usage:
.. code-block:: python
p.acoustic_transfer(
echo.wells(0,1).set_volume("12:nanoliter"),
plate.wells_from(0,5), "4:nanoliter", one_source=True)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"groups": [
{
"transfer": [
{
"volume": "0.004:microliter",
"to": "plate/0",
"from": "echo_plate/0"
},
{
"volume": "0.004:microliter",
"to": "plate/1",
"from": "echo_plate/0"
},
{
"volume": "0.004:microliter",
"to": "plate/2",
"from": "echo_plate/0"
},
{
"volume": "0.004:microliter",
"to": "plate/3",
"from": "echo_plate/1"
},
{
"volume": "0.004:microliter",
"to": "plate/4",
"from": "echo_plate/1"
}
]
}
],
"droplet_size": "25:microliter",
"op": "acoustic_transfer"
}
]
Parameters
----------
source : Well or WellGroup or list(Well)
Well or wells to transfer liquid from. If multiple source wells
are supplied and one_source is set to True, liquid will be
transferred from each source well specified as long as it contains
sufficient volume. Otherwise, the number of source wells specified
must match the number of destination wells specified and liquid
will be transferred from each source well to its corresponding
destination well.
dest : Well or WellGroup or list(Well)
Well or WellGroup to which to transfer liquid. The number of
destination wells must match the number of source wells specified
unless one_source is set to True.
volume : str or Unit or list
The volume(s) of liquid to be transferred from source wells to
destination wells. Volume can be specified as a single string or
Unit, or can be given as a list of volumes. The length of a list
of volumes must match the number of destination wells given unless
the same volume is to be transferred to each destination well.
one_source : bool, optional
Specify whether liquid is to be transferred to destination wells
from a group of wells all containing the same substance.
droplet_size : str or Unit, optional
Volume representing a droplet_size. The volume of each `transfer`
group should be a multiple of this volume.
Returns
-------
AcousticTransfer
Returns the :py:class:`autoprotocol.instruction.AcousticTransfer`
instruction created from the specified parameters
Raises
------
TypeError
Incorrect input types, e.g. source/dest are not Well or WellGroup
or list of Well
RuntimeError
Incorrect length for source and destination
RuntimeError
Transfer volume not being a multiple of droplet size
RuntimeError
Insufficient volume in source wells
"""
# Check valid well inputs
if not is_valid_well(source):
raise TypeError("Source must be of type Well, list of Wells, or WellGroup.")
if not is_valid_well(dest):
raise TypeError(
"Destination (dest) must be of type Well, list of Wells, or "
"WellGroup."
)
transfers = []
source = WellGroup(source)
dest = WellGroup(dest)
len_source = len(source.wells)
len_dest = len(dest.wells)
droplet_size = Unit(droplet_size)
max_decimal_places = 12 # for rounding after floating point arithmetic
# Auto-generate well-group if only 1 well specified and using >1 source
if not one_source:
if len_dest > 1 and len_source == 1:
source = WellGroup(source.wells * len_dest)
len_source = len(source.wells)
if len_dest == 1 and len_source > 1:
dest = WellGroup(dest.wells * len_source)
len_dest = len(dest.wells)
if len_source != len_dest:
raise RuntimeError(
"To transfer liquid from one well or multiple wells "
"containing the same source, set one_source to True. To "
"transfer liquid from multiple wells to a single "
"destination well, specify only one destination well. "
"Otherwise, you must specify the same number of source and "
"destination wells to do a one-to-one transfer."
)
# Auto-generate list from single volume, check if list length matches
if isinstance(volume, str) or isinstance(volume, Unit):
if len_dest == 1 and not one_source:
volume = [Unit(volume).to("ul")] * len_source
else:
volume = [Unit(volume).to("ul")] * len_dest
elif isinstance(volume, list) and len(volume) == len_dest:
volume = list(map(lambda x: Unit(x).to("ul"), volume))
else:
raise RuntimeError(
"Unless the same volume of liquid is being transferred to each "
"destination well, each destination well must have a "
"corresponding volume in the form of a list."
)
vol_errors = []
for vol_d in volume:
if not round(vol_d / droplet_size, max_decimal_places) % 1 == 0:
vol_errors.append(vol_d)
if len(vol_errors) > 0:
raise RuntimeError(
f"Transfer volume has to be a multiple of the droplet size ({droplet_size}).This is not true for the following volumes: {vol_errors}"
)
# Ensure enough volume in single well to transfer to all dest wells
if one_source:
try:
source_vol = [s.available_volume() for s in source.wells]
if sum(volume) > sum(source_vol):
raise RuntimeError(
"There is not enough volume in the source well(s) "
"specified to complete the transfers."
)
if len_source >= len_dest and all(
i > j for i, j in zip(source_vol, volume)
):
sources = source.wells[:len_dest]
destinations = dest.wells
volumes = volume
else:
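# Greedily split each destination's requested volume across the source
# wells: draw from the current source while it can cover the remaining
# volume; otherwise take what is left of that source (rounded down to a
# whole number of droplets) and advance to the next source well.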
sources = []
source_counter = 0
destinations = []
volumes = []
s = source.wells[source_counter]
vol = s.available_volume()
for idx, d in enumerate(dest.wells):
vol_d = volume[idx]
while vol_d > Unit("0:microliter"):
if vol > vol_d:
sources.append(s)
destinations.append(d)
volumes.append(vol_d)
vol -= vol_d
vol = round(vol, max_decimal_places)
vol_d -= vol_d
vol_d = round(vol_d, max_decimal_places)
else:
sources.append(s)
destinations.append(d)
vol = int(vol / droplet_size) * droplet_size
volumes.append(vol)
vol_d -= vol
vol_d = round(vol_d, max_decimal_places)
source_counter += 1
if source_counter < len_source:
s = source.wells[source_counter]
vol = s.available_volume()
source = WellGroup(sources)
dest = WellGroup(destinations)
volume = volumes
except (ValueError, AttributeError, TypeError):
raise RuntimeError(
"When transferring liquid from multiple wells containing "
"the same substance to multiple other wells, each source "
"Well must have a volume attribute (aliquot) associated "
"with it."
)
for s, d, v in list(zip(source.wells, dest.wells, volume)):
self._remove_cover(s.container, "acoustic_transfer")
self._remove_cover(d.container, "acoustic_transfer")
xfer = {"from": s, "to": d, "volume": v}
# Volume accounting
if d.volume:
d.volume += v
else:
d.volume = v
if s.volume:
s.volume -= v
if v > Unit(0, "microliter"):
transfers.append(xfer)
if not transfers:
raise RuntimeError(
"At least one transfer must have a nonzero transfer volume."
)
for x in transfers:
x["volume"] = round(x["volume"].to("nl"), max_decimal_places)
return self._append_and_return(
AcousticTransfer([{"transfer": transfers}], droplet_size)
)
def illuminaseq(
self,
flowcell,
lanes,
sequencer,
mode,
index,
library_size,
dataref,
cycles=None,
):
"""
Load aliquots into specified lanes for Illumina sequencing.
The specified aliquots should already contain the appropriate mix for
sequencing and require a library concentration reported in
ng/uL.
Example Usage:
.. code-block:: python
p = Protocol()
sample_wells = p.ref(
"test_plate", None, "96-pcr", discard=True).wells_from(0, 8)
p.illuminaseq(
"PE",
[
{"object": sample_wells[0], "library_concentration": 1.0},
{"object": sample_wells[1], "library_concentration": 5.32},
{"object": sample_wells[2], "library_concentration": 54},
{"object": sample_wells[3], "library_concentration": 20},
{"object": sample_wells[4], "library_concentration": 23},
{"object": sample_wells[5], "library_concentration": 23},
{"object": sample_wells[6], "library_concentration": 21},
{"object": sample_wells[7], "library_concentration": 62}
],
"hiseq", "rapid", 'none', 250, "my_illumina")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"dataref": "my_illumina",
"index": "none",
"lanes": [
{
"object": "test_plate/0",
"library_concentration": 1
},
{
"object": "test_plate/1",
"library_concentration": 5.32
},
{
"object": "test_plate/2",
"library_concentration": 54
},
{
"object": "test_plate/3",
"library_concentration": 20
},
{
"object": "test_plate/4",
"library_concentration": 23
},
{
"object": "test_plate/5",
"library_concentration": 23
},
{
"object": "test_plate/6",
"library_concentration": 21
},
{
"object": "test_plate/7",
"library_concentration": 62
}
],
"flowcell": "PE",
"mode": "mid",
"sequencer": "hiseq",
"library_size": 250,
"op": "illumina_sequence"
}
]
Parameters
----------
flowcell : str
Flowcell designation: "SR" or " "PE"
lanes : list(dict)
.. code-block:: none
"lanes": [
{
"object": aliquot, Well,
"library_concentration": decimal, // ng/uL
},
{...}]
sequencer : str
Sequencer designation: "miseq", "hiseq" or "nextseq"
mode : str
Mode designation: "rapid", "mid" or "high"
index : str
Index designation: "single", "dual" or "none"
library_size: int
Library size expressed as an integer of basepairs
dataref : str
Name of sequencing dataset that will be returned.
cycles : Enum({"read_1", "read_2", "index_1", "index_2"})
Parameter specific to Illuminaseq read-length or number of
sequenced bases. Refer to the ASC for more details
Returns
-------
IlluminaSeq
Returns the :py:class:`autoprotocol.instruction.IlluminaSeq`
instruction created from the specified parameters
Raises
------
TypeError
If index and dataref are not of type str.
TypeError
If library_concentration is not a number.
TypeError
If library_size is not an integer.
ValueError
If flowcell, sequencer, mode, index are not of type a valid option.
ValueError
If number of lanes specified is more than the maximum lanes of the
specified type of sequencer.
KeyError
Invalid keys specified for cycles parameter
"""
valid_flowcells = ["PE", "SR"]
# currently available sequencers, modes and max number of lanes and
# cycles
valid_sequencers = {
"miseq": {"max_lanes": 1, "modes": ["high"], "max_cycles_read": 600},
"hiseq": {
"max_lanes": 8,
"modes": ["high", "rapid"],
"max_cycles_read": 500,
},
"nextseq": {
"max_lanes": 4,
"modes": ["high", "mid"],
"max_cycles_read": 300,
},
}
valid_indices = ["single", "dual", "none"]
valid_cycles = ["index_1", "index_2", "read_1", "read_2"]
max_cycles_ind = 12
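# Index reads (index_1/index_2) are capped at max_cycles_ind cycles
# regardless of the sequencer selected.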
if flowcell not in valid_flowcells:
raise ValueError(
f"Illumina sequencing flowcell type must be one "
f"of: {', '.join(valid_flowcells)}."
)
if sequencer not in valid_sequencers.keys():
raise ValueError(
f"Illumina sequencer must be one of: "
f"{', '.join(valid_sequencers.keys())}."
)
if not isinstance(lanes, list):
raise TypeError("Illumina sequencing lanes must be a list(dict)")
for l in lanes:
if not isinstance(l, dict):
raise TypeError("Illumina sequencing lanes must be a list(dict)")
if not all(k in l.keys() for k in ["object", "library_concentration"]):
raise TypeError(
"Each Illumina sequencing lane must contain an "
"'object' and a 'library_concentration'"
)
if not isinstance(l["object"], Well):
raise TypeError(
"Each Illumina sequencing object must be of " "type Well"
)
if not isinstance(l["library_concentration"], (float, int)):
raise TypeError(
"Each Illumina sequencing "
"library_concentration must be a number."
)
if len(lanes) > valid_sequencers[sequencer]["max_lanes"]:
raise ValueError(
f"The type of sequencer selected ({sequencer}) only has {valid_sequencers[sequencer]['max_lanes']} lane(s). You specified {len(lanes)}. Please submit additional Illumina Sequencing instructions."
)
if mode not in valid_sequencers[sequencer]["modes"]:
raise ValueError(
f"The type of sequencer selected ({sequencer}) has valid modes: {', '.join(valid_sequencers[sequencer]['modes'])}.You specified: {mode}."
)
if index not in valid_indices:
raise ValueError(
f"Illumina sequencing index must be one of: "
f"{', '.join(valid_indices)}."
)
if not isinstance(dataref, str):
raise TypeError(f"dataref: {dataref}, must be a string")
if not isinstance(library_size, int):
raise TypeError(f"library_size: {library_size}, must be an integer.")
if cycles:
if not isinstance(cycles, dict):
raise TypeError("Cycles must be a dict.")
if not all(c in valid_cycles for c in cycles.keys()):
raise KeyError(
f"Valid cycle parameters are: " f"{', '.join(valid_cycles)}"
)
if "read_1" not in cycles.keys():
raise ValueError("If specifying cycles, 'read_1' must be designated.")
if flowcell == "SR" and "read_2" in cycles.keys():
raise ValueError("SR does not have a second read: 'read_2'.")
if not all(isinstance(i, int) for i in cycles.values()):
raise ValueError("Cycles must be specified as an integer.")
for read in ["read_1", "read_2"]:
if cycles.get(read):
if cycles[read] > valid_sequencers[sequencer]["max_cycles_read"]:
raise ValueError(
f"The maximum number of cycles for {read} is {valid_sequencers[sequencer]['max_cycles_read']}."
)
for ind in ["index_1", "index_2"]:
if cycles.get(ind):
if cycles[ind] > max_cycles_ind:
raise ValueError(
f"The maximum number of cycles for {ind} is "
f"{max_cycles_ind}."
)
# set index 1 and 2 to default 0 if not otherwise specified
else:
cycles[ind] = 0
return self._append_and_return(
IlluminaSeq(
flowcell, lanes, sequencer, mode, index, library_size, dataref, cycles
)
)
# pylint: disable=redefined-builtin
def sangerseq(self, cont, wells, dataref, type="standard", primer=None):
"""
Send the indicated wells of the container specified for Sanger
sequencing.
The specified wells should already contain the appropriate mix for
sequencing, including primers and DNA according to the instructions
provided by the vendor.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.sangerseq(sample_plate,
sample_plate.wells_from(0,5).indices(),
"seq_data_022415")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"dataref": "seq_data_022415",
"object": "sample_plate",
"wells": [
"A1",
"A2",
"A3",
"A4",
"A5"
],
"op": "sanger_sequence"
}
]
Parameters
----------
cont : Container or str
Container with well(s) that contain material to be sequenced.
wells : list(Well) or WellGroup or Well
WellGroup of wells to be measured or a list of well references in
the form of ["A1", "B1", "C5", ...]
dataref : str
Name of sequencing dataset that will be returned.
type: Enum({"standard", "rca"})
Sanger sequencing type
primer : Container, optional
Tube containing sufficient primer for all RCA reactions. This field
will be ignored if you specify the sequencing type as "standard".
Returns
-------
SangerSeq
Returns the :py:class:`autoprotocol.instruction.SangerSeq`
instruction created from the specified parameters
Raises
------
RuntimeError
No primer location specified for rca sequencing type
ValueError
Wells belong to more than one container
TypeError
Invalid input type for wells
"""
seq_type = type.lower()
if seq_type == "rca" and not primer:
raise RuntimeError(
"You must specify the location of primer for "
"RCA sequencing reactions."
)
if isinstance(wells, Well):
wells = WellGroup(wells)
if isinstance(wells, WellGroup):
container = set([w.container for w in wells])
if len(container) > 1:
raise ValueError("All wells need to be on one container for SangerSeq")
wells = [str(w.index) for w in wells]
if not isinstance(wells, list):
raise TypeError(
"Unknown input. SangerSeq wells accepts either a"
"Well, a WellGroup, or a list of well indices"
)
return self._append_and_return(SangerSeq(cont, wells, dataref, type, primer))
def dispense(
self,
ref,
reagent,
columns,
is_resource_id=False,
step_size="5:uL",
flowrate=None,
nozzle_position=None,
pre_dispense=None,
shape=None,
shake_after=None,
):
"""
Dispense specified reagent to specified columns.
Example Usage:
.. code-block:: python
from autoprotocol.liquid_handle.liquid_handle_builders import *
from autoprotocol.instructions import Dispense
from autoprotocol import Protocol
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.dispense(sample_plate,
"water",
Dispense.builders.columns(
[Dispense.builders.column(0, "10:uL"),
Dispense.builders.column(1, "20:uL"),
Dispense.builders.column(2, "30:uL"),
Dispense.builders.column(3, "40:uL"),
Dispense.builders.column(4, "50:uL")
])
)
p.dispense(
sample_plate,
"water",
Dispense.builders.columns(
[Dispense.builders.column(0, "10:uL")]
),
nozzle_position=Dispense.builders.nozzle_position(
position_x=Unit("1:mm"),
position_y=Unit("2:mm"),
position_z=Unit("20:mm")
),
shape=shape_builder(
rows=8, columns=1, format="SBS96"
)
)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"reagent": "water",
"object": "sample_plate",
"columns": [
{
"column": 0,
"volume": "10:microliter"
},
{
"column": 1,
"volume": "20:microliter"
},
{
"column": 2,
"volume": "30:microliter"
},
{
"column": 3,
"volume": "40:microliter"
},
{
"column": 4,
"volume": "50:microliter"
}
],
"op": "dispense"
},
{
"reagent": "water",
"object": "sample_plate",
"columns": [
{
"column": 0,
"volume": "10:microliter"
}
],
"nozzle_position" : {
"position_x" : "1:millimeter",
"position_y" : "2:millimeter",
"position_z" : "20:millimeter"
},
"shape" : {
"rows" : 8,
"columns" : 1,
"format" : "SBS96"
},
"op": "dispense"
},
]
Parameters
----------
ref : Container
Container for reagent to be dispensed to.
reagent : str or well
Reagent to be dispensed. Use a string to specify the name or
resource_id (see below) of the reagent to be dispensed.
Alternatively, use a well to specify that the dispense operation
must be executed using a specific aliquot as the dispense source.
columns : list(dict("column": int, "volume": str/Unit))
Columns to be dispensed to, in the form of a list(dict)
specifying the column number and the volume to be dispensed to that
column. Columns are expressed as integers indexed from 0.
[{"column": <column num>, "volume": <volume>}, ...]
is_resource_id : bool, optional
If true, interprets reagent as a resource ID
step_size : str or Unit, optional
Specifies that the dispense operation must be executed
using a peristaltic pump with the given step size. Note
that the volume dispensed in each column must be an integer
multiple of the step_size. Currently, step_size must be either
5 uL or 0.5 uL. If set to None, will use vendor specified defaults.
flowrate : str or Unit, optional
The rate at which liquid is dispensed into the ref in units
of volume/time.
nozzle_position : dict, optional
A dict representing nozzle offsets from the bottom middle of the
plate's wells. see Dispense.builders.nozzle_position; specified as
{"position_x": Unit, "position_y": Unit, "position_z": Unit}.
pre_dispense : str or Unit, optional
The volume of reagent to be dispensed per-nozzle into waste
immediately prior to dispensing into the ref.
shape: dict, optional
The shape of the dispensing head to be used for the dispense.
See liquid_handle_builders.shape_builder; specified as
{"rows": int, "columns": int, "format": str} with format being a
valid SBS format.
shake_after: dict, optional
Parameters that specify how a plate should be shaken at the very
end of the instruction execution. See Dispense.builders.shake_after.
Returns
-------
Dispense
Returns the :py:class:`autoprotocol.instruction.Dispense`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types, e.g. ref is not of type Container
ValueError
Columns specified is invalid for this container type
ValueError
Invalid step-size given
ValueError
Invalid pre-dispense volume
"""
_VALID_STEP_SIZES = [Unit(5, "uL"), Unit(0.5, "uL")]
_DEFAULT_NOZZLE_COUNT = 8
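# With no explicit shape, assume an 8-nozzle head (one full SBS96 column)
# when accounting for pre_dispense volume.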
if not isinstance(ref, Container):
raise TypeError(f"ref must be a Container but it was {type(ref)}.")
columns = Dispense.builders.columns(columns)
ref_cols = list(range(ref.container_type.col_count))
if not all(_["column"] in ref_cols for _ in columns):
raise ValueError(
f"Specified dispense columns: {columns} contains a column index that is outside of the valid columns: {ref_cols} for ref: {ref}."
)
# pre-evaluate all parameters before making any changes to the Protocol
if flowrate is not None:
flowrate = parse_unit(flowrate, "uL/s")
if nozzle_position is not None:
nozzle_position = Dispense.builders.nozzle_position(**nozzle_position)
if pre_dispense is not None:
pre_dispense = parse_unit(pre_dispense, "uL")
if shape is not None:
shape = Dispense.builders.shape(**shape)
if shake_after is not None:
shake_after = Dispense.builders.shake_after(**shake_after)
nozzle_count = (
shape["rows"] * shape["columns"] if shape else _DEFAULT_NOZZLE_COUNT
)
if step_size is not None:
step_size = parse_unit(step_size, "uL")
if step_size not in _VALID_STEP_SIZES:
raise ValueError(
f"specified step_size was {step_size} but it must be in "
f"{_VALID_STEP_SIZES}"
)
for c in columns:
if c["volume"] % step_size != Unit("0:uL"):
raise ValueError(
f"Dispense volume must be a multiple of the step size {step_size}, but column {c} does not meet these requirements."
)
if pre_dispense is not None:
invalid_pre_dispense_range = pre_dispense < 2 * step_size
if invalid_pre_dispense_range and pre_dispense != Unit(0, "uL"):
raise ValueError(
f"Dispense pre_dispense must either be 0:uL or at least 2 * step_size: {step_size} but it was {pre_dispense}."
)
if pre_dispense % step_size != Unit(0, "uL"):
raise ValueError(
f"Dispense pre_dispense must be a multiple of step_size: {step_size} but it was {pre_dispense}."
)
row_count = ref.container_type.row_count()
if isinstance(reagent, Well):
self._remove_cover(reagent.container, "dispense from")
# Volume accounting
total_vol_dispensed = sum([Unit(c["volume"]) for c in columns]) * row_count
if pre_dispense is not None:
total_vol_dispensed += nozzle_count * pre_dispense
if reagent.volume:
reagent.volume -= total_vol_dispensed
else:
reagent.volume = -total_vol_dispensed
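# A Well source is forwarded to the instruction as reagent_source; the
# string-based reagent and resource_id fields are left unset.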
reagent, resource_id, reagent_source = None, None, reagent
else:
if not isinstance(reagent, str):
raise TypeError(
f"reagent: {reagent} must be a Well or string but it was: "
f"{type(reagent)}."
)
if is_resource_id:
reagent, resource_id, reagent_source = None, reagent, None
else:
# reagent remains unchanged
resource_id, reagent_source = None, None
self._remove_cover(ref, "dispense to")
for c in columns:
wells = ref.wells_from(c["column"], row_count, columnwise=True)
for w in wells:
if w.volume:
w.volume += c["volume"]
else:
w.volume = c["volume"]
return self._append_and_return(
Dispense(
object=ref,
columns=columns,
reagent=reagent,
resource_id=resource_id,
reagent_source=reagent_source,
step_size=step_size,
flowrate=flowrate,
nozzle_position=nozzle_position,
pre_dispense=pre_dispense,
shape=shape,
shake_after=shake_after,
)
)
def dispense_full_plate(
self,
ref,
reagent,
volume,
is_resource_id=False,
step_size="5:uL",
flowrate=None,
nozzle_position=None,
pre_dispense=None,
shape=None,
shake_after=None,
):
"""
Dispense the specified amount of the specified reagent to every well
of a container using a reagent dispenser.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.dispense_full_plate(sample_plate,
"water",
"100:microliter")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"reagent": "water",
"object": "sample_plate",
"columns": [
{
"column": 0,
"volume": "100:microliter"
},
{
"column": 1,
"volume": "100:microliter"
},
{
"column": 2,
"volume": "100:microliter"
},
{
"column": 3,
"volume": "100:microliter"
},
{
"column": 4,
"volume": "100:microliter"
},
{
"column": 5,
"volume": "100:microliter"
},
{
"column": 6,
"volume": "100:microliter"
},
{
"column": 7,
"volume": "100:microliter"
},
{
"column": 8,
"volume": "100:microliter"
},
{
"column": 9,
"volume": "100:microliter"
},
{
"column": 10,
"volume": "100:microliter"
},
{
"column": 11,
"volume": "100:microliter"
}
],
"op": "dispense"
}
]
Parameters
----------
ref : Container
Container for reagent to be dispensed to.
reagent : str or Well
Reagent to be dispensed. Use a string to specify the name or
resource_id (see below) of the reagent to be dispensed.
Alternatively, use a well to specify that the dispense operation
must be executed using a specific aliquot as the dispense source.
volume : Unit or str
Volume of reagent to be dispensed to each well
is_resource_id : bool, optional
If true, interprets reagent as a resource ID
step_size : str or Unit, optional
Specifies that the dispense operation must be executed
using a peristaltic pump with the given step size. Note
that the volume dispensed in each column must be an integer
multiple of the step_size. Currently, step_size must be either
5 uL or 0.5 uL. If set to None, will use vendor specified defaults.
flowrate : str or Unit, optional
The rate at which liquid is dispensed into the ref in units
of volume/time.
nozzle_position : dict, optional
A dict representing nozzle offsets from the bottom middle of the
plate's wells. see Dispense.builders.nozzle_position; specified as
{"position_x": Unit, "position_y": Unit, "position_z": Unit}.
pre_dispense : str or Unit, optional
The volume of reagent to be dispensed per-nozzle into waste
immediately prior to dispensing into the ref.
shape: dict, optional
The shape of the dispensing head to be used for the dispense.
See liquid_handle_builders.shape_builder; specified as
{"rows": int, "columns": int, "format": str} with format being a
valid SBS format.
shake_after: dict, optional
Parameters that specify how a plate should be shaken at the very
end of the instruction execution. See Dispense.builders.shake_after.
Returns
-------
Dispense
Returns the :py:class:`autoprotocol.instruction.Dispense`
instruction created from the specified parameters
"""
columns = Dispense.builders.columns(
[
{"column": col, "volume": volume}
for col in range(ref.container_type.col_count)
]
)
return self.dispense(
ref,
reagent,
columns,
is_resource_id,
step_size,
flowrate,
nozzle_position,
pre_dispense,
shape,
shake_after,
)
def spin(
self, ref, acceleration, duration, flow_direction=None, spin_direction=None
):
"""
Apply acceleration to a container.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.spin(sample_plate, "1000:g", "20:minute", flow_direction="outward")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"acceleration": "1000:g",
"duration": "20:minute",
"flow_direction": "outward",
"spin_direction": [
"cw",
"ccw"
]
"object": "sample_plate",
"op": "spin"
}
]
Parameters
----------
ref : Container
The container to be centrifuged.
acceleration: str
Acceleration to be applied to the plate, in units of `g` or
`meter/second^2`.
duration: str or Unit
Length of time that acceleration should be applied.
flow_direction: str
Specifies the direction contents will tend toward with respect to
the container. Valid directions are "inward" and "outward", default
value is "inward".
spin_direction: list(str)
A list of "cw" (clockwise), "cww" (counterclockwise). For each
element in the list, the container will be spun in the stated
direction for the set "acceleration" and "duration". Default values
are derived from the "flow_direction" parameter. If
"flow_direction" is "outward", then "spin_direction" defaults to
["cw", "ccw"]. If "flow_direction" is "inward", then
"spin_direction" defaults to ["cw"].
Returns
-------
Spin
Returns the :py:class:`autoprotocol.instruction.Spin`
instruction created from the specified parameters
Raises
------
TypeError
If ref to spin is not of type Container.
TypeError
If spin_direction or flow_direction are not properly formatted.
ValueError
If spin_direction or flow_direction do not have appropriate values.
"""
if flow_direction is not None and flow_direction not in ["inward", "outward"]:
raise ValueError(
"The specified value for flow_direction was not valid. If "
"specifying, please choose either 'inward' or 'outward'"
)
default_directions = {"inward": ["cw"], "outward": ["cw", "ccw"]}
if spin_direction is None and flow_direction:
spin_direction = default_directions[flow_direction]
if spin_direction is not None:
if not isinstance(spin_direction, list):
raise TypeError("Spin directions must be in the form of a list.")
if len(spin_direction) == 0:
raise ValueError(
"Spin direction must be a list containing at least one "
"spin direction ('cw', 'ccw')"
)
if spin_direction and not all(s in ["cw", "ccw"] for s in spin_direction):
raise ValueError("Spin directions must be 'cw' or 'ccw'.")
try:
duration = Unit(duration)
except ValueError as e:
raise ValueError(f"Duration must be a unit. {e}")
if not isinstance(ref, Container):
raise TypeError("Ref must be of type Container.")
if not flow_direction or flow_direction == "inward":
self._add_cover(ref, "inward spin")
elif flow_direction == "outward":
self._remove_cover(ref, "outward spin")
return self._append_and_return(
Spin(ref, acceleration, duration, flow_direction, spin_direction)
)
def agitate(self, ref, mode, speed, duration, temperature=None, mode_params=None):
"""
Agitate a container in a specific condition for a given duration. If
temperature is not specified, container is agitated at ambient
temperature by default.
Example Usage:
.. code-block:: python
p = Protocol()
plate = p.ref("test pcr plate", id=None, cont_type="96-pcr",
storage="cold_4")
p.agitate(
ref = plate,
mode="vortex",
speed="1000:rpm",
duration="5:minute",
temperature="25:celsius"
)
Autoprotocol Output:
.. code-block:: none
"instructions" : [
{
"object": "test pcr plate",
"mode": "vortex",
"speed": "1000:rpm",
"duration": "5:minute",
"temperature": "25:celsius",
"op": "agitate"
}
]
Parameters
----------
ref : Container
Container to be agitated
mode : str
Mode by which to agitate container
speed : str or Unit
Speed at which to agitate container
duration : str or Unit
Specify the duration to agitate for
temperature : Unit or str, optional
Specify target temperature to agitate container at.
Defaults to ambient
mode_params : dict, optional
Dictionary containing mode params for agitation modes
Returns
-------
Agitate
returns a :py:class:`autoprotocol.instruction.Agitate`
instruction created from the specified parameters
Raises
------
ValueError
If ref provided is not of type Container
ValueError
If speed is less than 0 rpm
ValueError
if duration is less than 0 minutes
ValueError
If `mode_params` not specified for mode `stir_bar`
ValueError
If valid keys for `mode_params` used for `stir_bar` are not included
ValueError
If wells specified in `mode_params` are not in the same container
ValueError
If `bar_shape` is not valid
ValueError
If `bar_length` is less than 0 millimeter
ValueError
If `mode` used does not require `mode_params`
TypeError
If ref cannot undergo agitate mode `roll` or `invert`
"""
valid_modes = ["vortex", "invert", "roll", "stir_bar"]
valid_bar_shapes = ["bar", "cross"]
valid_bar_mode_params = ["wells", "bar_shape", "bar_length"]
speed = parse_unit(speed)
temperature = parse_unit(temperature, "celsius") if temperature else None
duration = parse_unit(duration, "minute")
if not isinstance(ref, Container):
raise ValueError("Ref is not of type Container.")
if speed <= Unit("0:rpm"):
raise ValueError(f"Speed: {speed} must be more than 0 rpm.")
if duration <= Unit("0:minute"):
raise ValueError(f"Duration: {duration} must be longer than 0 minutes.")
if mode not in valid_modes:
raise ValueError(f"Agitate mode must be one of {valid_modes}")
if mode == "stir_bar":
if mode_params is None:
raise ValueError(
"Dictionary `mode_params` must be specified for the "
"mode `stir_bar`"
)
elif set(mode_params.keys()) != set(valid_bar_mode_params):
raise ValueError(
f"Params for `stir_bar` must include " f"{valid_bar_mode_params}"
)
wells = WellGroup(mode_params["wells"])
container = set([w.container for w in wells])
shape = mode_params["bar_shape"]
length = parse_unit(mode_params["bar_length"], "millimeter")
if len(container) > 1:
raise ValueError(
"All wells need to be on the same container for Agitate"
)
if shape not in valid_bar_shapes:
raise ValueError(f"Param `bar_shape` must be one of {valid_bar_shapes}")
if length <= Unit(0, "millimeter"):
raise ValueError(
"Params `bar_length` must be greater than 0 millimeter"
)
elif mode != "stir_bar" and mode_params:
raise ValueError(f"Mode {mode} does not need mode_params specified")
elif mode in ["invert", "roll"] and not ref.container_type.is_tube:
raise TypeError(f"Specified container {ref} cannot be inverted or rolled.")
return self._append_and_return(
Agitate(ref, mode, speed, duration, temperature, mode_params)
)
def thermocycle(
self,
ref,
groups,
volume="10:microliter",
dataref=None,
dyes=None,
melting_start=None,
melting_end=None,
melting_increment=None,
melting_rate=None,
lid_temperature=None,
):
"""
Append a Thermocycle instruction to the list of instructions, with
groups is a list(dict) in the form of:
.. code-block:: python
"groups": [{
"cycles": integer,
"steps": [
{
"duration": duration,
"temperature": temperature,
"read": boolean // optional (default false)
},
{
"duration": duration,
"gradient": {
"top": temperature,
"bottom": temperature
},
"read": boolean // optional (default false)
}
]
}],
Thermocycle can also be used for either conventional or row-wise
gradient PCR as well as qPCR. Refer to the examples below for details.
Example Usage:
To thermocycle a container according to the protocol:
* 1 cycle:
* 95 degrees for 5 minutes
* 30 cycles:
* 95 degrees for 30 seconds
* 56 degrees for 20 seconds
* 72 degrees for 30 seconds
* 1 cycle:
* 72 degrees for 10 minutes
* 1 cycle:
* 4 degrees for 30 seconds
* all cycles: Lid temperature at 97 degrees
.. code-block:: python
from instruction import Thermocycle
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-pcr",
storage="warm_37")
# a plate must be sealed before it can be thermocycled
p.seal(sample_plate)
p.thermocycle(
sample_plate,
[
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step("95:celsius", "5:minute")
]
),
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step("95:celsius", "30:s"),
Thermocycle.builders.step("56:celsius", "20:s"),
Thermocycle.builders.step("72:celsius", "20:s"),
],
cycles=30
),
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step("72:celsius", "10:minute")
]
),
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step("4:celsius", "30:s")
]
)
],
lid_temperature="97:celsius"
)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"object": "sample_plate",
"op": "seal"
},
{
"volume": "10:microliter",
"dataref": null,
"object": "sample_plate",
"groups": [
{
"cycles": 1,
"steps": [
{
"duration": "5:minute",
"temperature": "95:celsius"
}
]
},
{
"cycles": 30,
"steps": [
{
"duration": "30:second",
"temperature": "95:celsius"
},
{
"duration": "20:second",
"temperature": "56:celsius"
},
{
"duration": "20:second",
"temperature": "72:celsius"
}
]
},
{
"cycles": 1,
"steps": [
{
"duration": "10:minute",
"temperature": "72:celsius"
}
]
},
{
"cycles": 1,
"steps": [
{
"duration": "30:second",
"temperature": "4:celsius"
}
]
}
],
"op": "thermocycle"
}
]
To gradient thermocycle a container according to the protocol:
* 1 cycle:
* 95 degrees for 5 minutes
* 30 cycles:
* 95 degrees for 30 seconds
Top Row:
* 65 degrees for 20 seconds
Bottom Row:
* 55 degrees for 20 seconds
* 72 degrees for 30 seconds
* 1 cycle:
* 72 degrees for 10 minutes
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-pcr",
storage="warm_37")
# a plate must be sealed before it can be thermocycled
p.seal(sample_plate)
p.thermocycle(
sample_plate,
[
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step("95:celsius", "5:minute")
]
),
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step("95:celsius", "30:s"),
Thermocycle.builders.step(
{"top": "65:celsius", "bottom": "55:celsius"},
"20:s"
),
Thermocycle.builders.step("72:celsius", "20:s"),
],
cycles=30
),
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step("72:celsius", "10:minute")
]
)
]
)
To conduct a qPCR, at least one dye type and the dataref field has to
be specified.
The example below uses SYBR dye and the following temperature profile:
* 1 cycle:
* 95 degrees for 3 minutes
* 40 cycles:
* 95 degrees for 10 seconds
* 60 degrees for 30 seconds (Read during extension)
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-pcr",
storage="warm_37")
# a plate must be sealed before it can be thermocycled
p.seal(sample_plate)
p.thermocycle(
sample_plate,
[
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step("95:celsius", "3:minute")
]
),
Thermocycle.builders.group(
steps=[
Thermocycle.builders.step(
"95:celsius",
"10:second",
read=False
),
Thermocycle.builders.step(
"95:celsius",
"10:second",
read=True
)
],
cycles=40
)
],
dataref = "my_qpcr_data",
dyes = {"SYBR": sample_plate.all_wells().indices()}
)
Parameters
----------
ref : Container
Container to be thermocycled.
groups : list(dict)
List of thermocycling instructions formatted as above
volume : str or Unit, optional
Volume contained in wells being thermocycled
dataref : str, optional
Name of dataref representing read data if performing qPCR
dyes : dict, optional
Dictionary mapping dye types to the wells they're used in
melting_start: str or Unit, optional
Temperature at which to start the melting curve.
melting_end: str or Unit, optional
Temperature at which to end the melting curve.
melting_increment: str or Unit, optional
Temperature by which to increment the melting curve. Accepted
increment values are between 0.1 and 9.9 degrees celsius.
melting_rate: str or Unit, optional
Specifies the duration of each temperature step in the melting
curve.
lid_temperature: str or Unit, optional
Specifies the lid temperature throughout the duration of the
thermocycling instruction
Returns
-------
Thermocycle
Returns the :py:class:`autoprotocol.instruction.Thermocycle`
instruction created from the specified parameters
Raises
------
AttributeError
If groups are not properly formatted
TypeError
If ref to thermocycle is not of type Container.
ValueError
Container specified cannot be thermocycled
ValueError
Lid temperature is not within bounds
"""
if not isinstance(ref, Container):
raise TypeError("Ref must be of type Container.")
if "thermocycle" not in ref.container_type.capabilities:
raise ValueError(
f"Container '{ref.name}' type '{ref.container_type.shortname}', cannot be thermocycled."
)
groups = [Thermocycle.builders.group(**_) for _ in groups]
dyes = Thermocycle.builders.dyes(**(dyes or {}))
melting = Thermocycle.builders.melting(
melting_start, melting_end, melting_increment, melting_rate
)
# Constants are currently based off the Biorad thermocyclers, and
# assumes that they are generally reflective of other thermocyclers
_MIN_LID_TEMP = Unit("30:celsius")
_MAX_LID_TEMP = Unit("110:celsius")
if lid_temperature is not None:
lid_temperature = parse_unit(lid_temperature)
if not (_MIN_LID_TEMP <= lid_temperature <= _MAX_LID_TEMP):
raise ValueError(
f"Lid temperature {lid_temperature} has to be within "
f"[{_MIN_LID_TEMP}, {_MAX_LID_TEMP}]"
)
self._add_seal(ref, "thermocycle")
return self._append_and_return(
Thermocycle(
object=ref,
groups=groups,
volume=volume,
dataref=dataref,
dyes=dyes,
melting=melting,
lid_temperature=lid_temperature,
)
)
def incubate(
self,
ref,
where,
duration,
shaking=False,
co2=0,
uncovered=False,
target_temperature=None,
shaking_params=None,
):
"""
Move plate to designated thermoisolator or ambient area for incubation
for specified duration.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-pcr",
storage="warm_37")
# a plate must be sealed/covered before it can be incubated
p.seal(sample_plate)
p.incubate(sample_plate, "warm_37", "1:hour", shaking=True)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"object": "sample_plate",
"op": "seal"
},
{
"duration": "1:hour",
"where": "warm_37",
"object": "sample_plate",
"shaking": true,
"op": "incubate",
"co2_percent": 0
}
]
Parameters
----------
ref : Ref or str
The container to be incubated
where : Enum({"ambient", "warm_37", "cold_4", "cold_20", "cold_80"})
Temperature at which to incubate specified container
duration : Unit or str
Length of time to incubate container
shaking : bool, optional
Specify whether or not to shake container if available at the
specified temperature
co2 : Number, optional
Carbon dioxide percentage
uncovered: bool, optional
Specify whether the container should be uncovered during incubation
target_temperature : Unit or str, optional
Specify a target temperature for a device (eg. an incubating block)
to reach during the specified duration.
shaking_params: dict, optional
Specify "path" and "frequency" of shaking parameters to be used
with compatible devices (eg. thermoshakes)
Returns
-------
Incubate
Returns the :py:class:`autoprotocol.instruction.Incubate`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types given, e.g. ref is not of type Container
RuntimeError
Incubating uncovered in a location which is shaking
"""
if not isinstance(ref, Container):
raise TypeError("Ref needs to be of type Container")
allowed_uncovered = ["ambient"]
if uncovered and (where not in allowed_uncovered or shaking):
raise RuntimeError(
f"If incubating uncovered, location must be in "
f"{', '.join(allowed_uncovered)} and not shaking."
)
if not isinstance(co2, Number):
raise TypeError("co2 must be a number.")
if target_temperature:
target_temperature = parse_unit(target_temperature, "celsius")
if not uncovered:
self._add_cover(ref, "incubate")
return self._append_and_return(
Incubate(
ref, where, duration, shaking, co2, target_temperature, shaking_params
)
)
def absorbance(
self,
ref,
wells,
wavelength,
dataref,
flashes=25,
incubate_before=None,
temperature=None,
settle_time=None,
):
"""
Read the absorbance for the indicated wavelength for the indicated
wells. Append an Absorbance instruction to the list of instructions for
this Protocol object.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.absorbance(sample_plate, sample_plate.wells_from(0,12),
"600:nanometer", "test_reading", flashes=50)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"dataref": "test_reading",
"object": "sample_plate",
"wells": [
"A1",
"A2",
"A3",
"A4",
"A5",
"A6",
"A7",
"A8",
"A9",
"A10",
"A11",
"A12"
],
"num_flashes": 50,
"wavelength": "600:nanometer",
"op": "absorbance"
}
]
Parameters
----------
ref : str or Container
Object to execute the absorbance read on
wells : list(Well) or WellGroup or Well
WellGroup of wells to be measured or a list of well references in
the form of ["A1", "B1", "C5", ...]
wavelength : str or Unit
wavelength of light absorbance to be read for the indicated wells
dataref : str
name of this specific dataset of measured absorbances
flashes : int, optional
number of flashes for the read
temperature: str or Unit, optional
set temperature to heat plate reading chamber
settle_time: Unit, optional
the time before the start of the measurement, defaults
to vendor specifications
incubate_before: dict, optional
parameters for incubation before executing the plate read
See Also :meth:`Absorbance.builders.incubate_params`
Returns
-------
Absorbance
Returns the :py:class:`autoprotocol.instruction.Absorbance`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types, e.g. wells given is of type Well, WellGroup
or list of wells
ValueError
Wells specified are not from the same container
ValueError
Settle time has to be greater than 0
UnitError
Settle time is not of type Unit
"""
wells = WellGroup(wells)
container = set([w.container for w in wells])
if len(container) > 1:
raise ValueError(
"All wells need to be on the same container for Absorbance"
)
wells = [str(w.index) for w in wells]
if incubate_before:
Absorbance.builders.incubate_params(**incubate_before)
if settle_time:
try:
settle_time = Unit(settle_time)
if settle_time < Unit(0, "second"):
raise ValueError(
"'settle_time' must be a time equal " "to or greater than 0."
)
except UnitError:
raise UnitError("'settle_time' must be of type Unit.")
return self._append_and_return(
Absorbance(
ref,
wells,
wavelength,
dataref,
flashes,
incubate_before,
temperature,
settle_time,
)
)
def fluorescence(
self,
ref,
wells,
excitation,
emission,
dataref,
flashes=25,
temperature=None,
gain=None,
incubate_before=None,
detection_mode=None,
position_z=None,
settle_time=None,
lag_time=None,
integration_time=None,
):
"""
Read the fluorescence at the indicated excitation and emission wavelengths for the indicated
wells. Append a Fluorescence instruction to the list of instructions
for this Protocol object.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.fluorescence(sample_plate, sample_plate.wells_from(0,12),
excitation="587:nanometer", emission="610:nanometer",
dataref="test_reading")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"dataref": "test_reading",
"excitation": "587:nanometer",
"object": "sample_plate",
"emission": "610:nanometer",
"wells": [
"A1",
"A2",
"A3",
"A4",
"A5",
"A6",
"A7",
"A8",
"A9",
"A10",
"A11",
"A12"
],
"num_flashes": 25,
"op": "fluorescence"
}
]
Parameters
----------
ref : str or Container
Container to plate read.
wells : list(Well) or WellGroup or Well
WellGroup of wells to be measured or a list of well references in
the form of ["A1", "B1", "C5", ...]
excitation : str or Unit
Wavelength of light used to excite the wells indicated
emission : str or Unit
Wavelength of light to be measured for the indicated wells
dataref : str
Name of this specific dataset of measured fluorescence
flashes : int, optional
Number of flashes.
temperature: str or Unit, optional
set temperature to heat plate reading chamber
gain: float, optional
float between 0 and 1, multiplier, gain=0.2 of maximum signal
amplification
incubate_before: dict, optional
parameters for incubation before executing the plate read
See Also :meth:`Fluorescence.builders.incubate_params`
detection_mode: str, optional
set the detection mode of the optics, ["top", "bottom"],
defaults to vendor specified defaults.
position_z: dict, optional
distance from the optics to the surface of the plate transport,
only valid for "top" detection_mode and vendor capabilities.
Specified as either a set distance - "manual", OR calculated from
a WellGroup - "calculated_from_wells". Only one position_z
determination may be specified
.. code-block:: none
position_z = {
"manual": Unit
- OR -
"calculated_from_wells": []
}
settle_time: Unit, optional
the time before the start of the measurement, defaults
to vendor specifications
lag_time: Unit, optional
time between flashes and the start of the signal integration,
defaults to vendor specifications
integration_time: Unit, optional
duration of the signal recording, per Well, defaults to vendor
specifications
Examples
--------
position_z:
.. code-block:: none
position_z = {
"calculated_from_wells": ["plate/A1", "plate/A2"]
}
-OR-
position_z = {
"manual": "20:micrometer"
}
Returns
-------
Fluorescence
Returns the :py:class:`autoprotocol.instruction.Fluorescence`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types, e.g. wells given is not of type Well, WellGroup,
or list of wells
ValueError
Wells specified are not from the same container
ValueError
Settle time, integration time or lag time has to be greater than 0
UnitError
Settle time, integration time, lag time or position z is not
of type Unit
ValueError
Unknown value given for `detection_mode`
ValueError
Position z specified for non-top detection mode
KeyError
For position_z, only `manual` and `calculated_from_wells`
is allowed
NotImplementedError
Specifying `calculated_from_wells` as that has not been
implemented yet
"""
wells = WellGroup(wells)
container = set([w.container for w in wells])
if len(container) > 1:
raise ValueError(
"All wells need to be on the same container for Fluorescence"
)
wells = [str(w.index) for w in wells]
if gain is not None and not (0 <= gain <= 1):
raise ValueError(
f"fluorescence gain set to {gain} must be between 0 and 1, "
f"inclusive"
)
if incubate_before:
Fluorescence.builders.incubate_params(**incubate_before)
valid_detection_modes = ["top", "bottom"]
if detection_mode and detection_mode not in valid_detection_modes:
raise ValueError(
f"Unknown value for 'detection_mode'. Must be one of "
f"{valid_detection_modes}."
)
if detection_mode == "bottom" and position_z:
raise ValueError(
"position_z is only valid for 'top' detection_mode " "measurements."
)
if settle_time:
try:
settle_time = Unit(settle_time)
if settle_time < Unit(0, "second"):
raise ValueError(
"'settle_time' must be a time equal " "to or greater than 0."
)
except UnitError:
raise UnitError("'settle_time' must be of type Unit.")
if lag_time:
try:
lag_time = Unit(lag_time)
if lag_time < Unit(0, "second"):
raise ValueError(
"'lag_time' must be a time equal " "to or greater than 0."
)
except UnitError:
raise UnitError("'lag_time' must be of type Unit.")
if integration_time:
try:
integration_time = Unit(integration_time)
if integration_time < Unit(0, "second"):
raise ValueError(
"'integration_time' must be a time equal "
"to or greater than 0."
)
except UnitError:
raise UnitError("'integration_time' must be of type Unit.")
if position_z:
valid_pos_z = ["manual", "calculated_from_wells"]
if not isinstance(position_z, dict):
raise TypeError("'position_z' must be of type dict.")
if len(position_z.keys()) > 1:
raise ValueError(
"'position_z' can only have one mode of calculation "
"specified: 'manual' or 'calculated_from_wells'."
)
for k in position_z.keys():
if k not in valid_pos_z:
raise KeyError(
f"'position_z' keys can only be 'manual' or 'calculated_from_wells'. '{k}' is not a recognized key."
)
if "manual" in position_z.keys():
try:
manual = Unit(position_z["manual"])
if manual < Unit(0, "micrometer"):
raise ValueError(
"'manual' z_position must be a length equal "
"to or greater than 0."
)
except UnitError:
raise UnitError("'manual' position_z must be of type Unit.")
if "calculated_from_wells" in position_z.keys():
# blocking calculated_from_wells until fully implemented
# remove the NotImplementedError below to release this feature
raise NotImplementedError(
"This feature, 'calculated_from_wells', "
"has not been implemented yet. Please use "
"'position_z':{'manual': 'set_value'} to "
"specify a position_z."
)
# pragma pylint: disable=unreachable, unused-variable
z_ws = position_z["calculated_from_wells"]
if isinstance(z_ws, Well):
z_ws = [z_ws]
position_z["calculated_from_wells"] = z_ws
elif isinstance(z_ws, list):
if not all(isinstance(w, Well) for w in z_ws):
raise TypeError(
"All elements in list must be wells for "
"position_z, 'calculated_from_wells'."
)
else:
raise TypeError(
"Wells specified for 'calculated_from_wells' "
"must be Well, list of wells, WellGroup."
)
# check z_ws against container/ref for measurement
# if ref is Container
if isinstance(ref, Container):
try:
valid_z_ws = ref.wells(z_ws)
except ValueError:
raise ValueError(
"Well indices specified for "
"'calculated_from_wells' must "
"be valid wells of the ref'd "
"container."
)
# pragma pylint: enable=unreachable, unused-variable
return self._append_and_return(
Fluorescence(
ref,
wells,
excitation,
emission,
dataref,
flashes,
incubate_before,
temperature,
gain,
detection_mode,
position_z,
settle_time,
lag_time,
integration_time,
)
)
def luminescence(
self,
ref,
wells,
dataref,
incubate_before=None,
temperature=None,
settle_time=None,
integration_time=None,
):
"""
Read luminescence of indicated wells.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.luminescence(sample_plate, sample_plate.wells_from(0,12),
"test_reading")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"dataref": "test_reading",
"object": "sample_plate",
"wells": [
"A1",
"A2",
"A3",
"A4",
"A5",
"A6",
"A7",
"A8",
"A9",
"A10",
"A11",
"A12"
],
"op": "luminescence"
}
]
Parameters
----------
ref : str or Container
Container to plate read.
wells : list(Well) or WellGroup or Well
WellGroup of wells to be measured or a list of well references in
the form of ["A1", "B1", "C5", ...]
dataref : str
Name of this dataset of measured luminescence readings.
temperature: str or Unit, optional
set temperature to heat plate reading chamber
settle_time: Unit, optional
the time before the start of the measurement, defaults
to vendor specifications
incubate_before: dict, optional
parameters for incubation before executing the plate read
See Also :meth:`Absorbance.builders.incubate_params`
integration_time: Unit, optional
duration of the signal recording, per Well, defaults to vendor
specifications
Returns
-------
Luminescence
Returns the :py:class:`autoprotocol.instruction.Luminescence`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types, e.g. wells given is not of type Well, WellGroup,
or list of wells
ValueError
Wells specified are not from the same container
ValueError
Settle time or integration time has to be greater than 0
UnitError
Settle time or integration time is not of type Unit
"""
wells = WellGroup(wells)
container = set([w.container for w in wells])
if len(container) > 1:
raise ValueError(
"All wells need to be on the same container for Luminescence"
)
wells = [str(w.index) for w in wells]
if incubate_before:
Luminescence.builders.incubate_params(**incubate_before)
if settle_time:
try:
settle_time = Unit(settle_time)
if settle_time < Unit(0, "second"):
raise ValueError(
"'settle_time' must be a time equal " "to or greater than 0."
)
except UnitError:
raise UnitError("'settle_time' must be of type Unit.")
if integration_time:
try:
integration_time = Unit(integration_time)
if integration_time < Unit(0, "second"):
raise ValueError(
"'integration_time' must be a time equal "
"to or greater than 0."
)
except UnitError:
raise UnitError("'integration_time' must be of type Unit.")
return self._append_and_return(
Luminescence(
ref,
wells,
dataref,
incubate_before,
temperature,
settle_time,
integration_time,
)
)
def gel_separate(self, wells, volume, matrix, ladder, duration, dataref):
"""
Separate nucleic acids on an agarose gel.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.gel_separate(sample_plate.wells_from(0,12), "10:microliter",
"agarose(8,0.8%)", "ladder1", "11:minute",
"genotyping_030214")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"dataref": "genotyping_030214",
"matrix": "agarose(8,0.8%)",
"volume": "10:microliter",
"ladder": "ladder1",
"objects": [
"sample_plate/0",
"sample_plate/1",
"sample_plate/2",
"sample_plate/3",
"sample_plate/4",
"sample_plate/5",
"sample_plate/6",
"sample_plate/7",
"sample_plate/8",
"sample_plate/9",
"sample_plate/10",
"sample_plate/11"
],
"duration": "11:minute",
"op": "gel_separate"
}
]
Parameters
----------
wells : list(Well) or WellGroup or Well
List of wells or WellGroup containing wells to be
separated on gel.
volume : str or Unit
Volume of liquid to be transferred from each well specified to a
lane of the gel.
matrix : str
Matrix (gel) in which to gel separate samples
ladder : str
Ladder by which to measure separated fragment size
duration : str or Unit
Length of time to run current through gel.
dataref : str
Name of this set of gel separation results.
Returns
-------
GelSeparate
Returns the :py:class:`autoprotocol.instruction.GelSeparate`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types, e.g. wells given is not of type Well, WellGroup,
or list of wells
ValueError
Specifying more wells than the number of available lanes in
the selected matrix
"""
# Check valid well inputs
if not is_valid_well(wells):
raise TypeError(
"Wells must be of type Well, list of Wells, or " "WellGroup."
)
if isinstance(wells, Well):
wells = WellGroup(wells)
for w in wells:
self._remove_cover(w.container, "gel separate")
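# The lane count is encoded in the matrix string, e.g. "agarose(8,0.8%)"
# provides 8 lanes.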
max_well = int(matrix.split("(", 1)[1].split(",", 1)[0])
if len(wells) > max_well:
raise ValueError(
"Number of wells is greater than available lanes in matrix"
)
return self._append_and_return(
GelSeparate(wells, volume, matrix, ladder, duration, dataref)
)
def gel_purify(self, extracts, volume, matrix, ladder, dataref):
"""
Separate nucleic acids on an agarose gel and purify according to
parameters. If gel extract lanes are not specified, they will be
sequentially ordered and purified on as many gels as necessary.
Each element in extracts specifies a source loaded in a single lane of
gel with a list of bands that will be purified from that lane. If the
same source is to be run on separate lanes, a new dictionary must be
added to extracts. It is also possible to add an element to extract
with a source but without a list of bands. In that case, the source
will be run in a lane without extraction.
Example Usage:
.. code-block:: python
p = Protocol()
sample_wells = p.ref("test_plate", None, "96-pcr",
discard=True).wells_from(0, 8)
extract_wells = [p.ref("extract_" + str(i.index), None,
"micro-1.5", storage="cold_4").well(0)
for i in sample_wells]
extracts = [make_gel_extract_params(
w,
make_band_param(
"TE",
"5:microliter",
80,
79,
extract_wells[i]))
for i, w in enumerate(sample_wells)]
p.gel_purify(extracts, "10:microliter",
"size_select(8,0.8%)", "ladder1",
"gel_purify_example")
Autoprotocol Output:
For extracts[0]
.. code-block:: none
{
"band_list": [
{
"band_size_range": {
"max_bp": 80,
"min_bp": 79
},
"destination": Well(Container(extract_0), 0, None),
"elution_buffer": "TE",
"elution_volume": "Unit(5.0, 'microliter')"
}
],
"gel": None,
"lane": None,
"source": Well(Container(test_plate), 0, None)
}
Parameters
----------
extracts: list(dict)
List of gel extraction parameters
See Also :meth:`GelPurify.builders.extract`
volume: str or Unit
Volume of liquid to be transferred from each well specified to a
lane of the gel.
matrix: str
Matrix (gel) in which to gel separate samples
ladder: str
Ladder by which to measure separated fragment size
dataref: str
Name of this set of gel separation results.
Returns
-------
GelPurify
Returns the :py:class:`autoprotocol.instruction.GelPurify`
instruction created from the specified parameters
Raises
-------
RuntimeError
If matrix is not properly formatted.
AttributeError
If extract parameters are not a list of dictionaries.
KeyError
If extract parameters do not contain the specified parameter keys.
ValueError
If min_bp is greater than max_bp.
ValueError
If extract destination is not of type Well.
ValueError
If extract elution volume is not of type Unit
ValueError
if extract elution volume is not greater than 0.
RuntimeError
If gel extract lanes are set for some but not all extract wells.
RuntimeError
If all samples do not fit on single gel type.
TypeError
If lane designated for gel extracts is not an integer.
RuntimeError
If designated lane index is outside lanes within the gel.
RuntimeError
If lanes not designated and number of extracts not equal to number
of samples.
"""
from itertools import groupby
from operator import itemgetter
try:
max_well = int(matrix.split("(", 1)[1].split(",", 1)[0])
except (AttributeError, IndexError):
raise RuntimeError("Matrix specified is not properly formatted.")
volume = Unit(volume)
if volume <= Unit("0:microliter"):
raise ValueError(f"Volume: {volume}, must be greater than 0:microliter")
if not isinstance(ladder, str):
raise TypeError(f"Ladder: {ladder}, must be a string")
if not isinstance(dataref, str):
raise TypeError(f"Dataref: {dataref}, must be a string")
if not isinstance(extracts, list):
extracts = [extracts]
extracts = [GelPurify.builders.extract(**_) for _ in extracts]
gel_set = [e["gel"] for e in extracts]
lane_set = [e["lane"] for e in extracts]
if None in gel_set:
if any(gel_set):
raise RuntimeError(
"If one extract has gel set, all extracts must have " "gel set"
)
else:
if None in lane_set:
if any(lane_set):
raise RuntimeError(
"If one extract has lane set, all extracts must "
"have lane set"
)
else:
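# Neither gels nor lanes were specified: assign extracts
# sequentially, filling each gel's lanes before starting the next gel.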
for i, e in enumerate(extracts):
e["gel"] = i // max_well
e["lane"] = i % max_well
else:
for e in extracts:
e["gel"] = 0
sort_key = itemgetter("gel")
parsed_extracts = [
list(grp)
for key, grp in groupby(sorted(extracts, key=sort_key), key=sort_key)
]
instructions = []
for pe in parsed_extracts:
lane_set = [e["lane"] for e in pe]
if len(lane_set) > max_well:
raise RuntimeError(
f"The gel is not large enough to accommodate all lanes: gel has {max_well} wells, {len(lane_set)} lanes specified"
)
if None in lane_set:
if any(lane_set):
raise RuntimeError(
"If one extract has lane set, all extracts must have "
"lane set"
)
else:
for i, e in enumerate(pe):
e["lane"] = i % max_well
lane_set = [e["lane"] for e in pe]
if sorted(lane_set) != list(range(0, max(lane_set) + 1)):
raise RuntimeError("Lanes must be contiguous, unique, and start from 0")
if len(parsed_extracts) > 1:
dataref_gel = f"{dataref}_{pe[0]['gel']}"
else:
dataref_gel = dataref
pe = sorted(pe, key=itemgetter("lane"))
samples = [e["source"] for e in pe]
pe_unpacked = []
for e in pe:
for b in e["band_list"]:
ext = b
ext["lane"] = e["lane"]
pe_unpacked.append(ext)
instructions.append(
self._append_and_return(
GelPurify(samples, volume, matrix, ladder, dataref_gel, pe_unpacked)
)
)
return instructions
def seal(self, ref, type=None, mode=None, temperature=None, duration=None):
"""
Seal indicated container using the automated plate sealer.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-pcr",
storage="warm_37")
p.seal(sample_plate, mode="thermal", temperature="160:celsius")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"object": "sample_plate",
"type": "ultra-clear",
"mode": "thermal",
"mode_params": {
"temperature": "160:celsius"
},
"op": "seal"
}
]
Parameters
----------
ref : Container
Container to be sealed
type : str, optional
Seal type to be used, such as "ultra-clear" or "foil".
mode: str, optional
Sealing method to be used, such as "thermal" or "adhesive". Defaults
to None, which is interpreted sensibly based on the execution
environment.
temperature: Unit or str, optional
Temperature at which to melt the sealing film onto the ref. Only
applicable to thermal sealing; specifying it together with an
adhesive sealing mode raises a RuntimeError. If unspecified, the
thermal sealing temperature defaults to the manufacturer-recommended
or internally-optimized value for the target container type.
duration: Unit or str, optional
Duration for which to press the (heated, if thermal) seal down on
the ref. Defaults to manufacturer-recommended or internally-
optimized seal times for the target container type. Currently
applies only to thermal sealing.
Returns
-------
Seal
Returns the :py:class:`autoprotocol.instruction.Seal`
instruction created from the specified parameters
Raises
------
TypeError
If ref is not of type Container.
RuntimeError
If container type does not have `seal` capability.
RuntimeError
If seal is not a valid seal type.
RuntimeError
If the sealing mode is invalid, or incompatible with the given ref
RuntimeError
If thermal sealing params (temperature and/or duration) are
specified alongside an adhesive sealing mode.
RuntimeError
If specified thermal sealing parameters are invalid
RuntimeError
If container is already covered with a lid.
"""
SEALING_MODES = ["thermal", "adhesive"]
if not isinstance(ref, Container):
raise TypeError("Container to seal must be of type Container.")
if "seal" not in ref.container_type.capabilities:
raise RuntimeError(
f"Container '{ref.name}' type '{ref.container_type.shortname}',"
f" cannot be sealed."
)
if type is None:
type = ref.container_type.seal_types[0]
if type not in SEAL_TYPES:
raise RuntimeError(f"{type} is not a valid seal type")
if ref.is_covered():
raise RuntimeError("A container cannot be sealed over a lid.")
if not (mode is None or mode in SEALING_MODES):
raise RuntimeError(f"{mode} is not a valid sealing mode")
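# Supplying temperature or duration implies thermal sealing when no
# mode is given; these parameters conflict with adhesive sealing.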
if temperature is not None or duration is not None:
if mode == "adhesive":
raise RuntimeError(
"Thermal sealing parameters `temperature` and `duration` "
"are incompatible with the chosen adhesive sealing mode."
)
mode = "thermal"
mode_params = dict()
if temperature is not None:
mode_params["temperature"] = parse_unit(temperature, "celsius")
if duration is not None:
mode_params["duration"] = parse_unit(duration, "second")
else:
mode_params = None
if not ref.is_sealed():
ref.cover = type
return self._append_and_return(Seal(ref, type, mode, mode_params))
def unseal(self, ref):
"""
Remove seal from indicated container using the automated plate
unsealer.
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-pcr",
storage="warm_37")
# a plate must be sealed to be unsealed
p.seal(sample_plate)
p.unseal(sample_plate)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"object": "sample_plate",
"op": "seal",
"type": "ultra-clear"
},
{
"object": "sample_plate",
"op": "unseal"
}
]
Parameters
----------
ref : Container
Container to be unsealed.
Returns
-------
Unseal
Returns the :py:class:`autoprotocol.instruction.Unseal`
instruction created from the specified parameters
Raises
------
TypeError
If ref is not of type Container.
RuntimeError
If the container is covered with a lid rather than sealed.
"""
if not isinstance(ref, Container):
raise TypeError("Container to unseal must be of type Container.")
if ref.is_covered():
raise RuntimeError(
"A container with a cover cannot be unsealed, "
"use the instruction uncover."
)
if ref.is_sealed():
ref.cover = None
unseal_inst = Unseal(ref)
return self._append_and_return(unseal_inst)
def cover(self, ref, lid=None, retrieve_lid=None):
"""
Place specified lid type on specified container
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
p.cover(sample_plate, lid="universal")
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"lid": "universal",
"object": "sample_plate",
"op": "cover"
}
]
Parameters
----------
ref : Container
Container to be covered.
lid : str, optional
Type of lid to cover the container. Must be a valid lid type for
the container type.
retrieve_lid: bool, optional
Flag to retrieve lid from previously stored location (see uncover).
Returns
-------
Cover
Returns the :py:class:`autoprotocol.instruction.Cover`
instruction created from the specified parameters
Raises
------
TypeError
If ref is not of type Container.
RuntimeError
If container type does not have `cover` capability.
RuntimeError
If lid is not a valid lid type.
RuntimeError
If container is already sealed with a seal.
TypeError
If retrieve_lid is not a boolean.
"""
if not isinstance(ref, Container):
raise TypeError("Container to cover must be of type Container.")
if "cover" not in ref.container_type.capabilities:
raise RuntimeError(
f"Container '{ref.name}' type '{ref.container_type.shortname}',"
f" cannot be covered."
)
if lid is None:
lid = ref.container_type.cover_types[0]
if lid not in COVER_TYPES:
raise RuntimeError(f"{lid} is not a valid lid type")
if ref.is_sealed():
raise RuntimeError("A container cannot be covered over a seal.")
if retrieve_lid is not None and not isinstance(retrieve_lid, bool):
raise TypeError("Cover: retrieve_lid must be of type bool")
if not ref.is_covered():
ref.cover = lid
return self._append_and_return(Cover(ref, lid, retrieve_lid))
def uncover(self, ref, store_lid=None):
"""
Remove lid from specified container
Example Usage:
.. code-block:: python
p = Protocol()
sample_plate = p.ref("sample_plate",
None,
"96-flat",
storage="warm_37")
# a plate must have a cover to be uncovered
p.cover(sample_plate, lid="universal")
p.uncover(sample_plate)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"lid": "universal",
"object": "sample_plate",
"op": "cover"
},
{
"object": "sample_plate",
"op": "uncover"
}
]
Parameters
----------
ref : Container
Container to remove lid.
store_lid: bool, optional
Flag to store the uncovered lid.
Returns
-------
Uncover
Returns the :py:class:`autoprotocol.instruction.Uncover`
instruction created from the specified parameters
Raises
------
TypeError
If ref is not of type Container.
RuntimeError
If the container is sealed rather than covered with a lid.
TypeError
If store_lid is not a boolean.
"""
if not isinstance(ref, Container):
raise TypeError("Container to uncover must be of type Container.")
if ref.is_sealed():
raise RuntimeError(
"A container with a seal cannot be uncovered, "
"use the instruction unseal."
)
if store_lid is not None and not isinstance(store_lid, bool):
raise TypeError("Uncover: store_lid must be of type bool")
if ref.is_covered():
ref.cover = None
return self._append_and_return(Uncover(ref, store_lid))
def flow_cytometry(
self,
dataref,
samples,
lasers,
collection_conditions,
width_threshold=None,
window_extension=None,
remove_coincident_events=None,
):
"""
Perform flow cytometry using an explicitly specified, unambiguous set of parameters.
Parameters
----------
dataref : str
Name of dataset that will be returned.
samples : list(Well) or Well or WellGroup
Wells to be analyzed
lasers : list(dict)
See FlowCytometryBuilders.laser.
collection_conditions : dict
See FlowCytometryBuilders.collection_conditions.
width_threshold : int or float, optional
Threshold to determine width measurement.
window_extension : int or float, optional
Front and rear window extension.
remove_coincident_events : bool, optional
Remove coincident events.
Returns
-------
FlowCytometry
Returns a :py:class:`autoprotocol.instruction.FlowCytometry`
instruction created from the specified parameters.
Raises
------
TypeError
If `lasers` is not of type list.
TypeError
If `samples` is not of type Well, list of Well, or WellGroup.
TypeError
If `width_threshold` is not a number.
TypeError
If `window_extension` is not a number.
TypeError
If `remove_coincident_events` is not of type bool.
Examples
--------
Example flow cytometry protocol
.. code-block:: python
p = Protocol()
plate = p.ref("sample-plate", cont_type="384-flat", discard=True)
lasers = [FlowCytometry.builders.laser(
excitation="405:nanometers",
channels=[
FlowCytometry.builders.channel(
emission_filter=FlowCytometry.builders.emission_filter(
channel_name="VL1",
shortpass="415:nanometers",
longpass="465:nanometers"
),
detector_gain="10:millivolts"
)
]
)]
collection_conds = FlowCytometry.builders.collection_conditions(
acquisition_volume="5.0:ul",
flowrate="12.5:ul/min",
wait_time="10:seconds",
mix_cycles=10,
mix_volume="10:ul",
rinse_cycles=10
)
p.flow_cytometry("flow-1234", plate.wells_from(0, 3), lasers,
collection_conds)
Autoprotocol Output:
.. code-block:: json
{
"op": "flow_cytometry",
"dataref": "flow-1234",
"samples": [
"sample-plate/0",
"sample-plate/1",
"sample-plate/2"
],
"lasers": [
{
"excitation": "405:nanometer",
"channels": [
{
"emission_filter": {
"channel_name": "VL1",
"shortpass": "415:nanometer",
"longpass": "465:nanometer"
},
"detector_gain": "10:millivolt"
}
]
}
],
"collection_conditions": {
"acquisition_volume": "5:microliter",
"flowrate": "12.5:microliter/minute",
"stop_criteria": {
"volume": "5:microliter"
},
"wait_time": "10:second",
"mix_cycles": 10,
"mix_volume": "10:microliter",
"rinse_cycles": 10
}
}
"""
if not isinstance(lasers, list):
raise TypeError("lasers must be of type list.")
if not is_valid_well(samples):
raise TypeError(
"samples must be of type Well, list of Well, " "or WellGroup."
)
if width_threshold is not None:
if not isinstance(width_threshold, Number):
raise TypeError("width_threshold must be a number.")
if window_extension is not None:
if not isinstance(window_extension, Number):
raise TypeError("window_extension must be a number.")
if remove_coincident_events is not None:
if not isinstance(remove_coincident_events, bool):
raise TypeError("remove_coincident_events must be of type " "bool.")
lasers = [FlowCytometry.builders.laser(**_) for _ in lasers]
collection_conditions = FlowCytometry.builders.collection_conditions(
**collection_conditions
)
return self._append_and_return(
FlowCytometry(
dataref,
samples,
lasers,
collection_conditions,
width_threshold,
window_extension,
remove_coincident_events,
)
)
def flow_analyze(
self, dataref, FSC, SSC, neg_controls, samples, colors=None, pos_controls=None
):
"""
Perform flow cytometry. The instruction will be executed within the
voltage range specified for each channel, optimized for the best sample
separation/distribution that can be achieved within these limits. The
vendor will specify the device that this instruction is executed on and
which excitation and emission spectra are available. At least one
negative control is required, which will be used to define data
acquisition parameters as well as to determine any autofluorescent
properties for the sample set. Additional negative and positive control
samples are optional. Positive control samples will be used to
optimize single color signals and, if desired, to minimize bleed into
other channels.
For each sample this instruction asks you to specify the `volume`
and/or `captured_events`. Vendors might also require `captured_events`
in case their device does not support volumetric sample intake. If
both conditions are supported, the vendor will specify if data will be
collected only until the first one is met or until both conditions are
fulfilled.
Example Usage:
.. code-block:: python
p = Protocol()
dataref = "test_ref"
FSC = {"voltage_range": {"low": "230:volt", "high": "280:volt"},
"area": True, "height": True, "weight": False}
SSC = {"voltage_range": {"low": "230:volt", "high": "280:volt"},
"area": True, "height": True, "weight": False}
neg_controls = {"well": "well0", "volume": "100:microliter",
"captured_events": 5, "channel": "channel0"}
samples = [
{
"well": "well0",
"volume": "100:microliter",
"captured_events": 9
}
]
p.flow_analyze(dataref, FSC, SSC, neg_controls,
samples, colors=None, pos_controls=None)
Autoprotocol Output:
.. code-block:: json
{
"channels": {
"FSC": {
"voltage_range": {
"high": "280:volt",
"low": "230:volt"
},
"area": true,
"height": true,
"weight": false
},
"SSC": {
"voltage_range": {
"high": "280:volt",
"low": "230:volt"
},
"area": true,
"height": true,
"weight": false
}
},
"op": "flow_analyze",
"negative_controls": {
"channel": "channel0",
"well": "well0",
"volume": "100:microliter",
"captured_events": 5
},
"dataref": "test_ref",
"samples": [
{
"well": "well0",
"volume": "100:microliter",
"captured_events": 9
}
]
}
Parameters
----------
dataref : str
Name of flow analysis dataset generated.
FSC : dict
Dictionary containing FSC channel parameters in the form of:
.. code-block:: none
{
"voltage_range": {
"low": "230:volt",
"high": "280:volt"
},
"area": true, //default: true
"height": true, //default: true
"weight": false //default: false
}
SSC : dict
Dictionary of SSC channel parameters in the form of:
.. code-block:: none
{
"voltage_range": {
"low": <voltage>,
"high": <voltage>
},
"area": true, //default: true
"height": true, //default: false
"weight": false //default: false
}
neg_controls : list(dict)
List of negative control wells in the form of:
.. code-block:: none
{
"well": well,
"volume": volume,
"captured_events": integer, // optional, default infinity
"channel": [channel_name]
}
at least one negative control is required.
samples : list(dict)
List of samples in the form of:
.. code-block:: none
{
"well": well,
"volume": volume,
"captured_events": integer // optional, default infinity
}
at least one sample is required
colors : list(dict), optional
Optional list of colors in the form of:
.. code-block:: none
[
{
"name": "FitC",
"emission_wavelength": "495:nanometer",
"excitation_wavelength": "519:nanometer",
"voltage_range": {
"low": <voltage>,
"high": <voltage>
},
"area": true, //default: true
"height": false, //default: false
"weight": false //default: false
}, ...
]
pos_controls : list(dict), optional
Optional list of positive control wells in the form of:
.. code-block:: none
[
{
"well": well,
"volume": volume,
"captured_events": integer, // default: infinity
"channel": [channel_name],
"minimize_bleed": [{ // optional
"from": color,
"to": [color]
}, ...]
}, ...
]
Returns
-------
FlowAnalyze
Returns the :py:class:`autoprotocol.instruction.FlowAnalyze`
instruction created from the specified parameters
Raises
------
TypeError
If inputs are not of the correct type.
UnitError
If unit inputs are not properly formatted.
AssertionError
If required parameters are missing.
ValueError
If volumes are not correctly formatted or present.
"""
sources = []
controls = []
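# `sources` collects every well to be validated and aspirated (samples
# plus all controls); `controls` collects only control wells, which must
# additionally specify their channels (and, optionally, minimize_bleed).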
if not isinstance(samples, list):
raise TypeError("Samples must be of type list.")
else:
sources.extend(samples)
if not isinstance(neg_controls, list):
raise TypeError("Neg_controls must be of type list.")
else:
sources.extend(neg_controls)
controls.extend(neg_controls)
if pos_controls and not isinstance(pos_controls, list):
raise TypeError("Pos_controls must be of type list.")
elif pos_controls:
sources.extend(pos_controls)
controls.extend(pos_controls)
for s in sources:
if not isinstance(s.get("well"), Well):
raise TypeError(
"The well for each sample or control must " "be of type Well."
)
try:
Unit(s.get("volume"))
except (ValueError, TypeError) as e:
raise ValueError(
f"Each sample or control must indicate a "
f"volume of type unit. {e}"
)
if s.get("captured_events") and not isinstance(
s.get("captured_events"), int
):
raise TypeError(
"captured_events is optional; if given it"
" must be of type integer."
)
for c in controls:
if not isinstance(c.get("channel"), list):
raise TypeError(
"Channel must be a list of strings "
"indicating the colors/channels that this"
" control is to be used for."
)
if c.get("minimize_bleed") and not isinstance(
c.get("minimize_bleed"), list
):
raise TypeError("Minimize_bleed must be of type list.")
elif c.get("minimize_bleed"):
for b in c["minimize_bleed"]:
if not isinstance(b, dict):
raise TypeError(
"Minimize_bleed must be a list of "
"dictionaries. Dictionary was not found"
)
else:
if not b.get("from"):
raise ValueError(
"Minimize_bleed dictionaries must" " have a key `from`"
)
else:
if not isinstance(b["from"], str):
raise TypeError(
"Minimize_bleed `from` must "
"have a string as value"
)
if not b.get("to"):
raise ValueError(
"Minimize_bleed dictionaries must" " have a key `to`"
)
else:
if not isinstance(b["to"], list):
raise ValueError(
"Minimize_bleed `to` must " "have a list as value"
)
else:
for t in b["to"]:
if not isinstance(t, str):
raise TypeError(
"Minimize_bleed `to` "
"list must contain "
"strings."
)
assert FSC and SSC, "You must include parameters for FSC and SSC " "channels."
channels = {}
channels["FSC"] = FSC
channels["SSC"] = SSC
for c in channels.values():
if not isinstance(c, dict):
raise TypeError("Each channel must be of type dict.")
assert c["voltage_range"], (
"You must include a voltage_range for" " each channel."
)
assert c["voltage_range"]["high"], (
"You must include an upper "
"limit for the voltage range "
"in each channel."
)
assert c["voltage_range"]["low"], (
"You must include a lower "
"limit for the voltage range "
"in each channel."
)
if colors:
if not isinstance(colors, list):
raise TypeError("Colors must be of type list.")
else:
for c in colors:
if not isinstance(c, dict):
raise TypeError("Colors must contain elements of " "type dict.")
else:
if not c.get("name") or not isinstance(c.get("name"), str):
raise TypeError(
"Each color must have a `name` "
"that is of type string."
)
if c.get("emission_wavelength"):
try:
Unit(c.get("emission_wavelength"))
except (UnitError):
raise UnitError(
"Each `emission_wavelength` "
"must be of type unit."
)
else:
raise ValueError(
"Each color must have an " "`emission_wavelength`."
)
if c.get("excitation_wavelength"):
try:
Unit(c.get("excitation_wavelength"))
except (UnitError):
raise UnitError(
"Each `excitation_wavelength` "
"must be of type unit."
)
else:
raise ValueError(
"Each color must have an " "`excitation_wavelength`."
)
for s in sources:
self._remove_cover(s["well"].container, "flow_analyze")
return self._append_and_return(
FlowAnalyze(dataref, FSC, SSC, neg_controls, samples, colors, pos_controls)
)
def oligosynthesize(self, oligos):
"""
Specify a list of oligonucleotides to be synthesized and a destination
for each product.
Example Usage:
.. code-block:: python
oligo_1 = p.ref("oligo_1", None, "micro-1.5", discard=True)
p.oligosynthesize([{"sequence": "CATGGTCCCCTGCACAGG",
"destination": oligo_1.well(0),
"scale": "25nm",
"purification": "standard"}])
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"oligos": [
{
"destination": "oligo_1/0",
"sequence": "CATGGTCCCCTGCACAGG",
"scale": "25nm",
"purification": "standard"
}
],
"op": "oligosynthesize"
}
]
Parameters
----------
oligos : list(dict)
List of oligonucleotides to synthesize. Each dictionary should
contain the oligo's sequence, destination, scale and purification
.. code-block:: none
[
{
"destination": "my_plate/A1",
"sequence": "GATCRYMKSWHBVDN",
// - standard IUPAC base codes
// - IDT also allows rX (RNA), mX (2' O-methyl RNA), and
// X*/rX*/mX* (phosphorothioated)
// - they also allow inline annotations for
// modifications,
// e.g. "GCGACTC/3Phos/" for a 3' phosphorylation
// e.g. "aggg/iAzideN/cgcgc" for an
// internal modification
"scale": "25nm" | "100nm" | "250nm" | "1um",
"purification": "standard" | "page" | "hplc",
// default: standard
}, ...
]
Returns
-------
Oligosynthesize
Returns the :py:class:`autoprotocol.instruction.Oligosynthesize`
instruction created from the specified parameters
"""
return self._append_and_return(Oligosynthesize(oligos))
def autopick(self, sources, dests, min_abort=0, criteria=None, dataref="autopick"):
"""
Pick colonies from the agar-containing location(s) specified in
`sources` to the location(s) specified in `dests` in highest to lowest
rank order until there are no more colonies available. If fewer than
min_abort pickable colonies have been identified from the location(s)
specified in `sources`, the run will stop and no further instructions
will be executed.
Example Usage:
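The sketch below is illustrative only; the container types, well
selections, and dataref are placeholders.
.. code-block:: python
p = Protocol()
# placeholder containers and dataref
agar_plate = p.ref("agar_plate", None, "1-flat", discard=True)
dest_plate = p.ref("dest_plate", None, "96-flat", storage="cold_4")
p.autopick(agar_plate.well(0), dest_plate.wells_from(0, 4),
min_abort=1, dataref="my_autopick")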
Autoprotocol Output:
Parameters
----------
sources : Well or WellGroup or list(Well)
Reference wells containing agar and colonies to pick
dests : Well or WellGroup or list(Well)
List of destination(s) for picked colonies
criteria : dict
Dictionary of autopicking criteria.
min_abort : int, optional
Total number of colonies that must be detected in the aggregate
list of `from` wells to avoid aborting the entire run.
dataref: str
Name of dataset to save the picked colonies to
Returns
-------
Autopick
Returns the :py:class:`autoprotocol.instruction.Autopick`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types for sources and dests
ValueError
Source wells are not all from the same container
"""
# Check valid well inputs
if not is_valid_well(sources):
raise TypeError(
"Source must be of type Well, list of Wells, or " "WellGroup."
)
if not is_valid_well(dests):
raise TypeError(
"Destinations (dests) must be of type Well, "
"list of Wells, or WellGroup."
)
pick = {}
sources = WellGroup(sources)
pick["from"] = sources
if len(set([s.container for s in pick["from"]])) > 1:
raise ValueError(
"All source wells for autopick must exist " "on the same container"
)
dests = WellGroup(dests)
pick["to"] = dests
pick["min_abort"] = min_abort
group = [pick]
for s in pick["from"]:
self._remove_cover(s.container, "autopick")
for d in pick["to"]:
self._remove_cover(d.container, "autopick")
criteria = {} if criteria is None else criteria
return self._append_and_return(Autopick(group, criteria, dataref))
def mag_dry(self, head, container, duration, new_tip=False, new_instruction=False):
"""
Dry beads with magnetized tips above and outside a container for a set
time.
Example Usage:
.. code-block:: python
p = Protocol()
plate = p.ref("plate_0", None, "96-pcr", storage="cold_20")
p.mag_dry("96-pcr", plate, "30:minute", new_tip=False,
new_instruction=False)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"groups": [
[
{
"dry": {
"duration": "30:minute",
"object": "plate_0"
}
}
]
],
"magnetic_head": "96-pcr",
"op": "magnetic_transfer"
}
]
Parameters
----------
head : str
Magnetic head to use for the magnetic bead transfers
container : Container
Container to dry beads above
duration : str or Unit
Time for drying
new_tip : bool
Specify whether to use a new tip to complete the step
new_instruction: bool
Specify whether to create a new magnetic_transfer instruction
Returns
-------
MagneticTransfer
Returns the :py:class:`autoprotocol.instruction.MagneticTransfer`
instruction created from the specified parameters
"""
mag = MagneticTransfer.builders.mag_dry(object=container, duration=duration)
self._remove_cover(container, "mag_dry")
return self._add_mag(mag, head, new_tip, new_instruction, "dry")
def mag_incubate(
self,
head,
container,
duration,
magnetize=False,
tip_position=1.5,
temperature=None,
new_tip=False,
new_instruction=False,
):
"""
Incubate the container for a set time with tips set at `tip_position`.
Example Usage:
.. code-block:: python
p = Protocol()
plate = p.ref("plate_0", None, "96-pcr", storage="cold_20")
p.mag_incubate("96-pcr", plate, "30:minute", magnetize=False,
tip_position=1.5, temperature=None, new_tip=False)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"groups": [
[
{
"incubate": {
"duration": "30:minute",
"tip_position": 1.5,
"object": "plate_0",
"magnetize": false,
"temperature": null
}
}
]
],
"magnetic_head": "96-pcr",
"op": "magnetic_transfer"
}
]
Parameters
----------
head : str
Magnetic head to use for the magnetic bead transfers
container : Container
Container to incubate beads
duration : str or Unit
Time for incubation
magnetize : bool
Specify whether to magnetize the tips
tip_position : float
Position relative to well height that tips are held
temperature: str or Unit, optional
Temperature heat block is set at
new_tip : bool
Specify whether to use a new tip to complete the step
new_instruction: bool
Specify whether to create a new magnetic_transfer instruction
Returns
-------
MagneticTransfer
Returns the :py:class:`autoprotocol.instruction.MagneticTransfer`
instruction created from the specified parameters
"""
mag = MagneticTransfer.builders.mag_incubate(
object=container,
duration=duration,
magnetize=magnetize,
tip_position=tip_position,
temperature=temperature,
)
self._remove_cover(container, "mag_incubate")
return self._add_mag(mag, head, new_tip, new_instruction, "incubate")
def mag_collect(
self,
head,
container,
cycles,
pause_duration,
bottom_position=0.0,
temperature=None,
new_tip=False,
new_instruction=False,
):
"""
Collect beads from a container by cycling magnetized tips in and out
of the container with an optional pause at the bottom of the insertion.
Example Usage:
.. code-block:: python
p = Protocol()
plate = p.ref("plate_0", None, "96-pcr", storage="cold_20")
p.mag_collect("96-pcr", plate, 5, "30:second", bottom_position=
0.0, temperature=None, new_tip=False,
new_instruction=False)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"groups": [
[
{
"collect": {
"bottom_position": 0,
"object": "plate_0",
"temperature": null,
"cycles": 5,
"pause_duration": "30:second"
}
}
]
],
"magnetic_head": "96-pcr",
"op": "magnetic_transfer"
}
]
Parameters
----------
head : str
Magnetic head to use for the magnetic bead transfers
container : Container
Container to incubate beads
cycles: int
Number of cycles to raise and lower tips
pause_duration : str or Unit
Time tips are paused in bottom position each cycle
bottom_position : float
Position relative to well height that tips are held during pause
temperature: str or Unit
Temperature heat block is set at
new_tip : bool
Specify whether to use a new tip to complete the step
new_instruction: bool
Specify whether to create a new magnetic_transfer instruction
Returns
-------
MagneticTransfer
Returns the :py:class:`autoprotocol.instruction.MagneticTransfer`
instruction created from the specified parameters
"""
mag = MagneticTransfer.builders.mag_collect(
object=container,
cycles=cycles,
pause_duration=pause_duration,
bottom_position=bottom_position,
temperature=temperature,
)
self._remove_cover(container, "mag_collect")
return self._add_mag(mag, head, new_tip, new_instruction, "collect")
def mag_release(
self,
head,
container,
duration,
frequency,
center=0.5,
amplitude=0.5,
temperature=None,
new_tip=False,
new_instruction=False,
):
"""
Release beads into a container by cycling tips in and out of the
container with tips unmagnetized.
Example Usage:
.. code-block:: python
p = Protocol()
plate = p.ref("plate_0", None, "96-pcr", storage="cold_20")
p.mag_release("96-pcr", plate, "30:second", "60:hertz", center=0.75,
amplitude=0.25, temperature=None, new_tip=False,
new_instruction=False)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"groups": [
[
{
"release": {
"center": 0.75,
"object": "plate_0",
"frequency": "60:hertz",
"amplitude": 0.25,
"duration": "30:second",
"temperature": null
}
}
]
],
"magnetic_head": "96-pcr",
"op": "magnetic_transfer"
}
]
Parameters
----------
head : str
Magnetic head to use for the magnetic bead transfers
container : Container
Container to incubate beads
duration : str or Unit
Total time for this sub-operation
frequency : str or Unit
Cycles per second (hertz) that tips are raised and lowered
center : float
Position relative to well height where oscillation is centered
amplitude : float
Distance relative to well height to oscillate around "center"
temperature: str or Unit
Temperature heat block is set at
new_tip : bool
Specify whether to use a new tip to complete the step
new_instruction: bool
Specify whether to create a new magnetic_transfer instruction
Returns
-------
MagneticTransfer
Returns the :py:class:`autoprotocol.instruction.MagneticTransfer`
instruction created from the specified parameters
"""
mag = MagneticTransfer.builders.mag_release(
object=container,
duration=duration,
frequency=frequency,
center=center,
amplitude=amplitude,
temperature=temperature,
)
self._remove_cover(container, "mag_release")
return self._add_mag(mag, head, new_tip, new_instruction, "release")
def mag_mix(
self,
head,
container,
duration,
frequency,
center=0.5,
amplitude=0.5,
magnetize=False,
temperature=None,
new_tip=False,
new_instruction=False,
):
"""
Mix beads in a container by cycling tips in and out of the
container.
Example Usage:
.. code-block:: python
p = Protocol()
plate = p.ref("plate_0", None, "96-pcr", storage="cold_20")
p.mag_mix("96-pcr", plate, "30:second", "60:hertz", center=0.75,
amplitude=0.25, magnetize=True, temperature=None,
new_tip=False, new_instruction=False)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"groups": [
[
{
"mix": {
"center": 0.75,
"object": "plate_0",
"frequency": "60:hertz",
"amplitude": 0.25,
"duration": "30:second",
"magnetize": true,
"temperature": null
}
}
]
],
"magnetic_head": "96-pcr",
"op": "magnetic_transfer"
}
]
Parameters
----------
head : str
Magnetic head to use for the magnetic bead transfers
container : Container
Container to incubate beads
duration : str or Unit
Total time for this sub-operation
frequency : str or Unit
Cycles per second (hertz) that tips are raised and lowered
center : float
Position relative to well height where oscillation is centered
amplitude : float
Distance relative to well height to oscillate around "center"
magnetize : bool
Specify whether to magnetize the tips
temperature: str or Unit
Temperature heat block is set at
new_tip : bool
Specify whether to use a new tip to complete the step
new_instruction: bool
Specify whether to create a new magnetic_transfer instruction
Returns
-------
MagneticTransfer
Returns the :py:class:`autoprotocol.instruction.MagneticTransfer`
instruction created from the specified parameters
"""
mag = MagneticTransfer.builders.mag_mix(
object=container,
duration=duration,
frequency=frequency,
center=center,
amplitude=amplitude,
magnetize=magnetize,
temperature=temperature,
)
self._remove_cover(container, "mag_mix")
return self._add_mag(mag, head, new_tip, new_instruction, "mix")
def image_plate(self, ref, mode, dataref):
"""
Capture an image of the specified container.
Example Usage:
.. code-block:: python
p = Protocol()
agar_plate = p.ref("agar_plate", None, "1-flat", discard=True)
bact = p.ref("bacteria", None, "micro-1.5", discard=True)
p.spread(bact.well(0), agar_plate.well(0), "55:microliter")
p.incubate(agar_plate, "warm_37", "18:hour")
p.image_plate(agar_plate, mode="top", dataref="my_plate_image_1")
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"bacteria": {
"new": "micro-1.5",
"discard": true
},
"agar_plate": {
"new": "1-flat",
"discard": true
}
},
"instructions": [
{
"volume": "55.0:microliter",
"to": "agar_plate/0",
"from": "bacteria/0",
"op": "spread"
},
{
"where": "warm_37",
"object": "agar_plate",
"co2_percent": 0,
"duration": "18:hour",
"shaking": false,
"op": "incubate"
},
{
"dataref": "my_plate_image_1",
"object": "agar_plate",
"mode": "top",
"op": "image_plate"
}
]
}
Parameters
----------
ref : str or Container
Container to take image of
mode : str
Imaging mode (currently supported: "top")
dataref : str
Name of data reference of resulting image
Returns
-------
ImagePlate
Returns the :py:class:`autoprotocol.instruction.ImagePlate`
instruction created from the specified parameters
"""
return self._append_and_return(ImagePlate(ref, mode, dataref))
def provision(self, resource_id, dests, amounts=None, volumes=None):
"""
Provision a commercial resource from a catalog into the specified
destination well(s). A new tip is used for each destination well
specified to avoid contamination.
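Example Usage (an illustrative sketch; the resource ID, container type,
and amount below are placeholders):
.. code-block:: python
p = Protocol()
# "rs_example_resource" is a placeholder catalog ID
dest_plate = p.ref("dest_plate", None, "96-flat", storage="cold_4")
p.provision("rs_example_resource", dest_plate.wells_from(0, 3),
amounts="50:microliter")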
Parameters
----------
resource_id : str
Resource ID from catalog.
dests : Well or WellGroup or list(Well)
Destination(s) for specified resource.
amounts : str or Unit or list(str) or list(Unit)
Volume(s) or mass(es) of the resource to transfer to each destination
well. If a single volume or mass is specified, each destination well
receives that amount of the resource. If destinations should receive
different volumes or masses, each one should be specified explicitly
in a list matching the order of the specified destinations.
Note: the volumes and amounts arguments are mutually exclusive;
exactly one of them must be provided.
volumes : str or Unit or list(str) or list(Unit)
Volume of the resource to transfer to each destination well. If a
single volume is specified, each destination well receives that
volume of the resource. If destinations should receive different
volumes, each one should be specified explicitly in a list matching
the order of the specified destinations.
Note: the volumes and amounts arguments are mutually exclusive;
exactly one of them must be provided.
Raises
------
TypeError
If resource_id is not a string.
TypeError
If the unit provided is not supported
TypeError
If volume or mass is not specified as a string or Unit (or a list of either)
RuntimeError
If length of the list of volumes or masses specified does not match the number
of destination wells specified.
ValueError
If the resource measurement mode is volume and the provision exceeds the maximum capacity of the well.
ValueError
If provisioning of the given volumes or amounts is not supported.
Returns
-------
list(Provision)
:py:class:`autoprotocol.instruction.Provision` instruction object(s) to be appended and returned
"""
# Check valid well inputs
if not is_valid_well(dests):
raise TypeError(
"Destinations (dests) must be of type Well, "
"list of Wells, or WellGroup."
)
dests = WellGroup(dests)
if not isinstance(resource_id, str):
raise TypeError("Resource ID must be a string.")
if (volumes is None and amounts is None) or (volumes and amounts):
raise ValueError(
"Either volumes or amounts should have value(s), but not both."
)
if volumes:
provision_amounts = volumes
else:
provision_amounts = amounts
if not isinstance(provision_amounts, list):
provision_amounts = [Unit(provision_amounts)] * len(dests)
else:
if len(provision_amounts) != len(dests):
raise RuntimeError(
"To provision a resource into multiple "
"destinations with multiple volumes or masses, the "
"list of volumes or masses must correspond with the "
"destinations in length and in order."
)
provision_amounts = [
parse_unit(v, ["liter", "gram"]) for v in provision_amounts
]
measurement_mode = self._identify_provision_mode(provision_amounts)
provision_instructions_to_return = []
for d, amount in zip(dests, provision_amounts):
if d.container.is_covered() or d.container.is_sealed():
self._remove_cover(d.container, "provision")
xfer = {"well": d, measurement_mode: amount}
if measurement_mode == "volume":
d_max_vol = d.container.container_type.true_max_vol_ul
if amount > d_max_vol:
raise ValueError(
f"The volume you are trying to provision ({amount}) exceeds the "
f"maximum capacity of this well ({d_max_vol})."
)
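# Volume provisions above 900 microliters are split into a 900 uL
# transfer followed by a recursive provision of the remainder.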
if amount > Unit(900, "microliter"):
diff = amount - Unit(900, "microliter")
provision_instructions_to_return.extend(
self.provision(resource_id, d, Unit(900, "microliter"))
)
while diff > Unit(0.0, "microliter"):
provision_instructions_to_return.extend(
self.provision(resource_id, d, diff)
)
diff -= diff
continue
if d.volume:
d.volume += amount
else:
d.set_volume(amount)
dest_group = [xfer]
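# Merge this transfer into the previous instruction when it provisions
# the same resource into the same container; otherwise emit a new
# Provision instruction.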
if (
self.instructions
and self.instructions[-1].op == "provision"
and self.instructions[-1].resource_id == resource_id
and self.instructions[-1].to[-1]["well"].container == d.container
):
self.instructions[-1].to.append(xfer)
else:
provision_instructions_to_return.append(
self._append_and_return(
Provision(resource_id, dest_group, measurement_mode)
)
)
return provision_instructions_to_return
def _identify_provision_mode(self, provision_amounts):
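"""
Infer whether the given provision amounts are specified by volume or by
mass from each Unit's dimensionality. All amounts must share a single
measurement mode; otherwise a ValueError is raised.
"""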
unique_measure_modes = set()
for amount in provision_amounts:
if not isinstance(amount, Unit):
raise TypeError(f"Provided amount {amount} is not supported.")
if amount.dimensionality == Unit(1, "liter").dimensionality:
unique_measure_modes.add("volume")
elif amount.dimensionality == Unit(1, "gram").dimensionality:
unique_measure_modes.add("mass")
else:
raise ValueError(
f"Provisioning of resources with measurement unit of {amount.unit} is not supported."
)
if len(unique_measure_modes) != 1:
raise ValueError("Received amounts with more than one measurement mode")
measurement_mode = unique_measure_modes.pop()
return measurement_mode
def flash_freeze(self, container, duration):
"""
Flash freeze the contents of the specified container by submerging it
in liquid nitrogen for the specified amount of time.
Example Usage:
.. code-block:: python
p = Protocol()
sample = p.ref("liquid_sample", None, "micro-1.5", discard=True)
p.flash_freeze(sample, "25:second")
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"liquid_sample": {
"new": "micro-1.5",
"discard": true
}
},
"instructions": [
{
"duration": "25:second",
"object": "liquid_sample",
"op": "flash_freeze"
}
]
}
Parameters
----------
container : Container or str
Container to be flash frozen.
duration : str or Unit
Duration to submerge specified container in liquid nitrogen.
Returns
-------
FlashFreeze
Returns the :py:class:`autoprotocol.instruction.FlashFreeze`
instruction created from the specified parameters
"""
return self._append_and_return(FlashFreeze(container, duration))
def sonicate(
self, wells, duration, mode, mode_params, frequency=None, temperature=None
):
"""
Sonicate wells using high intensity ultrasonic vibrations.
Example Usage:
.. code-block:: python
p = Protocol()
sample_wells = p.ref("sample_plate",
None,
"96-pcr",
storage="warm_37").wells_from(0,2)
p.sonicate(sample_wells, duration="1:minute",
mode="bath",
mode_params={"sample_holder": "suspender"})
Autoprotocol Output:
.. code-block:: json
{
"op": "sonicate",
"wells": ["sample_plate/0", "sample_plate/1"],
"mode": "bath",
"duration": "1:minute",
"temperature": "ambient",
"frequency": "40:kilohertz",
"mode_params": {
"sample_holder": "suspender"
}
}
Parameters
----------
wells : Well or WellGroup or list(Well)
Wells to be sonicated
duration : Unit or str
Duration for which to sonicate wells
mode: Enum({"bath", "horn"})
Sonicating method to be used, must be "horn" or "bath". Sonicate
mode "horn" uses metal probe to create a localized shear force
directly in the sample media; "bath" mode applies ultrasound to
wells held inside a bath.
temperature: Unit or str, optional
Temperature at which the sample is kept during sonication. Optional,
defaults to ambient
frequency: Unit or str, optional
Frequency of the ultrasonic wave, usually indicated in kHz.
Optional; defaults to the most commonly used frequency for each
mode: 20 kHz for `horn`, and 40 kHz for `bath` mode
mode_params: Dict
Dictionary containing mode parameters for the specified mode.
.. code-block:: none
{
"mode": "bath",
"mode_params":
{
"sample_holder": Enum({"suspender",
"perforated_container",
"solid_container"})
"power": Unit or str, optional
}
}
- or -
{
"mode": "horn",
"mode_params":
{
"duty_cycle": Float, 0 < value <=1
"amplitude": Unit or str
}
}
Returns
-------
Sonicate
Returns the :py:class:`autoprotocol.instruction.Sonicate`
instruction created from the specified parameters
Raises
------
RuntimeError
If valid mode is not specified.
TypeError
If wells not of type WellGroup or List of Wells.
ValueError
If invalid `mode_params` for specified mode.
TypeError
If invalid `mode_params` type for specified mode.
"""
sonic_modes = ["bath", "horn"]
if mode not in sonic_modes:
raise RuntimeError(f"{mode} is not a valid sonication mode")
if not isinstance(mode_params, dict):
raise TypeError(f"Invalid mode_params {mode_params}, must be a dict")
parsed_mode_params = {}
if mode == "bath":
valid_sample_holders = [
"suspender",
"perforated_container",
"solid_container",
]
valid_mode_params = ["sample_holder", "power"]
sample_holder = mode_params.get("sample_holder")
if sample_holder not in valid_sample_holders:
raise ValueError(
f"'sample_holder' must be specified in 'mode_params' mode: "
f"{mode} and must be one of {valid_sample_holders}."
)
parsed_mode_params["sample_holder"] = sample_holder
power = mode_params.get("power")
if power:
parsed_power = parse_unit(power, "power-watt")
parsed_mode_params["power"] = parsed_power
frequency = frequency or "40:kilohertz"
if mode == "horn":
valid_mode_params = ["duty_cycle", "amplitude"]
if not all(k in mode_params for k in valid_mode_params):
raise ValueError(
f"Incorrect mode_params. All of "
f"{valid_mode_params} must be included in "
f"mode: {mode}"
)
duty_cycle = mode_params["duty_cycle"]
amplitude = mode_params["amplitude"]
if not isinstance(duty_cycle, (int, float)):
raise TypeError(f"Invalid duty_cycle {duty_cycle}, must be a decimal")
duty_cycle = float(duty_cycle)
if not 0 <= duty_cycle <= 1:
raise ValueError(
f"Invalid duty_cycle {duty_cycle}, must be between 0 and "
f"1 (inclusive)."
)
parsed_mode_params["duty_cycle"] = duty_cycle
parsed_amplitude = parse_unit(amplitude, "micrometer")
parsed_mode_params["amplitude"] = parsed_amplitude
frequency = frequency or "20:kilohertz"
if not is_valid_well(wells):
raise TypeError("Wells must be of type Well, list of Wells, or WellGroup.")
wells = WellGroup(wells)
parsed_duration = parse_unit(duration, "seconds")
parsed_frequency = parse_unit(frequency, "hertz")
if temperature:
parsed_temperature = parse_unit(temperature, "celsius")
else:
parsed_temperature = None
return self._append_and_return(
Sonicate(
wells,
parsed_duration,
mode,
parsed_mode_params,
parsed_frequency,
parsed_temperature,
)
)
def spe(
self,
well,
cartridge,
pressure_mode,
load_sample,
elute,
condition=None,
equilibrate=None,
rinse=None,
):
"""
Apply a solid phase extraction (spe) technique to a sample.
Example Usage:
.. code-block:: python
p = Protocol()
elute_params = [
SPE.builders.mobile_phase_params(
is_elute=True,
volume="2:microliter",
loading_flowrate="100:ul/second",
settle_time="2:minute",
processing_time="3:minute",
flow_pressure="2:bar",
resource_id="solvent_a",
destination_well=p.ref("Elute %s" % i, None,
"micro-1.5",
discard=True).well(0))
for i in range(3)
]
sample_loading_params = SPE.builders.mobile_phase_params(
volume="10:microliter", loading_flowrate="1:ul/second",
settle_time="2:minute", processing_time="3:minute",
flow_pressure="2:bar", is_sample=True)
cartridge = "spe_cartridge"
sample = p.ref("Sample", None, "micro-1.5", discard=True).well(0)
p.spe(sample, cartridge, "positive",
load_sample=sample_loading_params, elute=elute_params)
Autoprotocol Output:
.. code-block:: none
"instructions": [
{
"op": "spe",
"elute": [
{
"loading_flowrate": "100:microliter/second",
"resource_id": "solvent_a",
"settle_time": "2:minute",
"volume": "2:microliter",
"flow_pressure": "2:bar",
"destination_well": "Elute 0/0",
"processing_time": "3:minute"
},
{
"loading_flowrate": "100:microliter/second",
"resource_id": "solvent_a",
"settle_time": "2:minute",
"volume": "2:microliter",
"flow_pressure": "2:bar",
"destination_well": "Elute 1/0",
"processing_time": "3:minute"
},
{
"loading_flowrate": "100:microliter/second",
"resource_id": "solvent_a",
"settle_time": "2:minute",
"volume": "2:microliter",
"flow_pressure": "2:bar",
"destination_well": "Elute 2/0",
"processing_time": "3:minute"
}
],
"cartridge": "spe_cartridge",
"well": "Sample/0",
"load_sample": {
"flow_pressure": "2:bar",
"loading_flowrate": "1:microliter/second",
"settle_time": "2:minute",
"processing_time": "3:minute",
"volume": "10:microliter"
},
"pressure_mode": "positive"
}
]
Parameters
----------
well : Well
Well to solid phase extract.
cartridge : str
Cartridge to use for solid phase extraction.
pressure_mode : str
The direction of pressure applied to the cartridge to force
liquid flow. One of "positive", "negative".
load_sample: dict
Parameters for applying the sample to the cartridge.
Single 'mobile_phase_param'.
elute: list(dict)
Parameters for applying a mobile phase to the cartridge
with one or more solvents. List of 'mobile_phase_params'.
Requires `destination_well`.
condition: list(dict), optional
Parameters for applying a mobile phase to the cartridge
with one or more solvents. List of 'mobile_phase_params'.
equilibrate: list(dict), optional
Parameters for applying a mobile phase to the cartridge
with one or more solvents. List of 'mobile_phase_params'.
rinse: list(dict), optional
Parameters for applying a mobile phase to the cartridge
with one or more solvents. List of 'mobile_phase_params'.
mobile_phase_params:
resource_id: str
Resource ID of desired solvent.
volume: volume
Volume added to the cartridge.
loading_flowrate: Unit
Speed at which volume is added to cartridge.
settle_time: Unit
Duration for which the solvent remains on the cartridge
before a pressure mode is applied.
processing_time: Unit
Duration for which pressure is applied to the cartridge
after `settle_time` has elapsed.
flow_pressure: Unit
Pressure applied to the column.
destination_well: Well
Destination well for eluate. Required parameter for
each `elute` mobile phase parameter
Returns
-------
SPE
Returns the :py:class:`autoprotocol.instruction.SPE`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types, e.g. well given is not of type Well
ValueError
Wells specified are not from the same container
ValueError
Invalid pressure_mode
ValueError
settle_time, processing_time, flow_pressure not greater than 0
ValueError
If not exactly one elution parameter for each elution container
UnitError
Improperly formatted units for mobile phase parameters
"""
if not is_valid_well(well):
raise TypeError("Well must be of type Well.")
if not isinstance(cartridge, str):
raise TypeError("Cartrige must be of type string.")
valid_pressure_modes = ["positive", "negative"]
if pressure_mode not in valid_pressure_modes:
raise ValueError(
f"'pressure_mode': {pressure_mode} has to be one "
f"of {valid_pressure_modes}"
)
load_sample = SPE.builders.mobile_phase_params(is_sample=True, **load_sample)
SPE.builders.spe_params(elute, is_elute=True)
if condition:
SPE.builders.spe_params(condition)
if equilibrate:
SPE.builders.spe_params(equilibrate)
if rinse:
SPE.builders.spe_params(rinse)
return self._append_and_return(
SPE(
well,
cartridge,
pressure_mode,
load_sample,
elute,
condition,
equilibrate,
rinse,
)
)
def image(
self,
ref,
mode,
dataref,
num_images=1,
backlighting=None,
exposure=None,
magnification=1.0,
):
"""
Capture an image of the specified container.
Example Usage:
.. code-block:: python
p = Protocol()
sample = p.ref("Sample", None, "micro-1.5", discard=True)
p.image(sample, "top", "image_1", num_images=3,
backlighting=False, exposure={"iso": 4},
magnification=1.0)
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"Sample": {
"new": "micro-1.5",
"discard": true
}
},
"instructions": [
{
"magnification": 1.0,
"backlighting": false,
"mode": "top",
"dataref": "image_1",
"object": "Sample",
"num_images": 3,
"op": "image",
"exposure": {
"iso": 4
}
}
]
}
Parameters
----------
ref : Container
Container of which to take image.
mode : Enum("top", "bottom", "side")
Angle of image.
num_images : int
Number of images taken of the container. Defaults to 1.
dataref : str
Name of data reference of resulting image
backlighting : Bool, optional
Whether back-lighting is desired.
magnification : float
Ratio of sizes of the image projected on the camera
sensor compared to the actual size of the object
captured. Defaults to 1.0.
exposure : dict, optional
Parameters to control exposure: "aperture", "iso",
and "shutter_speed".
shutter_speed: Unit, optional
Duration that the imaging sensor is exposed.
iso : Float, optional
Light sensitivity of the imaging sensor.
aperture: Float, optional
Diameter of the lens opening.
Returns
-------
Image
Returns the :py:class:`autoprotocol.instruction.Image`
instruction created from the specified parameters
Raises
------
TypeError
Invalid input types, e.g. num_images is not a positive integer
ValueError
Invalid exposure parameter supplied
"""
valid_image_modes = ["top", "bottom", "side"]
if not isinstance(ref, Container):
raise TypeError(f"image ref: {ref} has to be of type Container")
if mode not in valid_image_modes:
raise ValueError(
f"specified mode: {mode} must be one of " f"{valid_image_modes}"
)
if not isinstance(dataref, str):
raise TypeError("dataref must be of type String.")
if not isinstance(num_images, int) or num_images <= 0:
raise TypeError("num_images must be a positive integer.")
if magnification:
if not isinstance(magnification, (float, int)) or magnification <= 0:
raise TypeError("magnification must be a number.")
if backlighting:
if not isinstance(backlighting, bool):
raise TypeError("backlighting must be a boolean.")
if exposure:
valid_exposure_params = ["shutter_speed", "iso", "aperture"]
if not isinstance(exposure, dict):
raise TypeError(
f"exposure must be a dict with optional keys: "
f"{valid_exposure_params}."
)
if not all(k in valid_exposure_params for k in exposure):
raise ValueError(
f"Invalid exposure param. Valid params: "
f"{valid_exposure_params}."
)
shutter_speed = exposure.get("shutter_speed")
if shutter_speed:
shutter_speed = parse_unit(shutter_speed, "second")
iso = exposure.get("iso")
if iso:
if not isinstance(iso, (float, int)):
raise TypeError("iso must be a number.")
aperture = exposure.get("aperture")
if aperture:
if not isinstance(aperture, (float, int)):
raise TypeError("aperture must be a number.")
return self._append_and_return(
Image(ref, mode, dataref, num_images, backlighting, exposure, magnification)
)
def _ref_for_well(self, well):
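"""Return the Autoprotocol string reference ("<container ref>/<well index>") for a well."""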
return "%s/%d" % (self._ref_for_container(well.container), well.index)
def _ref_for_container(self, container):
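"""Return the ref name under which this protocol registered the given container."""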
for k in self.refs:
v = self.refs[k]
if v.container is container:
return k
def _remove_cover(self, container, action):
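"""Ensure `container` is uncovered before `action`: lids are removed and
seals are peeled; tubes are left untouched; an unrecognized cover type
raises a RuntimeError."""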
if not container.container_type.is_tube:
if not (container.is_covered() or container.is_sealed()):
return
elif container.cover in COVER_TYPES:
self.uncover(container)
elif container.cover in SEAL_TYPES:
self.unseal(container)
else:
raise RuntimeError(
f"The operation {action} requires an uncovered container, "
f"however, {container.cover} is not a recognized cover or "
f"seal type."
)
def _add_cover(self, container, action):
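"""Ensure `container` is covered before `action`: applies the container
type's default cover if available, otherwise its default seal; tubes are
left untouched; raises a RuntimeError if neither is supported."""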
if not container.container_type.is_tube:
if container.is_covered() or container.is_sealed():
return
elif "cover" in container.container_type.capabilities:
self.cover(container, container.container_type.cover_types[0])
elif "seal" in container.container_type.capabilities:
self.seal(container, container.container_type.seal_types[0])
else:
raise RuntimeError(
f"The operation {action} requires a covered container, "
f"however, {container.container_type.name} does not have "
f"a recognized cover or seal type."
)
def _add_seal(self, container, action):
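"""Ensure `container` is sealed before `action`: applies the container
type's default seal; raises a RuntimeError if the container currently has
a lid (which must be removed first) or does not support sealing."""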
if not container.container_type.is_tube:
if container.is_sealed():
return
elif container.is_covered():
raise RuntimeError(
f"The operation {action} requires a sealed container, "
f"however, {container.name} currently hasa lid which needs "
f"to be first removed."
)
if "seal" in container.container_type.capabilities:
self.seal(container, container.container_type.seal_types[0])
else:
raise RuntimeError(
f"The operation {action} requires a sealed container, "
f"however, {container.container_type.name} does not have "
f"a recognized seal type."
)
def _add_mag(self, sub_op, head, new_tip, new_instruction, sub_op_name):
"""
Append given magnetic_transfer groups to protocol
"""
last_instruction = self.instructions[-1] if self.instructions else None
maybe_same_instruction = (
new_instruction is False
and last_instruction
and isinstance(last_instruction, MagneticTransfer)
and last_instruction.data.get("magnetic_head") == head
)
# Overwriting __dict__ since that's edited on __init__ and we use it
# for downstream checks
if maybe_same_instruction and new_tip is True:
new_groups = last_instruction.data.get("groups")
new_groups.append([{sub_op_name: sub_op}])
last_instruction.__dict__ = MagneticTransfer(
groups=new_groups, magnetic_head=head
).__dict__
return last_instruction
elif maybe_same_instruction and new_tip is False:
new_groups = last_instruction.data.get("groups")
new_groups[-1].append({sub_op_name: sub_op})
last_instruction.__dict__ = MagneticTransfer(
groups=new_groups, magnetic_head=head
).__dict__
return last_instruction
else:
return self._append_and_return(
MagneticTransfer(groups=[[{sub_op_name: sub_op}]], magnetic_head=head)
)
# pylint: disable=protected-access
def _refify(self, op_data):
"""
Unpacks protocol objects into Autoprotocol compliant ones
Used by as_dict().
Parameters
----------
op_data: any protocol object
Returns
-------
dict or str or list or any
Autoprotocol compliant objects
"""
if type(op_data) is dict:
return {k: self._refify(v) for k, v in op_data.items()}
elif type(op_data) is list:
return [self._refify(i) for i in op_data]
elif isinstance(op_data, Well):
return self._ref_for_well(op_data)
elif isinstance(op_data, WellGroup):
return [self._ref_for_well(w) for w in op_data.wells]
elif isinstance(op_data, Container):
return self._ref_for_container(op_data)
elif isinstance(op_data, Unit):
return str(op_data)
elif isinstance(op_data, Instruction):
return self._refify(op_data._as_AST())
elif isinstance(op_data, Ref):
return op_data.opts
else:
return op_data
def _ref_containers_and_wells(self, params):
"""
Used by harness.run() to process JSON container and well references
.. code-block:: python
parameters = {
"sample": {
"id": null,
"type": "micro-1.5",
"storage": "cold_4",
"discard": null
},
"mastermix_loc": "sample_plate/A1",
"samples": [
"sample_plate/B1",
"sample_plate/B2",
"sample_plate/B3",
"sample_plate/B4"
]
}
protocol.make_well_references(parameters)
returns:
.. code-block:: python
{
"refs":{
"sample": Container(None, "micro-1.5")
},
"mastermix_loc": protocol.refs["sample_plate"].well("A1"),
"samples": WellGroup([
protocol.refs["sample_plate"].well("B1"),
protocol.refs["sample_plate"].well("B2"),
protocol.refs["sample_plate"].well("B3"),
protocol.refs["sample_plate"].well("B4")
])
}
Parameters
----------
params : dict
A dictionary of parameters to be passed to a protocol.
Returns
-------
dict
Dictionary of containers and wells
Raises
------
RuntimeError
Invalid parameters
"""
parameters = {}
containers = {}
# ref containers
for k, v in params.items():
if isinstance(v, dict):
parameters[str(k)] = self._ref_containers_and_wells(v)
if isinstance(v, list) and isinstance(v[0], dict):
for cont in v:
self._ref_containers_and_wells(cont)
elif isinstance(v, dict) and "type" in v:
if "discard" in v:
discard = v["discard"]
if discard and v.get("storage"):
raise RuntimeError(
"You must either specify a storage "
"condition or set discard to true, "
"not both."
)
else:
discard = False
containers[str(k)] = self.ref(
k, v["id"], v["type"], storage=v.get("storage"), discard=discard
)
else:
parameters[str(k)] = v
parameters["refs"] = containers
# ref wells (must be done after reffing containers)
for k, v in params.items():
if isinstance(v, list) and "/" in str(v[0]):
group = WellGroup([])
for w in v:
cont, well = w.rsplit("/", 1)
group.append(self.refs[cont].container.well(well))
parameters[str(k)] = group
elif "/" in str(v):
ref_name = v.rsplit("/")[0]
if ref_name not in self.refs:
raise RuntimeError(
f"Parameters contain well references to a container that isn't referenced in this protocol: '{ref_name}'."
)
if v.rsplit("/")[1] == "all_wells":
parameters[str(k)] = self.refs[ref_name].container.all_wells()
else:
parameters[str(k)] = self.refs[ref_name].container.well(
v.rsplit("/")[1]
)
else:
parameters[str(k)] = v
return parameters
def measure_concentration(self, wells, dataref, measurement, volume="2:microliter"):
"""
Measure the concentration of DNA, ssDNA, RNA or protein in the
specified volume of the source aliquots.
Example Usage:
.. code-block:: python
p = Protocol()
test_plate = p.ref("test_plate", id=None, cont_type="96-flat",
storage=None, discard=True)
p.measure_concentration(test_plate.wells_from(0, 3), "mc_test",
"DNA")
p.measure_concentration(test_plate.wells_from(3, 3),
dataref="mc_test2", measurement="protein",
volume="4:microliter")
Autoprotocol Output:
.. code-block:: none
{
"refs": {
"test_plate": {
"new": "96-flat",
"discard": true
}
},
"instructions": [
{
"volume": "2.0:microliter",
"dataref": "mc_test",
"object": [
"test_plate/0",
"test_plate/1",
"test_plate/2"
],
"op": "measure_concentration",
"measurement": "DNA"
}, ...
]
}
Parameters
----------
wells : list(Well) or WellGroup or Well
WellGroup of wells to be measured
volume : str or Unit
Volume of sample required for analysis
dataref : str
Name of this specific dataset of measurements
measurement : str
Class of material to be measured. One of ["DNA", "ssDNA", "RNA",
"protein"].
Returns
-------
MeasureConcentration
Returns the
:py:class:`autoprotocol.instruction.MeasureConcentration`
instruction created from the specified parameters
Raises
------
TypeError
`wells` specified is not of a valid input type
"""
if not is_valid_well(wells):
raise TypeError("Wells must be of type Well, list of Wells, or WellGroup.")
wells = WellGroup(wells)
return self._append_and_return(
MeasureConcentration(wells, volume, dataref, measurement)
)
def measure_mass(self, container, dataref):
"""
Measure the mass of a container.
Example Usage:
.. code-block:: python
p = Protocol()
test_plate = p.ref("test_plate", id=None, cont_type="96-flat",
storage=None, discard=True)
p.measure_mass(test_plate, "test_data")
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"test_plate": {
"new": "96-flat",
"discard": true
}
},
"instructions": [
{
"dataref": "test_data",
"object": [
"test_plate"
],
"op": "measure_mass"
}
]
}
Parameters
----------
container : Container
container to be measured
dataref : str
Name of this specific dataset of measurements
Returns
-------
MeasureMass
Returns the :py:class:`autoprotocol.instruction.MeasureMass`
instruction created from the specified parameters
Raises
------
TypeError
Input given is not of type Container
"""
if not isinstance(container, Container):
raise TypeError(f"{container} has to be of type Container")
return self._append_and_return(MeasureMass(container, dataref))
def measure_volume(self, wells, dataref):
"""
Measure the volume of each well in wells.
Example Usage:
.. code-block:: python
p = Protocol()
test_plate = p.ref("test_plate", id=None, cont_type="96-flat",
storage=None, discard=True)
p.measure_volume(test_plate.wells_from(0,2), "test_data")
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"test_plate": {
"new": "96-flat",
"discard": true
}
},
"instructions": [
{
"dataref": "test_data",
"object": [
"test_plate/0",
"test_plate/1"
],
"op": "measure_volume"
}
]
}
Parameters
----------
wells : list(Well) or WellGroup or Well
list of wells to be measured
dataref : str
Name of this specific dataset of measurements
Returns
-------
MeasureVolume
Returns the :py:class:`autoprotocol.instruction.MeasureVolume`
instruction created from the specified parameters
Raises
------
TypeError
`wells` specified is not of a valid input type
"""
if not is_valid_well(wells):
raise TypeError("Wells must be of type Well, list of Wells, or WellGroup.")
wells = WellGroup(wells)
return self._append_and_return(MeasureVolume(wells, dataref))
def count_cells(self, wells, volume, dataref, labels=None):
"""
Count the number of cells in a sample that are positive/negative
for a given set of labels.
Example Usage:
.. code-block:: python
p = Protocol()
cell_suspension = p.ref(
"cells_with_trypan_blue",
id=None,
cont_type="micro-1.5",
discard=True
)
p.count_cells(
cell_suspension.well(0),
"10:microliter",
"my_cell_count",
["trypan_blue"]
)
Autoprotocol Output:
.. code-block:: json
{
"refs": {
"cells_with_trypan_blue": {
"new": "micro-1.5",
"discard": true
}
},
"instructions": [
{
"dataref": "my_cell_count",
"volume": "10:microliter",
"wells": [
"cells_with_trypan_blue/0"
],
"labels": [
"trypan_blue"
],
"op": "count_cells"
}
]
}
Parameters
----------
wells: Well or list(Well) or WellGroup
List of wells that will be used for cell counting.
volume: Unit
Volume that should be consumed from each well for the purpose
of cell counting.
dataref: str
Name of dataset that will be returned.
labels: list(string), optional
Cells will be scored for presence or absence of each label
in this list. If staining is required to visualize these labels,
they must be added before execution of this instruction.
Returns
-------
CountCells
Returns the :py:class:`autoprotocol.instruction.CountCells`
instruction created from the specified parameters
Raises
------
TypeError
`wells` specified is not of a valid input type
"""
# Check valid well inputs
if not is_valid_well(wells):
raise TypeError("Wells must be of type Well, list of Wells, or WellGroup.")
wells = WellGroup(wells)
# Parse volume
parsed_volume = parse_unit(volume, "microliter")
# Eliminate duplicates from labels
parsed_labels = list(set(labels)) if labels else labels
return self._append_and_return(
CountCells(wells, parsed_volume, dataref, parsed_labels)
)
def spectrophotometry(
self,
dataref,
obj,
groups,
interval=None,
num_intervals=None,
temperature=None,
shake_before=None,
):
"""
Generates an instruction with one or more plate reading steps
executed on a single plate with the same device. This could be
executed once, or at a defined interval, across some total duration.
Example Usage:
.. code-block:: python
p = Protocol()
read_plate = p.ref("read plate", cont_type="96-flat", discard=True)
groups = Spectrophotometry.builders.groups(
[
Spectrophotometry.builders.group(
"absorbance",
Spectrophotometry.builders.absorbance_mode_params(
wells=read_plate.wells(0, 1),
wavelength=["100:nanometer", "200:nanometer"],
num_flashes=15,
settle_time="1:second"
)
),
Spectrophotometry.builders.group(
"fluorescence",
Spectrophotometry.builders.fluorescence_mode_params(
wells=read_plate.wells(0, 1),
excitation=[
Spectrophotometry.builders.wavelength_selection(
ideal="650:nanometer"
)
],
emission=[
Spectrophotometry.builders.wavelength_selection(
shortpass="600:nanometer",
longpass="700:nanometer"
)
],
num_flashes=15,
settle_time="1:second",
lag_time="9:second",
integration_time="2:second",
gain=0.3,
read_position="top"
)
),
Spectrophotometry.builders.group(
"luminescence",
Spectrophotometry.builders.luminescence_mode_params(
wells=read_plate.wells(0, 1),
num_flashes=15,
settle_time="1:second",
integration_time="2:second",
gain=0.3
)
),
Spectrophotometry.builders.group(
"shake",
Spectrophotometry.builders.shake_mode_params(
duration="1:second",
frequency="9:hertz",
path="ccw_orbital",
amplitude="1:mm"
)
),
]
)
shake_before = Spectrophotometry.builders.shake_before(
duration="10:minute",
frequency="5:hertz",
path="ccw_orbital",
amplitude="1:mm"
)
p.spectrophotometry(
dataref="test data",
obj=read_plate,
groups=groups,
interval="10:minute",
num_intervals=2,
temperature="37:celsius",
shake_before=shake_before
)
Autoprotocol Output:
.. code-block:: json
{
"op": "spectrophotometry",
"dataref": "test data",
"object": "read plate",
"groups": [
{
"mode": "absorbance",
"mode_params": {
"wells": [
"read plate/0",
"read plate/1"
],
"wavelength": [
"100:nanometer",
"200:nanometer"
],
"num_flashes": 15,
"settle_time": "1:second"
}
},
{
"mode": "fluorescence",
"mode_params": {
"wells": [
"read plate/0",
"read plate/1"
],
"excitation": [
{
"ideal": "650:nanometer"
}
],
"emission": [
{
"shortpass": "600:nanometer",
"longpass": "700:nanometer"
}
],
"num_flashes": 15,
"settle_time": "1:second",
"lag_time": "9:second",
"integration_time": "2:second",
"gain": 0.3,
"read_position": "top"
}
},
{
"mode": "luminescence",
"mode_params": {
"wells": [
"read plate/0",
"read plate/1"
],
"num_flashes": 15,
"settle_time": "1:second",
"integration_time": "2:second",
"gain": 0.3
}
},
{
"mode": "shake",
"mode_params": {
"duration": "1:second",
"frequency": "9:hertz",
"path": "ccw_orbital",
"amplitude": "1:millimeter"
}
}
],
"interval": "10:minute",
"num_intervals": 2,
"temperature": "37:celsius",
"shake_before": {
"duration": "10:minute",
"frequency": "5:hertz",
"path": "ccw_orbital",
"amplitude": "1:millimeter"
}
}
Parameters
----------
dataref : str
Name of the resultant dataset to be returned.
obj : Container or str
Container to be read.
groups : list
A list of groups generated by SpectrophotometryBuilders groups
builders, any of absorbance_mode_params, fluorescence_mode_params,
luminescence_mode_params, or shake_mode_params.
interval : Unit or str, optional
The time between each of the read intervals.
num_intervals : int, optional
The number of times that the groups should be executed.
temperature : Unit or str, optional
The temperature that the entire instruction should be executed at.
shake_before : dict, optional
A dict of params generated by SpectrophotometryBuilders.shake_before
that dictates how the obj should be incubated with shaking before
any of the groups are executed.
Returns
-------
Spectrophotometry
Returns the :py:class:`autoprotocol.instruction.Spectrophotometry`
instruction created from the specified parameters
Raises
------
TypeError
Invalid num_intervals specified, must be an int
ValueError
No interval specified but shake groups specified with no duration
"""
groups = Spectrophotometry.builders.groups(groups)
if interval is not None:
interval = parse_unit(interval, "seconds")
if num_intervals and not isinstance(num_intervals, int):
raise TypeError(f"Invalid num_intervals {num_intervals}, must be an int.")
if temperature is not None:
temperature = parse_unit(temperature, "celsius")
if shake_before is not None:
shake_before = Spectrophotometry.builders.shake_before(**shake_before)
shake_groups = [_ for _ in groups if _["mode"] == "shake"]
any_shake_duration_undefined = any(
_["mode_params"].get("duration") is None for _ in shake_groups
)
if any_shake_duration_undefined and interval is None:
raise ValueError(
"If no interval is specified, then every shake group must "
"include a defined duration."
)
return self._append_and_return(
Spectrophotometry(
dataref=dataref,
object=obj,
groups=groups,
interval=interval,
num_intervals=num_intervals,
temperature=temperature,
shake_before=shake_before,
)
)
# pylint: disable=protected-access
def transfer(
self,
source,
destination,
volume,
rows=1,
columns=1,
source_liquid=LiquidClass,
destination_liquid=LiquidClass,
method=Transfer,
one_tip=False,
density=None,
mode=None,
):
"""Generates LiquidHandle instructions between wells
Transfer liquid between specified pairs of source & destination wells.
Parameters
----------
source : Well or WellGroup or list(Well)
Well(s) to transfer liquid from.
destination : Well or WellGroup or list(Well)
Well(s) to transfer liquid to.
volume : str or Unit or list(str) or list(Unit)
Volume(s) of liquid to be transferred from source wells to
destination wells. The number of volumes specified must
correspond to the number of destination wells.
rows : int, optional
Number of rows to be concurrently transferred
columns : int, optional
Number of columns to be concurrently transferred
source_liquid : LiquidClass or list(LiquidClass), optional
Type(s) of liquid contained in the source Well. This affects the
aspirate and dispense behavior including the flowrates,
liquid level detection thresholds, and physical movements.
destination_liquid : LiquidClass or list(LiquidClass), optional
Type(s) of liquid contained in the destination Well. This affects
liquid level detection thresholds.
method : Transfer or list(Transfer), optional
Integrates with the specified source_liquid and destination_liquid
to define a set of physical movements.
one_tip : bool, optional
If True then a single tip will be used for all operations
density : Unit or str, optional
Density of the liquid to be aspirated/dispensed
mode : str, optional
The liquid handling mode
Returns
-------
list(LiquidHandle)
Returns a list of :py:class:`autoprotocol.instruction.LiquidHandle`
instructions created from the specified parameters
Raises
------
ValueError
if the specified parameters can't be interpreted as lists of
equal length
ValueError
if one_tip is true, but not all transfer methods have a tip_type
Examples
--------
Transfer between two single wells
.. code-block:: python
from autoprotocol import Protocol, Unit
p = Protocol()
source = p.ref("source", cont_type="384-flat", discard=True)
destination = p.ref(
"destination", cont_type="394-pcr", discard=True
)
p.transfer(source.well(0), destination.well(1), "5:ul")
Sequential transfers between two groups of wells
.. code-block:: python
sources = source.wells_from(0, 8, columnwise=True)
dests = destination.wells_from(1, 8, columnwise=True)
volumes = [Unit(x, "ul") for x in range(1, 9)]
p.transfer(sources, dests, volumes)
Concurrent transfers between two groups of wells
.. code-block:: python
# single-column concurrent transfer
p.transfer(
source.well(0), destination.well(1), "5:ul", rows=8
)
# 96-well concurrent transfer from the A1 to B2 quadrants
p.transfer(
source.well(0), destination.well(13), "5:ul", rows=8, columns=12
)
# 384-well concurrent transfer
p.transfer(
source.well(0), destination.well(0), "5:ul", rows=16, columns=24
)
Transfer with extra parameters
.. code-block:: python
from autoprotocol.liquid_handle import Transfer
from autoprotocol.instruction import LiquidHandle
p.transfer(
source.well(0), destination.well(0), "5:ul",
method=Transfer(
mix_before=True,
dispense_z=LiquidHandle.builders.position_z(
reference="well_top"
)
)
)
Transfer using other built in Transfer methods
.. code-block:: python
from autoprotocol.liquid_handle import DryWellTransfer
p.transfer(
source.well(0), destination.well(1), "5:ul",
method=DryWellTransfer
)
For examples of other more complicated behavior, see the
documentation for LiquidHandleMethod.
See Also
--------
Transfer : base LiquidHandleMethod for transfer operations
"""
def location_helper(source, destination, volume, method, density):
"""Generates LiquidHandle transfer locations
Parameters
----------
source : Well
Well to transfer liquid from.
destination : Well
Well to transfer liquid to.
volume : Unit
The volume of liquid to be transferred from source well to
destination well.
method : Transfer
Integrates the two input liquid classes and defines a set of
transfers based on their attributes and methods.
density : Unit
The density of liquid to be aspirated/dispensed.
Returns
-------
list(dict)
LiquidHandle locations
"""
self._remove_cover(source.container, "liquid_handle from")
self._remove_cover(destination.container, "liquid_handle into")
self._transfer_volume(source, destination, volume, method._shape)
return [
LiquidHandle.builders.location(
location=source,
# pylint: disable=protected-access
transports=method._aspirate_transports(volume, density),
),
LiquidHandle.builders.location(
location=destination,
# pylint: disable=protected-access
transports=method._dispense_transports(volume, density),
),
]
# validate parameter types
source = WellGroup(source)
destination = WellGroup(destination)
count = max((len(source), len(destination)))
if len(source) == 1:
source = WellGroup([source[0]] * count)
if len(destination) == 1:
destination = WellGroup([destination[0]] * count)
if not isinstance(volume, list):
volume = [volume] * count
volume = [parse_unit(_, "uL") for _ in volume]
if density:
if not isinstance(density, list):
density = [density] * count
elif len(density) != count:
raise ValueError(
f"the length of provided density: {len(density)} does not match the number of transports."
)
density = [parse_unit(d, "mg/ml") for d in density]
for d in density:
if d.magnitude <= 0:
raise ValueError(f"Density: {d} must be a value larger than 0.")
else:
# if density is None, it should still be a list of None
density = [density] * count
if not isinstance(source_liquid, list):
source_liquid = [source_liquid] * count
source_liquid = [_validate_as_instance(_, LiquidClass) for _ in source_liquid]
if not isinstance(destination_liquid, list):
destination_liquid = [destination_liquid] * count
destination_liquid = [
_validate_as_instance(_, LiquidClass) for _ in destination_liquid
]
if not isinstance(method, list):
method = [method] * count
method = [_validate_as_instance(_, Transfer) for _ in method]
# validate parameter counts
countable_parameters = (
source,
destination,
volume,
density,
source_liquid,
destination_liquid,
method,
)
correct_parameter_counts = all(len(_) == count for _ in countable_parameters)
if not correct_parameter_counts:
raise ValueError(
f"Specified parameters {countable_parameters} could not all be interpreted as the same length {correct_parameter_counts}."
)
# format shape
shape = LiquidHandle.builders.shape(rows, columns, None)
# validate all containers against the shape
for aliquot in sum(source, destination):
container_type = aliquot.container.container_type
_check_container_type_with_shape(container_type, shape)
# apply liquid classes to transfer methods
for src, des, met in zip(source_liquid, destination_liquid, method):
met._shape = LiquidHandle.builders.shape(**shape)
met._source_liquid = src
met._destination_liquid = des
# apply tip types to transfer methods
for vol, met in zip(volume, method):
if met._has_calibration() and not met.tip_type:
try:
met.tip_type = met._rec_tip_type(vol)
except RuntimeError:
met.tip_type = met._get_sorted_tip_types()[-1].name
# if one tip is true then all methods need to have the same tip_type
if one_tip is True:
tip_types = [_.tip_type for _ in method]
if not all(_ == tip_types[0] for _ in tip_types):
raise ValueError(
f"If one_tip is true and any tip_type is set, then all tip types must be the same but {tip_types} was specified."
)
# generate either a LiquidHandle location or instruction list
locations, instructions = [], []
for src, des, vol, met, dens in zip(
source, destination, volume, method, density
):
max_tip_capacity = met._tip_capacity()
remaining_vol = vol
while remaining_vol > Unit(0, "ul"):
transfer_vol = min(remaining_vol, max_tip_capacity)
if one_tip is True:
locations += location_helper(src, des, transfer_vol, met, dens)
else:
location_transports = location_helper(
src, des, transfer_vol, met, dens
)
source_transports = location_transports[0]["transports"]
instruction_mode = mode
if not instruction_mode:
instruction_mode = LiquidHandle.builders.desired_mode(
source_transports, mode
)
instructions.append(
LiquidHandle(
location_transports,
shape=met._shape,
mode=instruction_mode,
mode_params=(
LiquidHandle.builders.instruction_mode_params(
tip_type=met.tip_type
)
),
)
)
remaining_vol -= transfer_vol
# if one tip is true then there's a locations list
if locations:
source_transports = locations[0]["transports"]
# if not mode:
mode = LiquidHandle.builders.desired_mode(source_transports, mode)
instructions.append(
LiquidHandle(
locations,
shape=shape,
mode=mode,
mode_params=LiquidHandle.builders.instruction_mode_params(
tip_type=method[0].tip_type
),
)
)
return self._append_and_return(instructions)
# pylint: disable=protected-access
def mix(
self,
well,
volume,
rows=1,
columns=1,
liquid=LiquidClass,
method=Mix,
one_tip=False,
mode=None,
):
"""Generates LiquidHandle instructions within wells
Mix liquid in specified wells.
Parameters
----------
well : Well or WellGroup or list(Well)
Well(s) to be mixed.
volume : str or Unit or list(str) or list(Unit)
Volume(s) of liquid to be mixed within the specified well(s).
The number of volume(s) specified must correspond with the number
of well(s).
rows : int, optional
Number of rows to be concurrently mixed
columns : int, optional
Number of columns to be concurrently mixed
liquid : LiquidClass or list(LiquidClass), optional
Type(s) of liquid contained in the Well(s). This affects the
aspirate and dispense behavior including the flowrates,
liquid level detection thresholds, and physical movements.
method : Mix or list(Mix), optional
Method(s) that integrate with the specified liquid to
define a set of physical movements.
one_tip : bool, optional
If True then a single tip will be used for all operations
mode : str, optional
The liquid handling mode
Returns
-------
list(LiquidHandle)
Returns a list of :py:class:`autoprotocol.instruction.LiquidHandle`
instructions created from the specified parameters
Raises
------
ValueError
if the specified parameters can't be interpreted as lists of
equal length
ValueError
if one_tip is true, but not all mix methods have a tip_type
ValueError
if the specified volume is larger than the maximum tip capacity
of the available liquid_handling devices for a given mix
Examples
--------
Mix within a single well
.. code-block:: python
from autoprotocol import Protocol, Unit
p = Protocol()
plate = p.ref("example_plate", cont_type="384-flat", discard=True)
p.mix(plate.well(0), "5:ul")
Sequential mixes within multiple wells
.. code-block:: python
wells = plate.wells_from(0, 8, columnwise=True)
volumes = [Unit(x, "ul") for x in range(1, 9)]
p.mix(wells, volumes)
Concurrent mixes within multiple wells
.. code-block:: python
# single-column concurrent mix
p.mix(plate.well(0), "5:ul", rows=8)
# 96-well concurrent mix in the A1 quadrant
p.mix(plate.well(0), "5:ul", rows=8, columns=12)
# 96-well concurrent mix in the A2 quadrant
p.mix(plate.well(1), "5:ul", rows=8, columns=12)
# 384-well concurrent mix
p.mix(plate.well(0), "5:ul", rows=16, columns=24)
Mix with extra parameters
.. code-block:: python
from autoprotocol.liquid_handle import Mix
from autoprotocol.instruction import LiquidHandle
p.mix(
plate.well(0), "5:ul", rows=8,
method=Mix(
mix_params=LiquidHandle.builders.mix(
)
)
)
See Also
--------
Mix : base LiquidHandleMethod for mix operations
"""
def location_helper(aliquot, volume, method):
"""Generates LiquidHandle mix locations
Parameters
----------
aliquot : Well
Wells to transfer mix liquid in.
volume : Unit
The volume of liquid to be transferred within the aliquot.
method : Mix
Integrates the input liquid class and defines a set of
transfers based on its attributes and methods.
Returns
-------
list(dict)
LiquidHandle locations
"""
self._remove_cover(aliquot.container, "liquid_handle in")
return [
LiquidHandle.builders.location(
location=aliquot, transports=method._mix_transports(volume)
)
]
# validate parameter types
well = WellGroup(well)
count = len(well)
if not isinstance(volume, list):
volume = [volume] * count
volume = [parse_unit(_, "uL") for _ in volume]
if not isinstance(liquid, list):
liquid = [liquid] * count
liquid = [_validate_as_instance(_, LiquidClass) for _ in liquid]
if not isinstance(method, list):
method = [method] * count
method = [_validate_as_instance(_, Mix) for _ in method]
# validate parameter counts
countable_parameters = (well, volume, liquid, method)
correct_parameter_counts = all(len(_) == count for _ in countable_parameters)
if not correct_parameter_counts:
raise ValueError(
f"Specified parameters {countable_parameters} could not all be interpreted as the same length {correct_parameter_counts}."
)
# format shape
shape = LiquidHandle.builders.shape(rows, columns, None)
# validate all containers against the shape
for aliquot in well:
container_type = aliquot.container.container_type
_check_container_type_with_shape(container_type, shape)
# apply liquid classes to mix methods
for liq, met in zip(liquid, method):
met._shape = LiquidHandle.builders.shape(**shape)
met._liquid = liq
# apply tip types to mix methods
for vol, met in zip(volume, method):
if met._has_calibration() and not met.tip_type:
try:
met.tip_type = met._rec_tip_type(vol)
except RuntimeError:
met.tip_type = met._get_sorted_tip_types()[-1].name
# if one tip is true then all methods need to have the same tip_type
if one_tip is True:
tip_types = [_.tip_type for _ in method]
if not all(_ == tip_types[0] for _ in tip_types):
raise ValueError(
f"If one_tip is true and any tip_type is set, then all tip types must be the same but {tip_types} was specified."
)
# generate either a LiquidHandle location or instruction list
locations, instructions = [], []
for wel, vol, met in zip(well, volume, method):
max_tip_capacity = met._tip_capacity()
if vol > max_tip_capacity:
raise ValueError(
f"Attempted mix volume {vol} is larger than the maximum capacity of {max_tip_capacity} for transfer shape {vol.shape}."
)
self._remove_cover(wel.container, "liquid_handle mix")
if one_tip is True:
locations += location_helper(wel, vol, met)
else:
location_transports = location_helper(wel, vol, met)
source_transports = location_transports[0]["transports"]
instruction_mode = mode
if not instruction_mode:
instruction_mode = LiquidHandle.builders.desired_mode(
source_transports, mode
)
instructions.append(
LiquidHandle(
location_transports,
shape=met._shape,
mode=instruction_mode,
mode_params=(
LiquidHandle.builders.instruction_mode_params(
tip_type=met.tip_type
)
),
)
)
# if one tip is true then there's a locations list
if locations:
source_transports = locations[0]["transports"]
if not mode:
mode = LiquidHandle.builders.desired_mode(source_transports, mode)
instructions.append(
LiquidHandle(
locations,
shape=shape,
mode=mode,
mode_params=LiquidHandle.builders.instruction_mode_params(
tip_type=method[0].tip_type
),
)
)
return self._append_and_return(instructions)
def spread(
self,
source,
dest,
volume="50:microliter",
dispense_speed="20:microliter/second",
):
"""
Spread the specified volume of the source aliquot across the surface of
the agar contained in the object container.
Uses a spiral pattern generated by a set of liquid_handle instructions.
Example Usage:
.. code-block:: python
p = Protocol()
agar_plate = p.ref("agar_plate", None, "1-flat", discard=True)
bact = p.ref("bacteria", None, "micro-1.5", discard=True)
p.spread(bact.well(0), agar_plate.well(0), "55:microliter")
Parameters
----------
source : Well
Source of material to spread on agar
dest : Well
Reference to destination location (plate containing agar)
volume : str or Unit, optional
Volume of source material to spread on agar
dispense_speed : str or Unit, optional
Speed at which to dispense source aliquot across agar surface
Returns
-------
LiquidHandle
Returns a :py:class:`autoprotocol.instruction.LiquidHandle`
instruction created from the specified parameters
Raises
------
TypeError
If specified source is not of type Well
TypeError
If specified destination is not of type Well
"""
def euclidean_distance(point_a, point_b):
"""
Calculate the euclidean distance between a pair of xy coordinates
Parameters
----------
point_a: Iterable
First point
point_b: Iterable
Second point
Returns
-------
float
The distance between the two points
"""
from math import sqrt
x_distance = abs(point_a[0] - point_b[0])
y_distance = abs(point_a[1] - point_b[1])
return sqrt(x_distance ** 2 + y_distance ** 2)
# Check validity of Well inputs
if not isinstance(source, Well):
raise TypeError("Source must be of type Well.")
if not isinstance(dest, Well):
raise TypeError("Destination, (dest), must be of type Well.")
self._remove_cover(source.container, "spread")
self._remove_cover(dest.container, "spread")
volume = Unit(volume)
if dest.volume:
dest.volume += volume
else:
dest.volume = volume
if source.volume:
source.volume -= volume
aspirate_transport_list = [
LiquidHandle.builders.transport(
mode_params=LiquidHandle.builders.mode_params(
position_z=LiquidHandle.builders.position_z(
reference="liquid_surface",
offset=Unit("-1:mm"),
detection_method="capacitance",
detection_threshold=AGAR_CLLD_THRESHOLD,
)
)
),
LiquidHandle.builders.transport(
volume=-volume,
mode_params=LiquidHandle.builders.mode_params(
position_z=LiquidHandle.builders.position_z(
reference="liquid_surface",
detection_method="tracked",
offset=Unit("-1.0:mm"),
)
),
),
]
dispense_transport_list = [
LiquidHandle.builders.transport(
mode_params=LiquidHandle.builders.mode_params(
position_z=LiquidHandle.builders.position_z(
reference="liquid_surface",
detection_method="capacitance",
detection_threshold=AGAR_CLLD_THRESHOLD,
)
)
)
]
distances = [
euclidean_distance(first, second)
for first, second in zip(SPREAD_PATH, SPREAD_PATH[1:])
]
distance_total = sum(distances)
distance_ratios = [dist / distance_total for dist in distances]
for ratio, position in zip(distance_ratios, SPREAD_PATH[1:]):
dispense_transport_list += [
LiquidHandle.builders.transport(
volume=volume * ratio,
flowrate=LiquidHandle.builders.flowrate(dispense_speed),
mode_params=LiquidHandle.builders.mode_params(
position_x=LiquidHandle.builders.position_xy(position[0]),
position_y=LiquidHandle.builders.position_xy(position[1]),
position_z=LiquidHandle.builders.position_z(
reference="liquid_surface",
detection_method="tracked",
offset=Unit("0.5:mm"),
),
),
)
]
location = [
LiquidHandle.builders.location(
location=source, transports=aspirate_transport_list
),
LiquidHandle.builders.location(
location=dest, transports=dispense_transport_list
),
]
return self._append_and_return(LiquidHandle(location))
def _transfer_volume(self, source, destination, volume, shape):
"""
Transfers volume and properties between aliquots.
Parameters
----------
source : Well
The shape origin to be transferred from
destination : Well
The shape origin to be transferred to
volume : Unit
The volume to be transferred
shape : dict
See Also Instruction.builders.shape
Raises
------
RuntimeError
If the inferred sources and destinations aren't the same length
"""
source_wells = source.container.wells_from_shape(source.index, shape)
dest_wells = destination.container.wells_from_shape(destination.index, shape)
if not len(source_wells) == len(dest_wells):
raise RuntimeError(
f"Transfer source: {source_wells} and destination: "
f"{dest_wells} WellGroups didn't have the same number of wells."
)
for source_well, dest_well in zip(source_wells, dest_wells):
if self.propagate_properties:
dest_well.add_properties(source_well.properties)
if source_well.volume is not None:
source_well.volume -= volume
if dest_well.volume is not None:
dest_well.volume += volume
else:
dest_well.volume = volume
def evaporate(self, ref, mode, duration, evaporator_temperature, mode_params=None):
"""
Removes liquid or moisture from a container using the mode specified.
Example Usage:
.. code-block:: python
p = Protocol()
c = p.ref("container", id=None,
cont_type="micro-1.5", storage="cold_20")
blowdown_params = Evaporate.builders.get_mode_params(
mode="blowdown", mode_params={
"gas":"nitrogen",
"vortex_speed":Unit("200:rpm"),
"blow_rate": "200:uL/sec"
})
p.evaporate(c,
mode="blowdown",
duration="10:minute",
evaporator_temperature="22:degC",
mode_params=blowdown_params)
.. code-block:: json
{
"op": "evaporate",
"ref": "container",
"mode": "blowdown",
"duration": "10:minute",
"evaporator_temperature": "22:degC",
"mode_params": {
"gas": "ntirogen",
"vortex_speed": "200:rpm",
"blow_rate": "200:uL/sec"
}
}
Parameters
----------
ref : Container
Sample container
mode : Str
The mode of evaporation method
duration : Unit or Str
The length of time the sample is evaporated for
evaporator_temperature : Unit or str
The incubation temperature of the sample being evaporated
mode_params : Dict
Dictionary of parameters for evaporation mode
Returns
-------
Evaporate
Returns a :py:class:`autoprotocol.instruction.Evaporate`
instruction created from the specified parameters
Raises
------
TypeError
If the provided object is not a Container type.
ValueError
If the duration is less than 0 minute
TypeError
If evaporator_temperature is not provided in Unit or str
ValueError
If the evaporation_temperature is lower than or equal to
condenser_temperature
"""
duration = parse_unit(duration, "minute")
evaporator_temperature = parse_unit(evaporator_temperature, "celsius")
mode_params = Evaporate.builders.get_mode_params(mode, mode_params)
if not isinstance(ref, Container):
raise TypeError("Param `ref` must be a container object.")
if duration <= Unit("0:minute"):
raise ValueError(
f"Param `duration`: {duration} should be longer than 0 minute."
)
if mode_params:
condenser_temp = mode_params.get("condenser_temperature")
if "condenser_temperature" in mode_params.keys():
if condenser_temp >= evaporator_temperature:
raise ValueError(
f"Param `condenser_temperature`: {condenser_temp} "
"cannot be higher than the evaporator_temperature:"
f" {evaporator_temperature}"
)
return self._append_and_return(
Evaporate(
ref=ref,
duration=duration,
evaporator_temperature=evaporator_temperature,
mode=mode,
mode_params=mode_params,
)
)
| 35.550162
| 212
| 0.488905
|
c28ca180cf30536f8896dcfd8956ae8d688e844a
| 1,397
|
py
|
Python
|
sims/s279/plot-jz-extent.py
|
ammarhakim/ammar-simjournal
|
85b64ddc9556f01a4fab37977864a7d878eac637
|
[
"MIT",
"Unlicense"
] | 1
|
2019-12-19T16:21:13.000Z
|
2019-12-19T16:21:13.000Z
|
sims/s279/plot-jz-extent.py
|
ammarhakim/ammar-simjournal
|
85b64ddc9556f01a4fab37977864a7d878eac637
|
[
"MIT",
"Unlicense"
] | null | null | null |
sims/s279/plot-jz-extent.py
|
ammarhakim/ammar-simjournal
|
85b64ddc9556f01a4fab37977864a7d878eac637
|
[
"MIT",
"Unlicense"
] | 2
|
2020-01-08T06:23:33.000Z
|
2020-01-08T07:06:50.000Z
|
import numpy
from pylab import *
import tables
rc('text', usetex=True)
def findXloc(X, fx, val):
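"""Return the approximate x locations where the sampled profile fx
crosses the value val, taken as the midpoint of each bracketing cell."""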
cross = []
for i in range(X.shape[0]-1):
if val>fx[i] and val<fx[i+1]:
cross.append(0.5*(X[i]+X[i+1]))
elif val<fx[i] and val>fx[i+1]:
cross.append(0.5*(X[i]+X[i+1]))
return cross
LX = 50.0
LY = 25.0
B0 = 1/15.0
n0 = 1.0
mu0 = 1.0
elcCharge = -1.0
ionCharge = 1.0
ionMass = 1.0
elcMass = ionMass/25
va = B0/sqrt(mu0*elcMass*n0)
ionCycl = ionCharge*B0/ionMass
timeSlice = []
extent = []
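# For each saved frame: load the conserved ten-moment fields, compute the
# x-velocity of the first fluid species along the y midplane, and record
# the distance between its minimum and maximum as the current-sheet
# extent.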
for i in range(15,41):
print ('Working on %d ...' % i)
fh = tables.openFile ("s279-gem-tenmom_q_%d.h5" % i)
tm = fh.root.timeData._v_attrs['vsTime']*ionCycl
q = fh.root.StructGridField
nx, ny = q.shape[0], q.shape[1]
X = linspace(-LX/2, LX/2, nx)
Y = linspace(-LY/2, LY/2, ny)
ux = q[:,:,1]/q[:,:,0]
ux1 = ux[:,ny//2]
minix = ux1.argmin()
maxix = ux1.argmax()
minx = X[minix]
maxx = X[maxix]
# store time and current sheet extent
timeSlice.append(fh.root.timeData._v_attrs['vsTime']*ionCycl)
extent.append(maxx-minx)
timeSliceA = numpy.array(timeSlice)
extentA = numpy.array(extent)
plot(timeSliceA, extentA, '-ro')
title('10M Current sheet extension v/s time')
xlabel('Time ($t\Omega_{ci}$)')
ylabel('Extension ($d_i$)')
savefig('s279-10m-current-extension-vs-t.png')
show()
| 22.532258
| 65
| 0.600573
|
f2ece11d0132b9ad0967de59fc926acad68c4053
| 5,492
|
py
|
Python
|
options/base_options.py
|
KshingWang/LesionSeg
|
a3c38aa7481eb7ce6a3b0fe5f9c4b349b8cf0b19
|
[
"BSD-3-Clause"
] | 16
|
2020-02-22T02:05:07.000Z
|
2021-11-18T14:34:53.000Z
|
options/base_options.py
|
KshingWang/LesionSeg
|
a3c38aa7481eb7ce6a3b0fe5f9c4b349b8cf0b19
|
[
"BSD-3-Clause"
] | 9
|
2020-04-30T13:53:39.000Z
|
2021-12-15T09:43:26.000Z
|
options/base_options.py
|
KshingWang/LesionSeg
|
a3c38aa7481eb7ce6a3b0fe5f9c4b349b8cf0b19
|
[
"BSD-3-Clause"
] | 7
|
2020-02-22T18:25:23.000Z
|
2022-01-26T12:06:53.000Z
|
import argparse
import os
from util import util
import torch
import models
import data
from configurations import *
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--dataroot', default=PATH_DATASET)
parser.add_argument('--batch_size', type=int, default=16, help='input batch size')
parser.add_argument('--testSize', type=int, default=256, help='the image size used in val/test phases')
parser.add_argument('--trainSize', type=int, default=128, help='the image size used in train phase')
parser.add_argument('--display_winsize', type=int, default=128, help='display window size for both visdom and HTML')
parser.add_argument('--input_nc', type=int, default=1, help='# of input image channels')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--dataset_mode', type=str, default='ms', help='chooses how datasets are loaded. [unaligned | aligned | single]')
parser.add_argument('--model', type=str, default='ms', help='currently only ms model available')
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--checkpoints_dir', type=str, default='../Checkpoints', help='models are saved here')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_size{trainSize}')
self.initialized = True
return parser
def gather_options(self):
# initialize parser with basic options
if not self.initialized:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with the new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '%s_opt.txt' % opt.phase)
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
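# --- Minimal usage sketch -----------------------------------------------------
# The options class above follows the familiar CycleGAN/pix2pix pattern:
# initialize() registers the shared arguments, gather_options() lets the chosen
# model and dataset register their own options, and parse() finalizes, prints and
# saves them. Assuming a concrete subclass such as TestOptions is defined
# elsewhere in the project, the intended call order is simply:
#
#     opt = TestOptions().parse()    # parse CLI args, print them, set gpu_ids
#     print(opt.model, opt.dataset_mode, opt.gpu_ids)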
| 50.385321
| 223
| 0.641843
|
7f7904395c8a63fe6e64f48c53bf10491a6ef52b
| 7,540
|
py
|
Python
|
20180617/CombineStackingAndCatBoostKfold/Feature.py
|
fengjiaxin/Home_Credit_Default_Risk
|
3407e76b4e5cfb8dd6056d24675b80fe0e82c123
|
[
"Apache-2.0"
] | 26
|
2018-06-13T07:34:16.000Z
|
2020-12-07T16:38:25.000Z
|
20180617/CombineStackingAndCatBoostKfold/Feature.py
|
fengjiaxin/Home_Credit_Default_Risk
|
3407e76b4e5cfb8dd6056d24675b80fe0e82c123
|
[
"Apache-2.0"
] | 1
|
2019-05-02T12:48:31.000Z
|
2019-05-25T09:06:22.000Z
|
20180617/CombineStackingAndCatBoostKfold/Feature.py
|
fengjiaxin/Home_Credit_Default_Risk
|
3407e76b4e5cfb8dd6056d24675b80fe0e82c123
|
[
"Apache-2.0"
] | 6
|
2018-08-02T11:03:33.000Z
|
2021-11-09T10:42:11.000Z
|
# coding:utf-8
import os
import gc
import sys
import importlib
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.classifier import StackingCVClassifier
class Feature(object):
def __init__(self, *, input_path, output_path):
self.__fill_value_transformer_array = importlib.import_module(
"20180603.StackingReallyMax.FillValueTransformerArray"
)
self.__indicate_feature_transformer_array = importlib.import_module(
"20180603.StackingReallyMax.IndicateFeatureTransformerArray"
)
self.__input_path = input_path
self.__output_path = output_path
# data prepare
self.__train, self.__test = [None for _ in range(2)]
self.__train_feature, self.__train_label = [None for _ in range(2)]
self.__test_feature = None
# model fit
self.__rfc = None
self.__etc = None
self.__gbc = None
self.__xbc = None
self.__lr_l1 = None
self.__lr_l2 = None
self.__net = None
self.__knc = None
self.__rfc_pl = None
self.__etc_pl = None
self.__gbc_pl = None
self.__xbc_pl = None
self.__lr_l1_pl = None
self.__lr_l2_pl = None
self.__net_pl = None
self.__knc_pl = None
self.__clf = None
self.__oof_train = None
self.__oof_test = None
# model_feature_output
self.__feature_train = None
self.__feature_test = None
def data_prepare(self):
self.__train = pd.read_csv(os.path.join(self.__input_path, "first_layer_train.csv"))
self.__test = pd.read_csv(os.path.join(self.__input_path, "first_layer_test.csv"))
self.__train_label = self.__train["TARGET"]
self.__train_feature = self.__train.drop(["TARGET"], axis=1)
self.__test_feature = self.__test
del self.__train, self.__test
gc.collect()
def model_fit(self):
self.__rfc = RandomForestClassifier(n_jobs=-1)
self.__etc = ExtraTreesClassifier(n_jobs=-1)
self.__gbc = GradientBoostingClassifier()
self.__xbc = XGBClassifier(n_jobs=-1)
self.__lr_l1 = LogisticRegression(penalty="l1")
self.__lr_l2 = LogisticRegression(penalty="l2")
self.__net = MLPClassifier()
self.__knc = KNeighborsClassifier(n_jobs=-1)
self.__rfc_pl = Pipeline(
steps=[
("AddIndicator", self.__indicate_feature_transformer_array.IndicateFeatureTransformerArray(columns=[9, 10, 11])),
("FillNa", self.__fill_value_transformer_array.FillValueTransformerArray(filling_values=-999.0)),
("Clf", self.__rfc)
]
)
self.__etc_pl = Pipeline(
steps=[
("AddIndicator", self.__indicate_feature_transformer_array.IndicateFeatureTransformerArray(columns=[9, 10, 11])),
("FillNa", self.__fill_value_transformer_array.FillValueTransformerArray(filling_values=-999.0)),
("Clf", self.__etc)
]
)
self.__gbc_pl = Pipeline(
steps=[
("AddIndicator", self.__indicate_feature_transformer_array.IndicateFeatureTransformerArray(columns=[9, 10, 11])),
("FillNa", self.__fill_value_transformer_array.FillValueTransformerArray(filling_values=-999.0)),
("Clf", self.__gbc)
]
)
self.__xbc_pl = Pipeline(
steps=[
("AddIndicator", self.__indicate_feature_transformer_array.IndicateFeatureTransformerArray(columns=[9, 10, 11])),
("FillNa", self.__fill_value_transformer_array.FillValueTransformerArray(filling_values=-999.0)),
("Clf", self.__xbc)
]
)
self.__lr_l1_pl = Pipeline(
steps=[
("AddIndicator", self.__indicate_feature_transformer_array.IndicateFeatureTransformerArray(columns=[9, 10, 11])),
("FillNa", self.__fill_value_transformer_array.FillValueTransformerArray(filling_values=0)),
("Clf", self.__lr_l1)
]
)
self.__lr_l2_pl = Pipeline(
steps=[
("AddIndicator", self.__indicate_feature_transformer_array.IndicateFeatureTransformerArray(columns=[9, 10, 11])),
("FillNa", self.__fill_value_transformer_array.FillValueTransformerArray(filling_values=0)),
("Clf", self.__lr_l2)
]
)
self.__net_pl = Pipeline(
steps=[
("AddIndicator", self.__indicate_feature_transformer_array.IndicateFeatureTransformerArray(columns=[9, 10, 11])),
("FillNa", self.__fill_value_transformer_array.FillValueTransformerArray(filling_values=0)),
("Clf", self.__net)
]
)
self.__knc_pl = Pipeline(
steps=[
("AddIndicator", self.__indicate_feature_transformer_array.IndicateFeatureTransformerArray(columns=[9, 10, 11])),
("FillNa", self.__fill_value_transformer_array.FillValueTransformerArray(filling_values=0)),
("Clf", self.__knc)
]
)
self.__clf = StackingCVClassifier(
classifiers=[self.__rfc_pl,
self.__etc_pl,
self.__gbc_pl,
self.__xbc_pl,
self.__lr_l1_pl,
self.__lr_l2_pl,
self.__net_pl,
self.__knc_pl],
meta_classifier=self.__lr_l1_pl,
use_probas=True,
cv=2,
store_train_meta_features=True,
verbose=True
)
self.__clf.fit(self.__train_feature.values, self.__train_label.values)
self.__oof_train = self.__clf.train_meta_features_
self.__oof_test = self.__clf.predict_meta_features(self.__test_feature.values)
self.__oof_train = self.__oof_train[:, [i for i in range(2*len(self.__clf.classifiers)) if i % 2 != 0]]
self.__oof_test = self.__oof_test[:, [i for i in range(2*len(self.__clf.classifiers)) if i % 2 != 0]]
self.__oof_train = pd.DataFrame(
self.__oof_train,
columns=["rf_2", "et_2", "gb_2", "xg_2", "lr_l1_2", "lr_l2_2", "net_2", "knc_2"]
)
self.__oof_test = pd.DataFrame(
self.__oof_test,
columns=["rf_2", "et_2", "gb_2", "xg_2", "lr_l1_2", "lr_l2_2", "net_2", "knc_2"]
)
def model_feature_output(self):
self.__feature_train = pd.concat([self.__train_feature, self.__oof_train], axis=1)
self.__feature_test = pd.concat([self.__test_feature, self.__oof_test], axis=1)
self.__feature_train.to_csv(os.path.join(self.__output_path, "feature_train_res.csv"), index=False)
self.__feature_test.to_csv(os.path.join(self.__output_path, "feature_test_res.csv"), index=False)
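# A small helper making the meta-feature slicing in model_fit explicit. This is a
# minimal sketch and assumes the mlxtend layout for use_probas=True on a binary
# target: two probability columns per base classifier, (P(class 0), P(class 1)),
# so the odd-indexed columns kept above are the positive-class probabilities.
def _keep_positive_class_columns(meta_features, n_classifiers):
    """Return only the P(class 1) column of each base classifier."""
    return meta_features[:, [i for i in range(2 * n_classifiers) if i % 2 != 0]]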
if __name__ == "__main__":
feature = Feature(
input_path=sys.argv[1],
output_path=sys.argv[2]
)
feature.data_prepare()
feature.model_fit()
feature.model_feature_output()
| 39.89418
| 129
| 0.627188
|
819f84abee150481cc46d06b7de8fe299786ef61
| 5,998
|
py
|
Python
|
model.py
|
Yuibooo/pytorch-soft-actor-critic
|
cc17d0732712c404a8a2adc102aab00da0d7c87b
|
[
"MIT"
] | null | null | null |
model.py
|
Yuibooo/pytorch-soft-actor-critic
|
cc17d0732712c404a8a2adc102aab00da0d7c87b
|
[
"MIT"
] | null | null | null |
model.py
|
Yuibooo/pytorch-soft-actor-critic
|
cc17d0732712c404a8a2adc102aab00da0d7c87b
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
epsilon = 1e-6
# Initialize Policy weights
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class ValueNetwork(nn.Module):
def __init__(self, num_inputs, hidden_dim):
super(ValueNetwork, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
self.apply(weights_init_)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
class QNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim):
super(QNetwork, self).__init__()
# Q1 architecture
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
# Q2 architecture
self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear5 = nn.Linear(hidden_dim, hidden_dim)
self.linear6 = nn.Linear(hidden_dim, 1)
# Q3 architecture
self.linear7 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear8 = nn.Linear(hidden_dim, hidden_dim)
self.linear9 = nn.Linear(hidden_dim, 1)
self.apply(weights_init_)
def forward(self, state, action):
xu = torch.cat([state, action], 1)
x1 = F.relu(self.linear1(xu))
x1 = F.relu(self.linear2(x1))
x1 = self.linear3(x1)
x2 = F.relu(self.linear4(xu))
x2 = F.relu(self.linear5(x2))
x2 = self.linear6(x2)
x3 = F.relu(self.linear7(xu))
x3 = F.relu(self.linear8(x3))
x3 = self.linear9(x3)
return x1, x2, x3
def q1(self, state, action):
q1 = F.relu(self.linear1(torch.cat([state, action], 1)))
q1 = F.relu(self.linear2(q1))
q1 = self.linear3(q1)
return q1
class GaussianPolicy(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
super(GaussianPolicy, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, num_actions)
self.log_std_linear = nn.Linear(hidden_dim, num_actions)
self.apply(weights_init_)
# action rescaling
if action_space is None:
self.action_scale = torch.tensor(1.)
self.action_bias = torch.tensor(0.)
else:
self.action_scale = torch.FloatTensor(
(action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor(
(action_space.high + action_space.low) / 2.)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
return mean, log_std
def sample(self, state):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
x_t = normal.rsample() # for reparameterization trick (mean + std * N(0,1))
y_t = torch.tanh(x_t)
action = y_t * self.action_scale + self.action_bias
log_prob = normal.log_prob(x_t)
# Enforcing Action Bound
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
log_prob = log_prob.sum(1, keepdim=True)
mean = torch.tanh(mean) * self.action_scale + self.action_bias
return action, log_prob, mean
    def lod_prob(self, state, action):
        # Log-probability of an externally supplied (already squashed and
        # rescaled) action under the current policy; used to fit the distribution.
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        y_t = (action - self.action_bias) / self.action_scale  # undo rescaling
        x_t = torch.atanh(y_t)  # invert the tanh squashing
        log_prob = normal.log_prob(x_t)
        # Enforcing Action Bound (same change-of-variables correction as in sample())
        log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
        log_prob = log_prob.sum(1, keepdim=True)
        return log_prob
def to(self, device):
self.action_scale = self.action_scale.to(device)
self.action_bias = self.action_bias.to(device)
return super(GaussianPolicy, self).to(device)
class DeterministicPolicy(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
super(DeterministicPolicy, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.mean = nn.Linear(hidden_dim, num_actions)
self.noise = torch.Tensor(num_actions)
self.apply(weights_init_)
# action rescaling
if action_space is None:
            self.action_scale = torch.tensor(1.)
            self.action_bias = torch.tensor(0.)
else:
self.action_scale = torch.FloatTensor(
(action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor(
(action_space.high + action_space.low) / 2.)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
return mean
def sample(self, state):
mean = self.forward(state)
noise = self.noise.normal_(0., std=0.1)
noise = noise.clamp(-0.25, 0.25)
action = mean + noise
return action, torch.tensor(0.), mean
def to(self, device):
self.action_scale = self.action_scale.to(device)
self.action_bias = self.action_bias.to(device)
        return super(DeterministicPolicy, self).to(device)
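# Minimal sanity-check sketch of the squashed-Gaussian sampling path above:
# GaussianPolicy.sample() draws x ~ N(mean, std) via rsample(), squashes with
# tanh, rescales, and applies the tanh change-of-variables correction to the
# log-probability. The sizes below (8-dim state, 2-dim action, 64 hidden units,
# batch of 5) are arbitrary illustration values.
if __name__ == "__main__":
    policy = GaussianPolicy(num_inputs=8, num_actions=2, hidden_dim=64)
    state = torch.randn(5, 8)                          # batch of 5 dummy states
    action, log_prob, mean = policy.sample(state)
    print(action.shape, log_prob.shape, mean.shape)    # (5, 2), (5, 1), (5, 2)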
| 33.50838
| 84
| 0.621207
|
e82d5dce623b7b6329497fb5da7586095a07a36c
| 22,215
|
py
|
Python
|
openks/distributed/openks_distributed/gpu/GPUDistributed.py
|
HIT-SCIR-xuanxuan/OpenKS
|
a7f2ce0890822113322aad22e98d6c961e63caef
|
[
"Apache-2.0"
] | 88
|
2020-07-13T05:36:59.000Z
|
2022-03-17T21:30:44.000Z
|
openks/distributed/openks_distributed/gpu/GPUDistributed.py
|
HIT-SCIR-xuanxuan/OpenKS
|
a7f2ce0890822113322aad22e98d6c961e63caef
|
[
"Apache-2.0"
] | 16
|
2021-04-25T03:04:10.000Z
|
2022-02-07T02:36:29.000Z
|
openks/distributed/openks_distributed/gpu/GPUDistributed.py
|
HIT-SCIR-xuanxuan/OpenKS
|
a7f2ce0890822113322aad22e98d6c961e63caef
|
[
"Apache-2.0"
] | 35
|
2020-11-13T15:49:43.000Z
|
2022-03-30T07:55:07.000Z
|
# Copyright (c) 2020 Room 525 Research Group, Zhejiang University.
# All Rights Reserved.
import logging
import paddle.fluid as fluid
import paddle.fluid.io as io
import paddle.fluid.transpiler.distribute_transpiler as dist_transpiler
from paddle.fluid.executor import Executor
from paddle.fluid.parallel_executor import ParallelExecutor
from paddle.fluid.compiler import CompiledProgram
from paddle.fluid.framework import Program
from ...openks_distributed.base import \
BaseDistributedAlgorithm, BaseDistributedOptimizer, Mode
from paddle.fluid import compiler
from .fs_wrapper import LocalFS, BDFS
import os
import sys
import six
import json
import re
import shutil
class TrainStatus(object):
def __init__(self, epoch_no=-1):
# completed epoch
self._epoch_no = epoch_no
def next(self):
return self._epoch_no + 1
def __eq__(self, t):
return self._epoch_no == t._epoch_no
def __ne__(self, t):
return not self == t
class GPUDistributedAlgorithm(BaseDistributedAlgorithm):
def __init__(self):
super(GPUDistributedAlgorithm, self).__init__(Mode.COLLECTIVE)
self._local_ip = 0
self.startup_program = None
self._origin_program = None
self._transpiled_program = None
self.main_program = None
self._checkoint_prefix = "__paddle_fleet_checkpoint__"
self._param_file_name = "_paddle_fleet_param__"
def init_worker(self):
logging.warn(
"You should not call 'init_worker' method for collective mode.")
def run_worker(self, main_programs=None, scopes=None):
logging.warn(
"You should not call 'run_worker' method for collective mode.")
def init_server(self, model_dir=None):
logging.warn(
"You should not call 'init_server' method for collective mode.")
def run_server(self):
logging.warn(
"You should not call 'run_server' method for collective mode.")
def stop_worker(self):
logging.warn(
"You should not call 'stop_worker' method for collective mode.")
def distributed_optimizer(self, optimizer, strategy=None):
self._optimizer = \
HeterogeneousDistributedOptimizer(optimizer, strategy)
return self._optimizer
def save_inference_model(self,
executor,
dirname,
feeded_var_names=None,
target_vars=None,
main_program=None,
export_for_deployment=True):
"""
Prune the given `main_program` to build a new program especially for
inference, and then save it and all related parameters to given
`dirname` by the `executor`.
"""
assert isinstance(executor, Executor), \
"In fleet.save_inference_model() function, executor must be as" \
" Executor type."
if main_program is None:
main_program = self._origin_program
assert isinstance(main_program, Program), \
"In fleet.save_inference_model() function, main_program " \
"must be as Program type."
io.save_inference_model(dirname, feeded_var_names, target_vars,
executor, main_program, None, None,
export_for_deployment)
def save_persistables(self,
executor,
dirname,
main_program=None,
filename=None):
"""
This function filters out all variables with `persistable==True` from
the give `main_program` and then saves these variables to the folder
`dirname` or file `filename`.
The `dirname` is used to specify the folder where persistable variables
are going to be saved. If you would like to save variables in separate
files, set `filename` None; if you would like to save all variables in a
single file, use `filename` to specify the file name.
"""
assert isinstance(executor, Executor), \
"In fleet.save_inference_model() function, executor must be as" \
" Executor type."
if main_program is None:
main_program = self._origin_program
assert isinstance(main_program, Program), \
"In fleet.save_inference_model() function, main_program " \
"must be as Program type."
io.save_persistables(executor, dirname, main_program, filename=filename)
def _save_train_status(self, path, train_status):
d = {}
d["epoch_no"] = train_status._epoch_no
file_name = "{}/fleet_train_status".format(path)
with open(file_name, 'w') as f:
json.dump(d, f)
def _load_train_status(self, path):
file_name = "{}/fleet_train_status".format(path)
r = TrainStatus()
if not os.path.isfile(file_name):
return r
d = {}
with open(file_name, 'r') as f:
d = json.load(f)
assert "epoch_no" in d, "Can't find epoch_no in dict from train_status file:{}".format(
d)
r._epoch_no = d["epoch_no"]
assert r._epoch_no >= 0, "Data in checkpoint file is not valid:{}".format(
d)
return r
def _get_last_checkpoint_no(self, root_path, fs):
"""
only get the first depth
"""
max_no = -1
d = {}
dirs = fs.list_dirs(root_path)
for dir in dirs:
g = dir.split(".")
if len(g) != 2:
continue
if g[0] != "__paddle_fleet_checkpoint__":
continue
try:
n = int(g[1])
if n > max_no:
max_no = n
except:
continue
return max_no
def clean_redundant_check_points(self,
root_path,
fs=LocalFS(),
checkpoint_num=1):
max_no = self._get_last_checkpoint_no(root_path, fs)
if max_no < 0:
return
if checkpoint_num < 1:
checkpoint_num = 1
dirs = fs.list_dirs(root_path)
for dir in dirs:
g = dir.split(".")
if len(g) != 2:
continue
if g[0] != self._checkoint_prefix:
continue
try:
n = int(g[1])
if n <= max_no - checkpoint_num:
path = "{}/{}.{}".format(root_path, self._checkoint_prefix,
n)
fs.rmr(path)
except Exception as e:
print(e)
continue
def save_check_point(self,
executor,
path,
train_status,
main_program=None,
fs=LocalFS(),
local_cache_path=".cache",
remain_all_checkpoint=True):
"""
This function save persistables and current epoch num to path.
"""
        if main_program is None:
main_program = self._transpiled_program
if not fs.stat(path):
fs.mkdir(path)
max_no = self._get_last_checkpoint_no(path, fs=fs)
if max_no < 0:
max_no = -1
real_path = "{}/{}.{}".format(path, self._checkoint_prefix, max_no + 1)
tmp_path = "{}.tmp".format(real_path)
saved_path = tmp_path
local_fs = LocalFS()
cache_path = None
if fs.need_upload_download():
cache_path = "{}/{}.{}.saved_cache".format(
local_cache_path, self._checkoint_prefix, max_no + 1)
if not local_fs.stat(cache_path):
local_fs.mkdir(cache_path)
saved_path = cache_path
self.save_persistables(
executor=executor,
dirname=saved_path,
main_program=main_program,
filename=self._param_file_name)
self._save_train_status(path=saved_path, train_status=train_status)
if fs.need_upload_download():
fs.delete(tmp_path)
fs.upload(cache_path, tmp_path)
fs.mv(tmp_path, real_path)
if not remain_all_checkpoint:
self.clean_redundant_check_points(path)
def load_check_point(self,
executor,
path,
trainer_id,
main_program=None,
fs=LocalFS(),
local_cache_path=".cache",
ignore_empty=True):
"""
This function load persistables and current epoch num from path.
"""
max_no = self._get_last_checkpoint_no(path, fs)
if not ignore_empty:
assert max_no >= 0, "Can't find checkpoint"
if max_no < 0:
return None
local_fs = LocalFS()
if fs.need_upload_download():
cache_path = "{}/{}.{}.load_cache.{}".format(
local_cache_path, self._checkoint_prefix, max_no, trainer_id)
if local_fs.stat(cache_path):
local_fs.delete(cache_path)
real_path = "{}/{}.{}".format(path, self._checkoint_prefix, max_no)
load_path = real_path
if fs.need_upload_download():
fs.download(real_path, cache_path)
load_path = cache_path
        if main_program is None:
main_program = self._transpiled_program
io.load_persistables(
executor=executor,
dirname=load_path,
main_program=main_program,
filename=self._param_file_name)
return self._load_train_status(load_path)
algorithm = GPUDistributedAlgorithm()
class HeterogeneousDistributedOptimizer(BaseDistributedOptimizer):
"""
HeterogeneousDistributedOptimizer is a wrapper for paddle.fluid.optimizer
HeterogeneousA user should pass a paddle.fluid.optimizer to DistributedOptimizer
minimize() function is implemented.
HeterogeneousDistributedOptimizer is the starting point for a user who wants to
run distributed training. The optimized information will be stored in
Fleet() instance who holds the global information about current distributed
training.
"""
def __init__(self, optimizer, strategy):
super(HeterogeneousDistributedOptimizer, self).__init__(optimizer, strategy)
self._forward_recompute = strategy.forward_recompute
if (not isinstance(strategy.recompute_checkpoints, list)):
raise ValueError("DistStrategy.recompute_checkpoints should"
"be a List")
self._recompute_checkpoints = strategy.recompute_checkpoints
self._use_amp = strategy.use_amp
self._amp_loss_scaling = strategy.amp_loss_scaling
self.print_config = False
def backward(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None,
callbacks=None):
return self._optimizer.backward(loss, startup_program, parameter_list,
no_grad_set, callbacks)
def apply_gradients(self, params_grads):
return self._optimizer.apply_gradients(params_grads)
def _check_condition(self, name, **kwargs):
for k, v in six.iteritems(kwargs):
if v is True:
assert False, "you can't use %s and %s together" % (name, k)
def _check_collective_mode(self, main_program, optimizer, strategy):
"""
Check the conflict conditions.
"""
if strategy.use_local_sgd:
strategy.mode = "collective"
strategy.collective_mode = "local_sgd"
self._check_condition(
"use_local_sgd",
use_dgc=main_program._enable_dgc,
use_dist_fc=strategy.use_dist_fc,
use_lamb=main_program._use_lamb)
if strategy.use_dist_fc:
self._check_condition(
"use_dist_fc",
use_dgc=main_program._enable_dgc,
use_local_sgd=strategy.use_local_sgd,
use_lamb=main_program._use_lamb)
assert strategy.dist_fc_config is not None, "GPUBuildStrategy.dist_fc_config should be set"
if strategy._ut4grad_allreduce:
strategy.mode = "collective"
strategy.collective_mode = "grad_allreduce"
self._check_condition(
"_ut4grad_allreduce",
use_dgc=main_program._enable_dgc,
use_lamb=main_program._use_lamb)
if self._strategy.collective_mode=="local_sgd" \
or self._strategy.collective_mode == "grad_allreduce":
assert self._strategy.mode == "collective", \
"local_sgd and grad_allreduce can be used under collective mode"
def _transpile(self, startup_program, main_program):
"""
Transpile the programs to distributed programs. And add the variables.
"""
worker_endpoints = algorithm.worker_endpoints()
trainer_id = algorithm.worker_index()
current_endpoint = algorithm.worker_endpoints()[trainer_id]
worker_endpoints_env = ','.join(worker_endpoints)
trainers_num = algorithm.worker_num()
if self.print_config:
print("worker_endpoints:{} trainers_num:{} current_endpoint:{} \
trainer_id:{}".format(worker_endpoints, trainers_num,
current_endpoint, trainer_id))
# call transpiler
config = dist_transpiler.DistributeTranspilerConfig()
config.mode = self._strategy.mode
config.collective_mode = self._strategy.collective_mode
config.nccl_comm_num = self._strategy.nccl_comm_num
config.use_hierarchical_allreduce = self._strategy.use_hierarchical_allreduce
config.hierarchical_allreduce_inter_nranks = self._strategy.hierarchical_allreduce_inter_nranks
t = dist_transpiler.DistributeTranspiler(config=config)
t.transpile(
trainer_id=trainer_id,
trainers=worker_endpoints_env,
startup_program=startup_program,
program=main_program,
current_endpoint=current_endpoint)
def _get_node_ips_from_endpoints(self, endpoints):
ss = set()
ips = []
for ep in endpoints:
ip = ep.split(":")[0].strip()
if ip not in ss:
ss.add(ip)
ips.append(ip)
else:
continue
return ips
def _node_num(self):
worker_endpoints = algorithm.worker_endpoints()
current_endpoint = algorithm.worker_endpoints()[algorithm.worker_index()]
worker_endpoints_env = ','.join(worker_endpoints)
node_ips = self._get_node_ips_from_endpoints(worker_endpoints)
node_ip = current_endpoint.split(":")[0].strip()
node_num = len(node_ips)
return node_num
def _try_to_compile(self, startup_program, main_program):
node_num = self._node_num()
        assert node_num >= 1, "nccl2 node_num must be >= 1, now:{}".format(node_num)
exec_strategy = self._strategy.exec_strategy
if node_num <= 1:
if self._strategy.nccl_comm_num > 1:
logging.warn("set nccl_comm_num=1 since you only have 1 node.")
self._strategy.nccl_comm_num = 1
if self._strategy.use_hierarchical_allreduce:
logging.warn(
"set use_hierarchical_allreduce=False since you only have 1 node."
)
self._strategy.use_hierarchical_allreduce = False
sync_allreduce = os.getenv("FLAGS_sync_nccl_allreduce")
if sync_allreduce is None or sync_allreduce == "1":
exec_strategy.num_threads = self._strategy.nccl_comm_num + 1
if self._strategy.use_hierarchical_allreduce:
exec_strategy.num_threads = 2 * self._strategy.nccl_comm_num + 1
if exec_strategy.num_threads > 4:
logging.warn(
"if you use use_hierarchical_allreduce or "
"with multi nccl comm, please export FLAGS_sync_nccl_allreduce = 0"
)
# NOTE. open sync_batch_norm will hang when use multi num_threads
sync_batch_norm = self._strategy.sync_batch_norm
if sync_batch_norm is not None and sync_batch_norm is True:
self._strategy.nccl_comm_num = 1
self._strategy.use_hierarchical_allreduce = False
exec_strategy.num_threads = 1
logging.warn(
"use sync_batch_norm will hang when set num_threads > 1, so "
"set num_threads=1, nccl_comm_num=1, use_hierarchical_allreduce=False."
)
if self.print_config:
print("node_num:", node_num, "num_threads:",
exec_strategy.num_threads, "use_hierarchical_allreduce:",
self._strategy.use_hierarchical_allreduce, "nccl_comm_num:",
self._strategy.nccl_comm_num, "FLAGS_sync_nccl_allreduce:",
sync_allreduce)
self._transpile(startup_program, main_program)
if self._strategy.mode == "collective":
return main_program
self._strategy.num_trainers = algorithm.worker_num()
self._strategy.trainer_id = algorithm.worker_index()
self._strategy.trainers_endpoints = algorithm.worker_endpoints()
self._strategy.enable_backward_optimizer_op_deps = True
self._compiled_program = compiler.CompiledProgram(main_program)
self._compiled_program.with_data_parallel(
loss_name=self._loss.name,
build_strategy=self._strategy,
exec_strategy=self._strategy.exec_strategy,
share_vars_from=None)
return self._compiled_program
def raiseOptimizeError(self, strategy_name, optimize_name):
raise ValueError("can not use {0} when you set DistStrategy.{1} "
"as True".format(optimize_name, strategy_name))
def minimize(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
"""
minimize a program through loss
Args:
loss (Variable|Variable List): loss variable or loss variable list to run optimization.
startup_program (Program): startup_program for initializing parameters
in `parameter_list`.
parameter_list (list): list of Variables to update.
no_grad_set (set|None): set of Variables should be ignored.
Returns:
tuple: (optimize_ops, params_grads) which are, list of operators appended;
and list of (param, grad) Variables pair for optimization.
        Note that in parameter server mode, a worker will not get anything about optimize_ops,
        because the optimizer algorithms run on the pserver side. We will make this usable in the
        pserver process, but currently the optimization part is written into
        BaseDistributedAlgorithm(). A user does not need to care about how to start up a pserver node.
"""
# check optimizer conflicts
if self._forward_recompute:
if self._recompute_checkpoints == []:
raise ValueError("please set strategy.recompute_checkpoints"
"when set strategy.forward_recompute as True")
if self._optimizer.__class__.__name__ in [
"RecomputeOptimizer", "OptimizerWithMixedPrecision"
]:
self.raiseOptimizeError("forward_recompute",
self._optimizer.__class__.__name__)
self._optimizer = \
fluid.optimizer.RecomputeOptimizer(self._optimizer)
self._optimizer._set_checkpoints(self._recompute_checkpoints)
if self._use_amp:
if self._optimizer.__class__.__name__ in [
"OptimizerWithMixedPrecision", "DGCMomentumOptimizer"
]:
self.raiseOptimizeError("mixed_precision",
self._optimizer.__class__.__name__)
self._optimizer = fluid.contrib.mixed_precision.decorate(
self._optimizer,
init_loss_scaling=self._amp_loss_scaling,
use_dynamic_loss_scaling=True)
main_program = loss.block.program
if startup_program is None:
startup_program = fluid.default_startup_program()
algorithm.startup_program = startup_program
self._loss = loss
self._check_collective_mode(main_program, self._optimizer,
self._strategy)
optimize_ops, param_grads = self._optimizer.minimize(
loss,
startup_program=startup_program,
parameter_list=parameter_list,
no_grad_set=no_grad_set)
algorithm._origin_program = main_program.clone(for_test=False)
algorithm._transpiled_program = main_program
algorithm.main_program = self._try_to_compile(startup_program, main_program)
return optimize_ops, param_grads
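# Minimal checkpointing sketch for the helpers above, kept as comments because it
# assumes a properly launched collective job, an initialized executor, and
# placeholder values for the paths and num_epochs:
#
#     exe = fluid.Executor(fluid.CUDAPlace(0))
#     status = algorithm.load_check_point(exe, path="./ckpt",
#                                         trainer_id=algorithm.worker_index())
#     start_epoch = status.next() if status is not None else 0
#     for epoch in range(start_epoch, num_epochs):
#         ...  # run one training epoch
#         algorithm.save_check_point(exe, path="./ckpt",
#                                    train_status=TrainStatus(epoch))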
| 38.434256
| 113
| 0.588026
|
83c0cb455d4a0461ca241652c69e54f15837b488
| 2,069
|
py
|
Python
|
src/preprocess/vipcup_dicom2nifty.py
|
cmlab-mira/MedicalPro
|
3918c95197fd24406ce2117cc7ff9ce21bb8c620
|
[
"MIT"
] | 6
|
2020-02-01T07:19:32.000Z
|
2021-05-10T13:55:49.000Z
|
src/preprocess/vipcup_dicom2nifty.py
|
cmlab-mira/MedicalPro
|
3918c95197fd24406ce2117cc7ff9ce21bb8c620
|
[
"MIT"
] | 1
|
2020-06-21T08:33:35.000Z
|
2020-06-21T08:33:35.000Z
|
src/preprocess/vipcup_dicom2nifty.py
|
cmlab-mira/MedicalPro
|
3918c95197fd24406ce2117cc7ff9ce21bb8c620
|
[
"MIT"
] | 1
|
2020-11-11T06:24:12.000Z
|
2020-11-11T06:24:12.000Z
|
import logging
import argparse
from pathlib import Path
import dicom2nifti
def main(args):
data_dir = args.data_dir
output_dir = args.output_dir
    if not output_dir.exists():
output_dir.mkdir(parents=True)
for sub_dir in data_dir.iterdir():
dir_name = sub_dir.name
(output_dir / 'training').mkdir(exist_ok=True)
(output_dir / 'testing').mkdir(exist_ok=True)
patient_paths = sorted(dir_ for dir_ in sub_dir.iterdir() if dir_.is_dir())
for path in patient_paths:
folders = [folder for folder in path.iterdir() if folder.is_dir()]
for folder in folders:
dcm_files = list(folder.glob('*/*.dcm'))
if len(dcm_files) == 1:
# contour dicom
continue
else:
case_id = folder.parts[-2]
if folder.parts[-3] == 'VIP_CUP18_TestData':
output_path = output_dir / 'testing' / f"{case_id}.nii.gz"
else:
output_path = output_dir / 'training' / f"{case_id}.nii.gz"
try:
dicom2nifti.dicom_series_to_nifti(folder.as_posix(),
output_path.as_posix(),
reorient_nifti=True)
except Exception:
print(f"Failed: case {case_id}.")
def _parse_args():
parser = argparse.ArgumentParser(description="Convert the data of VIPCUP from dicom to nifti format.")
parser.add_argument('data_dir', type=Path, help='The directory of the dataset.')
parser.add_argument('output_dir', type=Path, help='The directory of the processed data.')
args = parser.parse_args()
return args
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
args = _parse_args()
main(args)
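# Example invocation (paths are placeholders):
#
#     python vipcup_dicom2nifty.py /path/to/VIPCUP_dicom /path/to/nifti_out
#
# Converted series are written as <case_id>.nii.gz under <output_dir>/testing for
# folders inside 'VIP_CUP18_TestData' and under <output_dir>/training otherwise;
# single-file (contour) DICOM folders are skipped.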
| 36.946429
| 106
| 0.550507
|
4e6ca70ae0267d7b862cfff119419ebcab36f5b7
| 38,844
|
py
|
Python
|
GPT2RGA.py
|
asigalov61/Lars-Ulrich-Challenge
|
381880fac426e91954ae0c58448041ead4bdffb2
|
[
"Apache-2.0"
] | 6
|
2021-10-09T14:12:10.000Z
|
2022-01-25T06:14:46.000Z
|
GPT2RGA.py
|
asigalov61/Lars-Ulrich-Challenge
|
381880fac426e91954ae0c58448041ead4bdffb2
|
[
"Apache-2.0"
] | 1
|
2022-03-27T12:22:39.000Z
|
2022-03-27T12:22:39.000Z
|
GPT2RGA.py
|
asigalov61/Lars-Ulrich-Challenge
|
381880fac426e91954ae0c58448041ead4bdffb2
|
[
"Apache-2.0"
] | 2
|
2021-10-09T14:12:17.000Z
|
2021-11-04T00:57:07.000Z
|
#! /usr/bin/python3
r'''###############################################################################
###################################################################################
#
#
# GPT-2 with Relative Global Attention
# Version 0.5
#
# PLEASE NOTE THAT THIS IS A WORK IN PROGRESS
# CHECK BACK FOR UPDATES SOON
#
# Based upon a source-code of Sashmark97:
# https://github.com/Sashmark97/midigen
#
# Project Los Angeles
# Tegridy Code 2021
#
# https://github.com/Tegridy-Code/Project-Los-Angeles
#
#
###################################################################################
###################################################################################
# Copyright 2021 Project Los Angeles / Tegridy Code
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###################################################################################
###################################################################################'''
########################################################
#
# Critical dependencies/requirements:
#
# pip install torch
# pip install tqdm
#
########################################################
print('Loading GPT2-RGA Module...')
########################################################
import glob
import os
import sys
import math
import time
import random
import pickle
import joblib
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
import torch.nn as nn
from torch.nn import functional as F
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.modules.normalization import LayerNorm
from torch.nn.parameter import Parameter
from torch.nn.modules.linear import Linear
from torch.nn.modules.dropout import Dropout
from torch.nn.init import *
from torch.nn.functional import linear, softmax, dropout
# stdlib helpers used further below (warnings in MultiheadAttentionRPR, logger in GPT)
import warnings
import logging
logger = logging.getLogger(__name__)
########################################################
# Constants
SEQUENCE_START = 0
RANGE_NOTE_ON = 128
RANGE_NOTE_OFF = 128
RANGE_VEL = 32
RANGE_TIME_SHIFT = 100
# Taken from the paper
ADAM_BETA_1 = 0.9
ADAM_BETA_2 = 0.98
ADAM_EPSILON = 10e-9
LR_DEFAULT_START = 1.0
SCHEDULER_WARMUP_STEPS = 4000
# LABEL_SMOOTHING_E = 0.1
# DROPOUT_P = 0.1
TOKEN_END = RANGE_NOTE_ON + RANGE_NOTE_OFF + RANGE_VEL + RANGE_TIME_SHIFT
TOKEN_PAD = TOKEN_END + 1
VOCAB_SIZE = TOKEN_PAD + 1
TORCH_FLOAT = torch.float32
TORCH_INT = torch.int32
TORCH_LABEL_TYPE = torch.long
PREPEND_ZEROS_WIDTH = 4
TORCH_CPU_DEVICE = torch.device("cpu")
USE_CUDA = 1
TORCH_CUDA_DEVICE = torch.device("cuda:0")
#====
weight_modulus = 1
print_modulus = 1
n_workers = 1
lr = None
ce_smoothing = None
batch_size = 4
random_seq = True
epochs = 5
rpr = False #'store_true'
max_seq = 1024
n_layers = 6
num_heads = 8
d_model = 512
dim_feedforward = 512
dropout_prob = 0.1
########################################################
def cpu_device():
return TORCH_CPU_DEVICE
def get_device():
if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):
return TORCH_CPU_DEVICE
else:
return TORCH_CUDA_DEVICE
def train(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, num_iters=-1):
best_eval_acc = 0.0
best_eval_acc_epoch = -1
best_eval_loss = float("inf")
best_eval_loss_epoch = -1
loss_hist = []
out = -1
model.train()
with tqdm(total=len(dataloader)) as bar_train:
for batch_num, batch in enumerate(dataloader):
time_before = time.time()
opt.zero_grad()
x = batch[0].to(get_device())
tgt = batch[1].to(get_device())
y, _ = model(x)
y = y.reshape(y.shape[0] * y.shape[1], -1)
tgt = tgt.flatten()
out = loss.forward(y, tgt)
out.backward()
opt.step()
if(lr_scheduler is not None):
lr_scheduler.step()
time_after = time.time()
time_took = time_after - time_before
lr = opt.param_groups[0]['lr']
bar_train.set_description(f'Epoch: {cur_epoch} Loss: {float(out):.4} LR: {float(lr):.8}')
bar_train.update(1)
loss_hist.append(out.item())
if batch_num == num_iters:
break
return loss_hist
def compute_epiano_accuracy(out, tgt):
softmax = nn.Softmax(dim=-1)
out = torch.argmax(softmax(out), dim=-1)
out = out.flatten()
tgt = tgt.flatten()
mask = (tgt != TOKEN_PAD)
out = out[mask]
tgt = tgt[mask]
if(len(tgt) == 0):
return 1.0
num_right = (out == tgt)
num_right = torch.sum(num_right).type(TORCH_FLOAT)
acc = num_right / len(tgt)
return acc
def eval_model(model, dataloader, loss, num_iters=-1):
model.eval()
avg_acc = -1
avg_loss = -1
with torch.set_grad_enabled(False):
n_test = len(dataloader)
sum_loss = 0.0
sum_acc = 0.0
with tqdm(total=len(dataloader)) as bar_eval:
for batch in dataloader:
x = batch[0].to(get_device())
tgt = batch[1].to(get_device())
y, _ = model(x)
sum_acc += float(compute_epiano_accuracy(y, tgt))
y = y.reshape(y.shape[0] * y.shape[1], -1)
tgt = tgt.flatten()
out = loss.forward(y, tgt)
sum_loss += float(out)
bar_eval.set_description(f'Loss val: {float(out):.4} Acc: {float(sum_acc / (bar_eval.n + 1)):.4}')
bar_eval.update(1)
if bar_eval.n == num_iters:
break
avg_loss = sum_loss / n_test
avg_acc = sum_acc / n_test
return avg_loss, avg_acc
class LrStepTracker:
def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):
# Store Values
self.warmup_steps = warmup_steps
self.model_dim = model_dim
self.init_steps = init_steps
# Begin Calculations
self.invsqrt_dim = (1 / math.sqrt(model_dim))
self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))
# step
def step(self, step):
step += self.init_steps
if(step <= self.warmup_steps):
return self.invsqrt_dim * self.invsqrt_warmup * step
else:
invsqrt_step = (1 / math.sqrt(step))
return self.invsqrt_dim * invsqrt_step
# get_lr
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
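# Minimal wiring sketch for the pieces above, kept as comments because the model
# and data loaders are placeholders. LrStepTracker implements the Transformer
# inverse-square-root warmup as a per-step multiplier, so it is combined with
# LambdaLR on top of a base learning rate of LR_DEFAULT_START (1.0); the loss
# choice (cross-entropy ignoring TOKEN_PAD) is an assumption consistent with
# compute_epiano_accuracy():
#
#     opt = Adam(model.parameters(), lr=LR_DEFAULT_START,
#                betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
#     lr_scheduler = LambdaLR(opt, LrStepTracker(d_model, SCHEDULER_WARMUP_STEPS).step)
#     loss_fn = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)
#     for epoch in range(epochs):
#         train(epoch, model, train_loader, loss_fn, opt, lr_scheduler)
#         eval_loss, eval_acc = eval_model(model, val_loader, loss_fn)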
########################################################
#@title Functions
class EPianoDataset(Dataset):
"""
----------
Author: Damon Gwinn
----------
Pytorch Dataset for the Maestro e-piano dataset (https://magenta.tensorflow.org/datasets/maestro).
Recommended to use with Dataloader (https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader)
Uses all files found in the given root directory of pre-processed (preprocess_midi.py)
Maestro midi files.
----------
"""
def __init__(self, midi_list, max_seq=2048, random_seq=True):
self.max_seq = max_seq
self.random_seq = random_seq
self.data_files = midi_list
def __len__(self):
"""
----------
Author: Damon Gwinn
----------
How many data files exist in the given directory
----------
"""
return len(self.data_files)
def __getitem__(self, idx):
"""
----------
Author: Damon Gwinn
----------
Gets the indexed midi batch. Gets random sequence or from start depending on random_seq.
Returns the input and the target.
----------
"""
raw_mid = torch.tensor(self.data_files, dtype=TORCH_LABEL_TYPE, device=cpu_device())
x, tgt = process_midi(raw_mid, self.max_seq, self.random_seq)
return x, tgt
def process_midi(raw_mid, max_seq, random_seq):
"""
----------
Author: Damon Gwinn
----------
Takes in pre-processed raw midi and returns the input and target. Can use a random sequence or
go from the start based on random_seq.
----------
"""
x = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
raw_len = len(raw_mid)
full_seq = max_seq + 1 # Performing seq2seq
if(raw_len == 0):
return x, tgt
start = 0
end = 0
# Randomly selecting a range
if (random_seq):
end_range = raw_len - full_seq
start = random.randint(abs(SEQUENCE_START), abs(end_range))
# Always taking from the start to as far as we can
else:
start = SEQUENCE_START
end = start + full_seq
data = raw_mid[start:end]
x = data[:max_seq]
tgt = data[1:full_seq]
return x, tgt
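# Minimal shape check for the dataset plumbing above. EPianoDataset stores one
# already-tokenized event list; every __getitem__ call crops a (random or
# leading) window of max_seq tokens and returns (input, target) shifted by one
# token for next-token prediction. The random token list below is a placeholder
# standing in for a real pre-processed MIDI file.
if __name__ == "__main__":
    dummy_tokens = [random.randint(0, TOKEN_END - 1) for _ in range(4096)]
    ds = EPianoDataset(dummy_tokens, max_seq=max_seq, random_seq=True)
    dl = DataLoader(ds, batch_size=batch_size, num_workers=0)
    x, tgt = next(iter(dl))
    print(x.shape, tgt.shape)  # torch.Size([4, 1024]) torch.Size([4, 1024])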
########################################################
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
It is possible to use torch.nn.MultiheadAttention here but I am including an
explicit implementation here to show that there is nothing too scary here.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
# causal mask to ensure that attention is only applied to the left in the input sequence
self.register_buffer("mask", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.enable_rpr = config.enable_rpr
if config.enable_rpr:
self.attn = MultiheadAttentionRPR(config.n_embd, config.n_head, config.attn_pdrop, er_len=config.er_len)
else:
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, config.dim_feedforward),
nn.GELU(),
nn.Linear(config.dim_feedforward, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x, mask=None):
if self.enable_rpr:
x = x + self.attn(self.ln1(x), self.ln1(x), self.ln1(x), attn_mask=mask)[0]
else:
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
class MultiheadAttentionRPR(nn.Module):
"""
----------
Author: Pytorch
Modified: Damon Gwinn
----------
For Relative Position Representation support (https://arxiv.org/abs/1803.02155)
https://pytorch.org/docs/1.2.0/_modules/torch/nn/modules/activation.html#MultiheadAttention
Modification to add RPR embedding Er and call custom multi_head_attention_forward_rpr
----------
"""
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
add_zero_attn=False, kdim=None, vdim=None, er_len=None):
super(MultiheadAttentionRPR, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
# Adding RPR embedding matrix
if(er_len is not None):
self.Er = Parameter(torch.rand((er_len, self.head_dim), dtype=torch.float32))
else:
self.Er = None
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None):
if hasattr(self, '_qkv_same_embed_dim') and self._qkv_same_embed_dim is False:
# return F.multi_head_attention_forward(
# query, key, value, self.embed_dim, self.num_heads,
# self.in_proj_weight, self.in_proj_bias,
# self.bias_k, self.bias_v, self.add_zero_attn,
# self.dropout, self.out_proj.weight, self.out_proj.bias,
# training=self.training,
# key_padding_mask=key_padding_mask, need_weights=need_weights,
# attn_mask=attn_mask, use_separate_proj_weight=True,
# q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
# v_proj_weight=self.v_proj_weight)
return multi_head_attention_forward_rpr(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, rpr_mat=self.Er)
else:
if not hasattr(self, '_qkv_same_embed_dim'):
warnings.warn('A new version of MultiheadAttention module has been implemented. \
Please re-train your model with the new module',
UserWarning)
# return F.multi_head_attention_forward(
# query, key, value, self.embed_dim, self.num_heads,
# self.in_proj_weight, self.in_proj_bias,
# self.bias_k, self.bias_v, self.add_zero_attn,
# self.dropout, self.out_proj.weight, self.out_proj.bias,
# training=self.training,
# key_padding_mask=key_padding_mask, need_weights=need_weights,
# attn_mask=attn_mask)
return multi_head_attention_forward_rpr(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, rpr_mat=self.Er)
# multi_head_attention_forward_rpr
def multi_head_attention_forward_rpr(query, # type: Tensor
key, # type: Tensor
value, # type: Tensor
embed_dim_to_check, # type: int
num_heads, # type: int
in_proj_weight, # type: Tensor
in_proj_bias, # type: Tensor
bias_k, # type: Optional[Tensor]
bias_v, # type: Optional[Tensor]
add_zero_attn, # type: bool
dropout_p, # type: float
out_proj_weight, # type: Tensor
out_proj_bias, # type: Tensor
training=True, # type: bool
key_padding_mask=None, # type: Optional[Tensor]
need_weights=True, # type: bool
attn_mask=None, # type: Optional[Tensor]
use_separate_proj_weight=False, # type: bool
q_proj_weight=None, # type: Optional[Tensor]
k_proj_weight=None, # type: Optional[Tensor]
v_proj_weight=None, # type: Optional[Tensor]
static_k=None, # type: Optional[Tensor]
static_v=None, # type: Optional[Tensor]
rpr_mat=None
):
'''
print('Query: ', query.shape, 'Key: ', key.shape, 'Value: ', value.shape)
print('Equal: ', torch.equal(query, key) and torch.equal(key, value))
print('embed_dim_to_check: ', embed_dim_to_check)
print('num_heads:', num_heads)
print('in_proj_weight: ', in_proj_weight.shape)
print('in_proj_bias: ', in_proj_bias.shape)
print('bias_k:', bias_k, 'bias_v', bias_v)
print('add_zero_attn:', add_zero_attn)
print('dropout_p: ', dropout_p)
print('out_proj_weight: ', out_proj_weight.shape)
print('out_proj_bias:', out_proj_bias.shape)
print('training:', training)
print('need_weights:', need_weights)
print('use_separate_proj_weight:', use_separate_proj_weight)
print('key_padding_mask:', key_padding_mask)
print('attn_mask:', attn_mask.shape)
print('q_proj_weight:', q_proj_weight)
print('k_proj_weight:', k_proj_weight)
print('v_proj_weight:', v_proj_weight)
print('static_k:', static_k)
print('static_v:', static_v)
print('rpr_mat:', rpr_mat.shape)
'''
"""
----------
Author: Pytorch
Modified: Damon Gwinn
----------
For Relative Position Representation support (https://arxiv.org/abs/1803.02155)
https://pytorch.org/docs/1.2.0/_modules/torch/nn/functional.html
Modification to take RPR embedding matrix and perform skew optimized RPR (https://arxiv.org/abs/1809.04281)
----------
"""
# type: (...) -> Tuple[Tensor, Optional[Tensor]]
qkv_same = torch.equal(query, key) and torch.equal(key, value)
kv_same = torch.equal(key, value)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if use_separate_proj_weight is not True:
if qkv_same:
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif kv_same:
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask,
torch.zeros((attn_mask.size(0), 1),
dtype=attn_mask.dtype,
device=attn_mask.device)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device)], dim=1)
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, torch.zeros((attn_mask.size(0), 1),
dtype=attn_mask.dtype,
device=attn_mask.device)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device)], dim=1)
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
######### ADDITION OF RPR ###########
if(rpr_mat is not None):
rpr_mat = _get_valid_embedding(rpr_mat, q.shape[1], k.shape[1])
qe = torch.einsum("hld,md->hlm", q, rpr_mat)
srel = _skew(qe)
attn_output_weights += srel
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
def _get_valid_embedding(Er, len_q, len_k):
"""
----------
Author: Damon Gwinn
----------
Gets valid embeddings based on max length of RPR attention
----------
"""
len_e = Er.shape[0]
start = max(0, len_e - len_q)
return Er[start:, :]
def _skew(qe):
"""
----------
Author: Damon Gwinn
----------
Performs the skew optimized RPR computation (https://arxiv.org/abs/1809.04281)
----------
"""
sz = qe.shape[1]
mask = (torch.triu(torch.ones(sz, sz).to(qe.device)) == 1).float().flip(0)
qe = mask * qe
qe = F.pad(qe, (1,0, 0,0, 0,0))
qe = torch.reshape(qe, (qe.shape[0], qe.shape[2], qe.shape[1]))
srel = qe[:, 1:, :]
return srel
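# Shape note on the relative-attention helpers above (a sketch, not a proof):
# _get_valid_embedding() trims Er to its last len_q rows, and _skew() converts
# the (bsz * num_heads, len_q, len_q) product of q with Er into the S_rel term
# using the pad / reshape / slice trick from Music Transformer, avoiding an
# explicit (len_q, len_q, head_dim) intermediate. For example:
#
#     qe = torch.rand(8, 16, 16)   # (bsz * num_heads, len_q, len_e)
#     print(_skew(qe).shape)       # torch.Size([8, 16, 16])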
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
# input embedding stem
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.enable_rpr = config.enable_rpr
self.block_size = config.block_size
self.apply(self._init_weights)
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
no_decay.add('pos_emb')
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
return optimizer
def forward(self, idx, targets=None):
b, t = idx.size()
if self.enable_rpr:
mask = generate_square_subsequent_mask(t).to(get_device())
else:
mask = None
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
if self.enable_rpr:
x = x.permute(1,0,2)
for module in self.blocks:
x = module(x, mask=mask)
x = x.permute(1,0,2)
else:
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
if self.enable_rpr:
del mask
return logits, loss
def generate(self, primer=None, target_seq_length=1024, beam=0, beam_chance=1.0, temperature=0, stop_token=TOKEN_END, silent_mode=True):
assert (not self.training), "Cannot generate while in training mode"
if not silent_mode: print("Generating sequence of max length:", target_seq_length)
gen_seq = torch.full((1,target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())
num_primer = len(primer)
gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())
cur_i = num_primer
while(cur_i < target_seq_length):
logits, _ = self.forward(gen_seq[..., :cur_i])
y = self.softmax(logits)[..., :TOKEN_END]
token_probs = y[:, cur_i-1, :] / (temperature if temperature > 0 else 1.)
if(beam == 0):
beam_ran = 2.0
else:
beam_ran = random.uniform(0,1)
if(beam_ran <= beam_chance):
token_probs = token_probs.flatten()
top_res, top_i = torch.topk(token_probs, beam)
beam_rows = top_i // VOCAB_SIZE
beam_cols = top_i % VOCAB_SIZE
gen_seq = gen_seq[beam_rows, :]
gen_seq[..., cur_i] = beam_cols
else:
distrib = torch.distributions.categorical.Categorical(probs=token_probs)
next_token = distrib.sample()
gen_seq[:, cur_i] = next_token
# Let the transformer decide to end if it wants to
if(next_token == stop_token):
if not silent_mode: print("Model called end of sequence at:", cur_i, "/", target_seq_length)
break
cur_i += 1
if(cur_i % 50 == 0):
if not silent_mode: print(cur_i, "/", target_seq_length)
return gen_seq[:, :cur_i]
def generate_square_subsequent_mask(sz: int) -> Tensor:
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
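# Illustrative example (added, not in the original source): generate_square_subsequent_mask(3)
# produces
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# so, once added to the attention logits, position i can only attend to positions <= i.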
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.1
resid_pdrop = 0.1
attn_pdrop = 0.1
def __init__(self, vocab_size, block_size, dim_feedforward, enable_rpr=False, er_len=None, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
self.dim_feedforward = dim_feedforward
self.enable_rpr = enable_rpr
self.er_len = er_len
for k,v in kwargs.items():
setattr(self, k, v)
import logging
logger = logging.getLogger(__name__)
########################################################
def Plot_Losses(losses):
pass
########################################################
print('GPT2-RGA loading complete!')
print('Enjoy!')
########################################################
########################################################
| avg_line_length: 37.458052 | max_line_length: 140 | alphanum_fraction: 0.571877 |

hexsha: 6caa1b3e4d44ad185c35d31e48a8de0dc13ff07b | size: 3,991 | ext: py | lang: Python
max_stars_repo: alipay/aop/api/request/AlipayOpenMiniDeploypackageQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | count: 213 | events: 2018-08-27T16:49:32.000Z to 2021-12-29T04:34:12.000Z
max_issues_repo: alipay/aop/api/request/AlipayOpenMiniDeploypackageQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | count: 29 | events: 2018-09-29T06:43:00.000Z to 2021-09-02T03:27:32.000Z
max_forks_repo: alipay/aop/api/request/AlipayOpenMiniDeploypackageQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | count: 59 | events: 2018-08-27T16:59:26.000Z to 2022-03-25T10:08:15.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenMiniDeploypackageQueryModel import AlipayOpenMiniDeploypackageQueryModel
class AlipayOpenMiniDeploypackageQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayOpenMiniDeploypackageQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayOpenMiniDeploypackageQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.mini.deploypackage.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| avg_line_length: 27.524138 | max_line_length: 148 | alphanum_fraction: 0.646455 |

hexsha: 8c8a7e91a09e3679b6d7166ed50dde4b9defb052 | size: 1,229 | ext: py | lang: Python
max_stars_repo: pyrival/strings/hashing.py | tusshar2000/PyRival | 1d4b29e264088978243a9e72e4bc603acc1fee11 | ["Apache-2.0"] | count: 3 | events: 2020-05-07T10:48:11.000Z to 2020-08-15T08:31:40.000Z
max_issues_repo: pyrival/strings/hashing.py | dipta007/PyRival | 4763ce2e1a931438194ea9c5aebc28ef7535c4bc | ["Apache-2.0"] | count: null | events: null
max_forks_repo: pyrival/strings/hashing.py | dipta007/PyRival | 4763ce2e1a931438194ea9c5aebc28ef7535c4bc | ["Apache-2.0"] | count: 2 | events: 2021-05-22T13:45:06.000Z to 2022-03-06T18:22:05.000Z
import random
HMOD = 2147483647
HBASE1 = random.randrange(HMOD)
HBASE2 = random.randrange(HMOD)
class Hashing:
def __init__(self, s, mod=HMOD, base1=HBASE1, base2=HBASE2):
self.mod, self.base1, self.base2 = mod, base1, base2
self._len = _len = len(s)
f_hash, s_hash = [0] * (_len + 1), [0] * (_len + 1)
for i in range(_len):
f_hash[i + 1] = (base1 * f_hash[i] + s[i]) % mod
s_hash[i + 1] = (base2 * s_hash[i] + s[i]) % mod
self.f_hash, self.s_hash = f_hash, s_hash
def hashed(self, start, stop):
return (
(self.f_hash[stop] - pow(self.base1, stop - start, self.mod) * self.f_hash[start]) % self.mod,
(self.s_hash[stop] - pow(self.base2, stop - start, self.mod) * self.s_hash[start]) % self.mod,
)
def get_hashes(self, length):
mod = self.mod
pow_base1, pow_base2 = pow(self.base1, length, mod), pow(self.base2, length, mod)
f_hash = [(self.f_hash[i + length] - pow_base1 * self.f_hash[i]) % mod for i in range(self._len - length + 1)]
s_hash = [(self.s_hash[i + length] - pow_base2 * self.s_hash[i]) % mod for i in range(self._len - length + 1)]
return f_hash, s_hash
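# --- Added usage sketch (not part of the original PyRival module) ---------------------
# Hashing expects a sequence of integers (e.g. ord() codes) and, after O(n) setup,
# compares any two substrings in O(1) via their double hashes.
def _hashing_demo():
    s = [ord(c) for c in "abcabc"]
    h = Hashing(s)
    # "abc" occupies [0, 3) and [3, 6); equal substrings give equal hash pairs
    assert h.hashed(0, 3) == h.hashed(3, 6)
    # get_hashes(3) returns the rolling hashes of every length-3 window
    f_hash, s_hash = h.get_hashes(3)
    assert len(f_hash) == len(s) - 3 + 1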
| avg_line_length: 40.966667 | max_line_length: 118 | alphanum_fraction: 0.585028 |

hexsha: 70d7553dc15bd3713ac246afb86a928af7c7e1d6 | size: 7,890 | ext: py | lang: Python
max_stars_repo: backend/a_list_33817/settings.py | crowdbotics-apps/a-list-33817 | c220a0e3192bf0573ff504e3ecb26adaa8e1ff42 | ["FTL", "AML", "RSA-MD"] | count: null | events: null
max_issues_repo: backend/a_list_33817/settings.py | crowdbotics-apps/a-list-33817 | c220a0e3192bf0573ff504e3ecb26adaa8e1ff42 | ["FTL", "AML", "RSA-MD"] | count: null | events: null
max_forks_repo: backend/a_list_33817/settings.py | crowdbotics-apps/a-list-33817 | c220a0e3192bf0573ff504e3ecb26adaa8e1ff42 | ["FTL", "AML", "RSA-MD"] | count: null | events: null
"""
Django settings for a_list_33817 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'a_list_33817.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'a_list_33817.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
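# Note (added): env.db() reads the DATABASE_URL environment variable (django-environ),
# e.g. a value of the form postgres://USER:PASSWORD@HOST:5432/NAME (illustrative only).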
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
| avg_line_length: 30.346154 | max_line_length: 112 | alphanum_fraction: 0.736502 |

hexsha: 7647cd81df62920d836e52b8e914d6b255e43dab | size: 3,670 | ext: py | lang: Python
max_stars_repo: non-tts-utilities/ttslua-file-generator/lib/deckutil.py | larikk/TTS-YGO | 8c1718a01e97e9e972ba0c16c47d14c56354b5d5 | ["MIT"] | count: null | events: null
max_issues_repo: non-tts-utilities/ttslua-file-generator/lib/deckutil.py | larikk/TTS-YGO | 8c1718a01e97e9e972ba0c16c47d14c56354b5d5 | ["MIT"] | count: null | events: null
max_forks_repo: non-tts-utilities/ttslua-file-generator/lib/deckutil.py | larikk/TTS-YGO | 8c1718a01e97e9e972ba0c16c47d14c56354b5d5 | ["MIT"] | count: null | events: null
import requests
from functools import cmp_to_key
def printDeck(deck):
print("Name", deck['name'])
print("Code", deck['code'])
print("Date", deck['release-date'])
print("Image", deck['image'])
if 'next' in deck:
print("Next", deck['next'])
cards = ""
for card in deck['cards']:
cards += card['Card number'] + " "
cards += card['Name'].ljust(60) + " "
cards += card['Rarity'].ljust(25) + " "
cards += card['Category'].ljust(30) + " "
if 'Qty' in card:
cards += card['Qty'] + " "
cards += "\n"
print(cards)
#Sorts by rarity and then set code
rarityOrder = ["Secret Rare", "Ultra Rare", "Shatterfoil Rare", "Super Rare", "Rare", "Common"]
def compareCards(a, b):
deltaRarity = a['__r'] - b['__r']
if deltaRarity == 0:
return a['__n'] - b['__n']
return deltaRarity
def sortCards(deck):
cards = deck['cards']
lengthCodeNumber = 0
for c in reversed(cards[0]['Card number']):
if c.isdigit():
lengthCodeNumber += 1
else:
break
# makes comparing above faster and easier
for card in cards:
card['__r'] = rarityOrder.index(card['Rarity'])
card['__n'] = int(card['Card number'][-lengthCodeNumber:])  # full numeric suffix of the set code
return sorted(cards, key=cmp_to_key(compareCards))
def isExtraDeckCard(cardType):
types = ["Fusion", "Synchro", "XYZ", "Link"]
isExtra = False
for type_ in types:
if type_ in cardType:
isExtra = True
break
return isExtra
def extractCardId(card):
cardVariants = card['card_images']
ids = map(lambda e: e['id'], cardVariants)
id = min(ids)
# Need to make an exception for Dark Magician
# Polymerization alt artwork is fine because alt id is higher than main id
if id == 36996508:
id = 46986414
return str(id)
def attachAdditionalData(deck):
cards = deck['cards']
names = [card['Name'] for card in cards]
names = "|".join(names)
r = requests.get("https://db.ygoprodeck.com/api/v7/cardinfo.php", params={"name": names, "misc": "yes"})
nameToDataMapping = {}
json = r.json()
for cardData in json['data']:
name = cardData['name'].lower()
nameToDataMapping[name] = cardData
# handle renames
for misc in cardData['misc_info']:
if 'beta_name' in misc:
name = misc['beta_name'].lower()
nameToDataMapping[name] = cardData
for card in cards:
name = card['Name'].lower()
data = nameToDataMapping[name]
card['id'] = extractCardId(data)
card['isExtraDeckCard'] = isExtraDeckCard(data['type'])
def asYdkFile(deck):
attachAdditionalData(deck)
main = []
extra = []
for card in deck['cards']:
id = card['id']
isExtra = card['isExtraDeckCard']
n = 1
if 'Qty' in card: n = int(card['Qty'])
for _ in range(n):
if isExtra:
extra.append(id)
else:
main.append(id)
if len(main) > 0:
main = "#main\n" + "\n".join(main) + "\n"
else:
main = "#main\n"
if len(extra) > 0:
extra = "#extra\n" + "\n".join(extra) + "\n"
else:
extra = "#extra\n"
result = "#created by ...\n"
result += main
result += extra
result += "!side"
return result
def asTtsLuaFile(deck):
return f"""\
return {{
code = "{deck['code']}",
name = "{deck['name']}",
releaseDate = "{deck['release-date']}",
image = "{deck['image']}",
cards = [[
{deck['ydk']}
]]
}}
"""
| avg_line_length: 24.965986 | max_line_length: 108 | alphanum_fraction: 0.546322 |

hexsha: 3c362ddad8056e5809e8c76c215c503aab497437 | size: 2,177 | ext: py | lang: Python
max_stars_repo: flowx/imbound/_interface/visco/_interface/_redistance_solid.py | Balaras-Group/flowX | 29c1d6209abbfab553997b557794e4d7b06a09a8 | ["BSD-3-Clause"] | count: null | events: null
max_issues_repo: flowx/imbound/_interface/visco/_interface/_redistance_solid.py | Balaras-Group/flowX | 29c1d6209abbfab553997b557794e4d7b06a09a8 | ["BSD-3-Clause"] | count: 7 | events: 2020-03-05T20:39:32.000Z to 2020-03-13T01:11:26.000Z
max_forks_repo: flowx/imbound/_interface/visco/_interface/_redistance_solid.py | Balaras-Group/flowX | 29c1d6209abbfab553997b557794e4d7b06a09a8 | ["BSD-3-Clause"] | count: 1 | events: 2020-03-09T17:38:00.000Z to 2020-03-09T17:38:00.000Z
import numpy
from numba import jit
def redistance_solid(s, soo, dt, dx, dy, nx, ny):
_jit_redistance_solid(s, soo, dt, dx, dy, nx, ny)
return
@jit(nopython=True)
def _jit_redistance_solid(s, soo, dt, dx, dy, nx, ny):
eps = 1e-14
sgn = soo / numpy.abs(soo + eps)
so = numpy.copy(s)
s[:, :] = 0.0
for i in range(1, nx - 1):
for j in range(1, ny - 1):
sm = so[i, j]
sxl = so[i - 1, j]
sxr = so[i + 1, j]
syl = so[i, j - 1]
syr = so[i, j + 1]
if (
(soo[i, j] * soo[i - 1, j] < 0)
or (soo[i, j] * soo[i + 1, j] < 0)
or (soo[i, j] * soo[i, j - 1] < 0)
or (soo[i, j] * soo[i, j + 1] < 0)
):
s[i, j] = soo[i, j]
else:
# - First Order Upwind -----
ap = numpy.maximum((sm - sxl), 0.0) / dx
an = numpy.minimum((sm - sxl), 0.0) / dx
bp = numpy.maximum((sxr - sm), 0.0) / dx
bn = numpy.minimum((sxr - sm), 0.0) / dx
cp = numpy.maximum((sm - syl), 0.0) / dy
cn = numpy.minimum((sm - syl), 0.0) / dy
dp = numpy.maximum((syr - sm), 0.0) / dy
dn = numpy.minimum((syr - sm), 0.0) / dy
if so[i, j] > 0.0:
agf = (
numpy.sqrt(
numpy.maximum(ap**2, bn**2)
+ numpy.maximum(cp**2, dn**2)
)
) - 1.0
elif so[i, j] < 0.0:
agf = (
numpy.sqrt(
numpy.maximum(an**2, bp**2)
+ numpy.maximum(cn**2, dp**2)
)
) - 1.0
else:
agf = 0.0
# - Solve Level Set Re-distance equation ---------
s[i, j] = so[i, j] - dt * (sgn[i, j] * (agf))
if s[i, j] * soo[i, j] < 0.0:
print("WARNING: LS Dist Function Changed Signs - ", i, j)
return
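# --- Added usage sketch (not part of the original flowX module) -----------------------
# Runs a single redistancing step on a small grid whose level-set field is the signed
# distance to a circle; the grid size and time step below are illustrative only and
# assume numpy and numba are installed.
def _redistance_demo():
    nx = ny = 32
    dx = dy = 1.0 / (nx - 1)
    x = numpy.linspace(0.0, 1.0, nx)
    y = numpy.linspace(0.0, 1.0, ny)
    X, Y = numpy.meshgrid(x, y, indexing="ij")
    soo = numpy.sqrt((X - 0.5) ** 2 + (Y - 0.5) ** 2) - 0.25  # circle of radius 0.25
    s = soo.copy()
    redistance_solid(s, soo, 0.1 * dx, dx, dy, nx, ny)
    return s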
| avg_line_length: 26.228916 | max_line_length: 66 | alphanum_fraction: 0.35232 |

hexsha: ae65e5f190182de9d6144faa2b51c78075e49d5b | size: 12,946 | ext: py | lang: Python
max_stars_repo: fed_experiment.py | LGNRoy/TSFL | bc2a53637bf1c662646815f07e56da41c0466e0e | ["MIT"] | count: null | events: null
max_issues_repo: fed_experiment.py | LGNRoy/TSFL | bc2a53637bf1c662646815f07e56da41c0466e0e | ["MIT"] | count: null | events: null
max_forks_repo: fed_experiment.py | LGNRoy/TSFL | bc2a53637bf1c662646815f07e56da41c0466e0e | ["MIT"] | count: null | events: null
import argparse
import os
import numpy as np
import pickle
import torch.utils.data
import torch.nn as nn
from GCN_model.utlis.utils import *
from utils import *
from DataTransformer import transform
def add_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
# Training settings
parser.add_argument('--case_name', type=str, default='knn', help='connective functions ("distance","knn","pcc","plv")')
parser.add_argument('--data_dir', type=str, default="./result/ISRUC_S3_knn", help='Data directory')
parser.add_argument('--model', type=str, default='graphsage', help='Model name. Currently supports SAGE, GAT and GCN.')
parser.add_argument('--normalize_features', type=bool, default=False,
help='Whether or not to symmetrically normalize feat matrices')
parser.add_argument('--normalize_adjacency', type=bool, default=False,
help='Whether or not to symmetrically normalize adj matrices')
parser.add_argument('--sparse_adjacency', type=bool, default=False,
help='Whether or not the adj matrix is to be processed as a sparse matrix')
parser.add_argument('--hidden_size', type=int, default=32, help='Size of GNN hidden layer')
parser.add_argument('--node_embedding_dim', type=int, default=32,
help='Dimensionality of the vector space the atoms will be embedded in')
parser.add_argument('--alpha', type=float, default=0.2, help='Alpha value for LeakyRelu used in GAT')
parser.add_argument('--num_heads', type=int, default=2, help='Number of attention heads used in GAT')
parser.add_argument('--dropout', type=float, default=0.3, help='Dropout used between GraphSAGE layers')
parser.add_argument('--readout_hidden_dim', type=int, default=64, help='Size of the readout hidden layer')
parser.add_argument('--graph_embedding_dim', type=int, default=64,
help='Dimensionality of the vector space the molecule will be embedded in')
parser.add_argument('--client_optimizer', type=str, default='adam', metavar="O",
help='SGD with momentum; adam')
parser.add_argument('--lr', type=float, default=0.0015, metavar='LR',
help='learning rate (default: 0.0015)')
parser.add_argument('--batch_size', type=int, default=8, metavar='BS',
help='batch size (default: batch_size)')
parser.add_argument('--wd', help='weight decay parameter;', metavar="WD", type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=5, metavar='EP',
help='how many epochs will be trained locally')
parser.add_argument('--frequency_of_the_test', type=int, default=5, help='How frequently to run eval')
parser.add_argument('--device', type=str, default="cuda:0", metavar="DV", help='gpu device for training')
parser.add_argument('--metric', type=str, default='roc-auc', help='Metric to be used to evaluate classification models')
parser.add_argument('--test_freq', type=int, default=1024, help='How often to test')
args = parser.parse_args()
return args
def train_model(args):
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
path = args.data_dir
case_name = args.case_name
epochs = args.epochs
lr = args.lr
batch_size = args.batch_size
transformed_path = path+"/"+case_name
if not os.path.exists(transformed_path):
adj_matrices, feature_matrices, labels = get_data(path)
os.mkdir(transformed_path)
numb_participants = 5
train_mask, test_mask = niid_split_mask(labels)
os.mkdir(transformed_path+"/test")
writer=open("{}/test/{}.pkl".format(transformed_path, "adjacency_matrices"),'wb')
pickle.dump(np.array(adj_matrices)[test_mask], writer)
writer.close()
writer=open("{}/test/{}.pkl".format(transformed_path, "feature_matrices"),'wb')
pickle.dump(np.array(feature_matrices)[test_mask], writer)
writer.close()
np.save("{}/test/{}.npy".format(transformed_path, "labels"), labels[test_mask])
for i, mask in enumerate(train_mask):
os.mkdir(transformed_path+"/Agent{}".format(i))
writer=open("{}/Agent{}/{}.pkl".format(transformed_path, i, "adjacency_matrices"),'wb')
pickle.dump(np.array(adj_matrices)[mask], writer)
writer.close()
writer=open("{}/Agent{}/{}.pkl".format(transformed_path, i, "feature_matrices"),'wb')
pickle.dump(np.array(feature_matrices)[mask], writer)
writer.close()
np.save("{}/Agent{}/{}.npy".format(transformed_path, i, "labels"), labels[mask])
compact = (args.model == 'graphsage')
# Load the data
train_data_set = []
test_data_set = []
feat_dim = 256
num_cats = 5
num_participants = len(os.listdir(transformed_path))-1 # number of client (child) nodes in the distributed network
for folder in os.listdir(transformed_path):
print("Load: {}/{}".format(transformed_path, folder))
loaded_data = get_dataloader(transformed_path+"/"+folder,
compact=compact,
normalize_features=False,
normalize_adj=False)
_, feature_matrices, labels = get_data(transformed_path+"/"+folder)
feat_dim = feature_matrices[0].shape[1]
num_cats = labels[0].shape[0]
print("lenth = %d" % len(loaded_data))
print("feat_dim = %d" % feat_dim)
print("num_cats = %d" % num_cats)
print()
if folder != "test":
train_data_set.append(loaded_data)
print("Train mask")
print(sum(labels))
else:
test_data_set.append(loaded_data)
print("Test mask")
print(sum(labels))
# Initialize the models
device = torch.device("cuda:0" if (torch.cuda.is_available() and args.device == 'cuda:0') else "cpu")
os.mkdir(path +"/"+ args.model)
logFile = open(path +"/"+ args.model + "/log.txt", 'a+')
print("logfile:", path +"/"+ args.model + "/log.txt")
global_model = get_model(args, feat_dim, num_cats)
print(global_model.readout)
print(global_model.readout, file=logFile)
participants = [get_model(args, feat_dim, num_cats) for i in range(num_participants)]
print("len_participants:", len(participants))
for participant_model in participants:
sync_participants(participant_model, global_model)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
for model in participants:
model.to(device=device, dtype=torch.float32, non_blocking=True)
model.train()
global_model.to(device=device, dtype=torch.float32, non_blocking=True)
global_model.train()
train_loader = train_data_set
test_loader = test_data_set
history_train = [[] for i in participants]
history_test = []
history_CM = []
criterion = torch.nn.BCEWithLogitsLoss(reduction='none')
opts = [torch.optim.Adam(model.parameters(), lr=lr) for model in participants]
best_model = None
best_f1 = 0
for e in range(epochs):
for mol_idxs in range(int(len(train_loader[0])/batch_size)):
participants_loss_train = []
for i in range(len(participants)):
batch_loss = calculate_loss(model=participants[i],
dataloader=iter(train_loader[i]),
batch_size=batch_size,
device=device,
criterion=criterion,
is_sage=compact)
optimizer = opts[i]
optimizer.zero_grad()
participants_loss_train.append(batch_loss)
batch_loss.backward()
optimizer.step()
history_train[i].append(batch_loss)
weight_aggregate(global_model, participants)
if mol_idxs % 5 == 0 or mol_idxs==int(len(train_loader[0])/batch_size)-1:
global_loss_test = calculate_loss(model=global_model,
dataloader=iter(test_loader[0]),
batch_size=batch_size*8,
device=device,
criterion=criterion,
is_sage=compact)
acc, f1, cm = acc_f1(global_model, iter(test_loader[0]), device, compact)
print('Train epoch {:^3} at batch {:^5} with global accuracy {:5.4f}, F1 score {:5.4f}, test loss {:5.4f}, participants train loss [{:5.4f}, {:5.4f}, {:5.4f}] [({:2.0f}%)]'.format(
e, mol_idxs,
acc, f1,
global_loss_test,
participants_loss_train[0], participants_loss_train[1], participants_loss_train[2],
mol_idxs / int(len(train_loader[0])/batch_size) * 100), file=logFile)
history_test.append(global_loss_test)
print(cm, file=logFile)
print('Train epoch {:^3} at batch {:^5} with global accuracy {:5.4f}, F1 score {:5.4f}, test loss {:5.4f}, participants train loss [{:5.4f}, {:5.4f}, {:5.4f}] [({:2.0f}%)]'.format(
e, mol_idxs,
acc, f1,
global_loss_test,
participants_loss_train[0], participants_loss_train[1], participants_loss_train[2],
mol_idxs / int(len(train_loader[0])/batch_size) * 100))
history_test.append(global_loss_test)
# print(cm)
history_CM.append(cm)
if f1 > best_f1:
best_model = global_model
best_f1 = f1
print("", file=logFile)
print()
logFile.close()
np.save(path +"/"+ args.model+"/history_CM",np.array(history_CM))
np.save(path +"/"+ args.model+"/history_test",np.array([loss.cpu().detach().numpy() for loss in history_test]))
np.save(path +"/"+ args.model+"/history_train",np.array([[loss.cpu().detach().numpy() for loss in parti] for parti in history_train]))
torch.save(global_model,path +"/"+ args.model+"/global_model.model")
for i, participant in enumerate(participants):
torch.save(participant,path +"/"+ args.model+"/participant_model_{}.model".format(i))
best_model.eval()
best_model.to(device)
with torch.no_grad():
y_pred = []
y_true = []
masks = []
if compact:
for mol_idx, (forest, feature_matrix, label, mask) in enumerate(iter(test_loader[0])):
forest = [level.to(device=device, dtype=torch.long, non_blocking=True) for level in forest]
feature_matrix = feature_matrix.to(device=device, dtype=torch.float32, non_blocking=True)
logits = best_model(forest, feature_matrix)
y_pred.append(nn.Sigmoid()(logits).cpu().numpy())
y_true.append(label.numpy())
masks.append(mask.numpy())
else:
for mol_idx, (adj_matrix, feature_matrix, label, mask) in enumerate(iter(test_loader[0])):
adj_matrix = adj_matrix.to(device=device, dtype=torch.float32, non_blocking=True)
feature_matrix = feature_matrix.to(device=device, dtype=torch.float32, non_blocking=True)
logits = best_model(adj_matrix, feature_matrix)
y_pred.append(nn.Sigmoid()(logits).cpu().numpy())
y_true.append(label.numpy())
masks.append(mask.numpy())
y_pred = np.array(y_pred)
y_true = np.array(y_true)
masks = np.array(masks)
AllPred = np.argmax(y_pred, axis=1)
AllTrue = np.argmax(y_true, axis=1)
PrintScore(AllTrue, AllPred, savePath=path +"/"+ args.model+"/")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = add_args(parser)
path = {
'data':"./data/ISRUC_S3/ISRUC_S3.npz",
'save':args.data_dir+"/",
"cheb_k" : 3,
"disM" : "./data/ISRUC_S3/DistanceMatrix.npy",
"feature" :'./output/Feature_0.npz'
}
transform(path, args.case_name)
train_model(args)
#python fed_experiment.py --model gat --case_name knn --data_dir ./result/ISRUC_S3_knn
#python fed_experiment.py --model gcn --case_name knn --data_dir ./result/ISRUC_S3_knn
#python fed_experiment.py --model graphsage --case_name knn --data_dir ./result/ISRUC_S3_knn
| avg_line_length: 40.080495 | max_line_length: 196 | alphanum_fraction: 0.596941 |

hexsha: 3b94ea132d8d82fdfc9f92dd9539d52ddc3ff2f2 | size: 7,015 | ext: py | lang: Python
max_stars_repo: third_party/maya/lib/pxrUsdMayaGL/testenv/testProxyShapeLiveSurface.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | ["BSD-2-Clause"] | count: 88 | events: 2018-07-13T01:22:00.000Z to 2022-01-16T22:15:27.000Z
max_issues_repo: third_party/maya/lib/pxrUsdMayaGL/testenv/testProxyShapeLiveSurface.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | ["BSD-2-Clause"] | count: 1 | events: 2020-07-07T22:39:42.000Z to 2020-07-07T22:39:42.000Z
max_forks_repo: third_party/maya/lib/pxrUsdMayaGL/testenv/testProxyShapeLiveSurface.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | ["BSD-2-Clause"] | count: 26 | events: 2018-06-06T03:39:22.000Z to 2021-08-28T23:02:42.000Z
#!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr import UsdMaya
# Maya 2017 and later use PyQt5/PySide2 while Maya 2016 and earlier use
# PyQt4/PySide. We test whether we're running in Maya 2017+ by trying to import
# PySide2, which should only be available there. If that succeeds, we import
# the rest of the modules from PySide2. Otherwise, we assume we're in 2016 or
# earlier and we import everything from PySide.
try:
import PySide2
usePySide2 = True
except ImportError:
usePySide2 = False
if usePySide2:
from PySide2 import QtCore
from PySide2.QtTest import QTest
from PySide2.QtWidgets import QWidget
from shiboken2 import wrapInstance
else:
from PySide import QtCore
from PySide.QtTest import QTest
from PySide.QtGui import QWidget
from shiboken import wrapInstance
from maya import OpenMayaUI as OMUI
from maya import cmds
import os
import sys
import unittest
class testProxyShapeLiveSurface(unittest.TestCase):
@classmethod
def setUpClass(cls):
# The test USD data is authored Z-up, so make sure Maya is configured
# that way too.
cmds.upAxis(axis='z')
cmds.loadPlugin('pxrUsd')
def setUp(self):
cmds.file(
os.path.abspath('ProxyShapeLiveSurfaceTest.ma'),
open=True, force=True)
def testObjectPosition(self):
"""
Tests that an object created interactively is positioned correctly on
the live surface.
"""
# Load our reference assembly.
UsdMaya.LoadReferenceAssemblies()
# Create a new custom viewport.
window = cmds.window(widthHeight=(500, 400))
cmds.paneLayout()
panel = cmds.modelPanel()
cmds.modelPanel(panel, edit=True, camera='persp')
cmds.modelEditor(cmds.modelPanel(panel, q=True, modelEditor=True),
edit=True, displayAppearance='smoothShaded', rnm='vp2Renderer')
cmds.showWindow(window)
# Get the viewport widget.
view = OMUI.M3dView()
OMUI.M3dView.getM3dViewFromModelPanel(panel, view)
viewWidget = wrapInstance(long(view.widget()), QWidget)
# Make our assembly live.
cmds.makeLive('Block_1')
# Enter interactive creation context.
cmds.setToolTo('CreatePolyTorusCtx')
# Click in the center of the viewport widget.
QTest.mouseClick(viewWidget, QtCore.Qt.LeftButton, QtCore.Qt.NoModifier,
viewWidget.rect().center())
# Find the torus (it should be called pTorus1).
self.assertTrue(cmds.ls('pTorus1'))
# Check the torus's transform.
# The Block_1 is originally 1 unit deep and centered at the origin, so
# its original top plane is aligned at z=0.5.
# It is scaled 5x, which makes the top plane aligned at z=2.5.
# Then it is translated along z by 4.0, so its top plane is finally
# aligned at z=6.5.
translate = cmds.xform('pTorus1', q=True, t=True)
self.assertAlmostEqual(translate[2], 6.5, places=3)
def testObjectNormal(self):
"""
Tests that an object created interactively by dragging in the viewport
has the correct orientation based on the live surface normal.
"""
from pxr import Gf
# Load our reference assembly.
UsdMaya.LoadReferenceAssemblies()
# Create a new custom viewport.
window = cmds.window(widthHeight=(500, 400))
cmds.paneLayout()
panel = cmds.modelPanel()
cmds.modelPanel(panel, edit=True, camera='persp')
cmds.modelEditor(cmds.modelPanel(panel, q=True, modelEditor=True),
edit=True, displayAppearance='smoothShaded', rnm='vp2Renderer')
cmds.showWindow(window)
# Get the viewport widget.
view = OMUI.M3dView()
OMUI.M3dView.getM3dViewFromModelPanel(panel, view)
viewWidget = wrapInstance(long(view.widget()), QWidget)
# Make our assembly live.
cmds.makeLive('Block_2')
# Enter interactive creation context.
cmds.setToolTo('CreatePolyConeCtx')
# Click in the center of the viewport widget.
QTest.mouseClick(viewWidget, QtCore.Qt.LeftButton, QtCore.Qt.NoModifier,
viewWidget.rect().center())
# Find the cone (it should be called pCone1).
self.assertTrue(cmds.ls('pCone1'))
# Check the cone's rotation.
# Because our scene is Z-axis up, the cone's Z-axis should be aligned
# with Block_2's surface normal (though it might not necessarily have
# the same exact rotation).
rotationAngles = cmds.xform('pCone1', q=True, ro=True)
rotation = (Gf.Rotation(Gf.Vec3d.XAxis(), rotationAngles[0]) *
Gf.Rotation(Gf.Vec3d.YAxis(), rotationAngles[1]) *
Gf.Rotation(Gf.Vec3d.ZAxis(), rotationAngles[2]))
actualZAxis = rotation.TransformDir(Gf.Vec3d.ZAxis())
expectedRotation = (Gf.Rotation(Gf.Vec3d.XAxis(), 75.0) *
Gf.Rotation(Gf.Vec3d.YAxis(), 90.0))
expectedZAxis = expectedRotation.TransformDir(Gf.Vec3d.ZAxis())
# Verify that the error angle between the two axes is less than
# 0.1 degrees (less than ~0.0003 of a revolution, so not bad). That's
# about as close as we're going to get.
errorRotation = Gf.Rotation(actualZAxis, expectedZAxis)
self.assertLess(errorRotation.GetAngle(), 0.1)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(
testProxyShapeLiveSurface)
results = unittest.TextTestRunner(stream=sys.stdout).run(suite)
if results.wasSuccessful():
exitCode = 0
else:
exitCode = 1
# maya running interactively often absorbs all the output. comment out the
# following to prevent maya from exiting and open the script editor to look
# at failures.
cmds.quit(abort=True, exitCode=exitCode)
| avg_line_length: 37.31383 | max_line_length: 80 | alphanum_fraction: 0.673557 |

hexsha: 7232bc1c958207c5977daa5aeda123682d01a739 | size: 4,017 | ext: py | lang: Python
max_stars_repo: generate_csr.py | pagopa/le-azure-acme-tiny | eb1e6a08af72bcc17a971f101c94223093889fb1 | ["MIT"] | count: null | events: null
max_issues_repo: generate_csr.py | pagopa/le-azure-acme-tiny | eb1e6a08af72bcc17a971f101c94223093889fb1 | ["MIT"] | count: null | events: null
max_forks_repo: generate_csr.py | pagopa/le-azure-acme-tiny | eb1e6a08af72bcc17a971f101c94223093889fb1 | ["MIT"] | count: null | events: null
#!/usr/bin/env python3
import argparse
import logging
import textwrap
import sys
import cryptography.hazmat.primitives
import cryptography.x509
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(logging.INFO)
def get_csr(common_name, out, keyout, rsa_key_size):
log = LOGGER
# generate private_key, RSA type
# TODO support ECDSA type of certificates
log.info("Generating a RSA private key...")
private_key = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
public_exponent=65537, # this is the RSA e exponent. DO NOT CHANGE THIS VALUE!
key_size=rsa_key_size
)
# save private_key
with open(keyout, "wb") as f:
f.write(
private_key.private_bytes(
encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM,
format=cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8,
encryption_algorithm=cryptography.hazmat.primitives.serialization.NoEncryption()
)
)
log.info("Private key saved to %s", keyout)
# CSR creation
log.info("Building a Certificate Signing Request (CSR)...")
builder = cryptography.x509.CertificateSigningRequestBuilder()
# set Common Name
builder = builder.subject_name(cryptography.x509.Name(
[cryptography.x509.NameAttribute(
cryptography.x509.oid.NameOID.COMMON_NAME, common_name)]
))
# set Basic Constraints
builder = builder.add_extension(cryptography.x509.BasicConstraints(
ca=False, path_length=None), critical=True)
# set Key Usage
builder = builder.add_extension(cryptography.x509.KeyUsage(
digital_signature=True, key_encipherment=True, content_commitment=False,
data_encipherment=False, key_agreement=False, key_cert_sign=False, crl_sign=False,
encipher_only=False, decipher_only=False),
critical=True
)
# set Extended Key Usage
builder = builder.add_extension(
cryptography.x509.ExtendedKeyUsage(
[cryptography.x509.oid.ExtendedKeyUsageOID.SERVER_AUTH]),
critical=False
)
# set Common Name in SAN field too
builder = builder.add_extension(
cryptography.x509.SubjectAlternativeName(
[cryptography.x509.DNSName(common_name)]),
critical=False
)
# sign the CSR with private key
csr = builder.sign(
private_key, cryptography.hazmat.primitives.hashes.SHA256())
# save CSR to file
with open(out, "wb") as f:
f.write(
csr.public_bytes(
encoding=cryptography.hazmat.primitives.serialization.Encoding.DER)
)
log.info("CSR saved to %s", out)
def main(argv=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
This script generates a CSR in DER format.
Example Usage:
python3 generate_csr.py --common-name example.com --keyout csr.key --out csr.der --rsa-key-size 2048
""")
)
parser.add_argument("--common-name", required=True,
help="X509 Common Name string")
parser.add_argument("--quiet", action="store_const",
const=logging.ERROR, help="Suppress output except for errors")
parser.add_argument("--rsa-key-size", default=2048, type=int,
choices=[2048, 3072, 4096], help="RSA key size in bits")
parser.add_argument("--out", default="csr.der",
help="Destination of the CSR")
parser.add_argument("--keyout", default="csr.key",
help="Destination of the CSR private key")
args = parser.parse_args(argv)
LOGGER.setLevel(args.quiet or LOGGER.level)
# TODO add support for arbitrary SAN fields
get_csr(args.common_name, args.out, args.keyout, args.rsa_key_size)
if __name__ == "__main__":
main(sys.argv[1:])
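# --- Added helper (not part of the original script) -----------------------------------
# Loads a DER-encoded CSR back from disk and logs its subject; a quick way to
# sanity-check the output of get_csr(). Assumes a reasonably recent `cryptography`
# release where load_der_x509_csr does not require an explicit backend.
def _inspect_csr(path="csr.der"):
    with open(path, "rb") as f:
        csr = cryptography.x509.load_der_x509_csr(f.read())
    LOGGER.info("CSR subject: %s", csr.subject.rfc4514_string())
    return csr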
| avg_line_length: 36.189189 | max_line_length: 112 | alphanum_fraction: 0.669156 |

hexsha: 4b028708c31ecb3d0a7d4a47ab3ba64a875dc025 | size: 10,708 | ext: py | lang: Python
max_stars_repo: GNN/gnn.py | BrandonBian/OSDR-GNN | 0f631d5ddad77df7260c11de3507af014f9447ed | ["MIT"] | count: null | events: null
max_issues_repo: GNN/gnn.py | BrandonBian/OSDR-GNN | 0f631d5ddad77df7260c11de3507af014f9447ed | ["MIT"] | count: null | events: null
max_forks_repo: GNN/gnn.py | BrandonBian/OSDR-GNN | 0f631d5ddad77df7260c11de3507af014f9447ed | ["MIT"] | count: null | events: null
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn.inits import glorot
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, softmax, degree
class GIN(MessagePassing):
def __init__(self, emb_dim):
super(GIN, self).__init__()
self.mlp = torch.nn.Sequential(
nn.Linear(emb_dim, 2 * emb_dim),
nn.BatchNorm1d(2 * emb_dim),
nn.ReLU(),
nn.Linear(2 * emb_dim, emb_dim))
def forward(self, x, edge_index, edge_attr): # Here
return self.propagate(edge_index=edge_index, x=x, edge_attr=edge_attr)
def message(self, x_j, edge_attr):
return x_j + edge_attr
def update(self, aggr_out):
return self.mlp(aggr_out) # MLP
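# Note (added commentary): MessagePassing defaults to "add" aggregation, so this layer
# sums the edge-augmented neighbor features (x_j + edge_attr) and then applies the MLP,
# i.e. the GIN-style "sum, then transform" update.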
class GCN(MessagePassing):
def __init__(self, emb_dim):
super(GCN, self).__init__()
self.linear = nn.Linear(emb_dim, emb_dim)
def forward(self, x, edge_index, edge_attr):
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
self_loop_attr = torch.zeros((x.size(0), edge_attr.size(1)), dtype=edge_attr.dtype).to(x.device)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
row, col = edge_index
deg = degree(col, x.size(0), dtype=x.dtype)
deg_inv_sqrt = deg.pow(-0.5)
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
return self.propagate(edge_index, x=x, edge_attr=edge_attr, norm=norm)
def message(self, x_j, edge_attr, norm):
return norm.view(-1, 1) * (x_j + edge_attr)
class GAT(MessagePassing):
def __init__(self, emb_dim):
super(GAT, self).__init__()
self.att = nn.Parameter(torch.Tensor(1, 2 * emb_dim))
glorot(self.att)
def forward(self, x, edge_index, edge_attr):
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
self_loop_attr = torch.zeros((x.size(0), edge_attr.size(1)), dtype=edge_attr.dtype).to(x.device)
edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
return self.propagate(edge_index=edge_index, x=x, edge_attr=edge_attr)
def message(self, edge_index, x_i, x_j, edge_attr):
x_j = x_j + edge_attr
alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
alpha = F.leaky_relu(alpha, 0.2)
alpha = softmax(alpha, edge_index[0])
return x_j * alpha.view(-1, 1)
class GraphSAGE(MessagePassing):
def __init__(self, emb_dim):
super(GraphSAGE, self).__init__(aggr='mean')
self.linear = nn.Linear(emb_dim, emb_dim)
def forward(self, x, edge_index, edge_attr):
return self.propagate(edge_index=edge_index, x=x, edge_attr=edge_attr)
def message(self, x_j, edge_attr):
return x_j + edge_attr
def update(self, aggr_out):
return F.normalize(aggr_out, p=2, dim=-1)
class GraphNN(torch.nn.Module): # TODO: some hierarchical information that we need to change?
def __init__(self, node_dim, edge_dim, hid_dim, hier_dim, num_class, num_layers, network):
super(GraphNN, self).__init__()
self.drop = nn.Dropout(p=0.1)
self.network = network
self.hier_dim = hier_dim
self.num_class = num_class
self.num_layers = num_layers
self.linear_node = nn.Linear(node_dim, hid_dim)
self.linear_edge = nn.Linear(edge_dim, hid_dim)
self.output_layer = nn.Sequential(
# TODO: if everything works fine, then hier_dim = 0 for material predictions.
nn.Linear(hier_dim + hid_dim, 2 * hid_dim),
nn.BatchNorm1d(2 * hid_dim),
nn.PReLU(),
nn.Linear(2 * hid_dim, num_class)
)
self.layers = nn.ModuleList()
self.lin_layers = nn.ModuleList()
for layer in range(self.num_layers):
if self.network == 'gin':
self.layers.append(GIN(hid_dim))
elif self.network == 'gcn':
self.layers.append(GCN(hid_dim))
elif self.network == 'gat':
self.layers.append(GAT(hid_dim))
elif self.network == 'sage':
self.layers.append(GraphSAGE(hid_dim))
self.lin_layers.append(nn.Linear(hid_dim, hid_dim))
def forward(self, x, edge_index, e, c): # TODO: this part is essential, figure out what is it doing?
# GraphNN forward() parameters:
# x = x - node features
# edge = edge_index - 2 nodes to each edge (compressed version of the adj matrix)
# e = e - edge features
# c = 0/y1/y2 - ground truth input predictions (Teacher forcing)
# different feature space, project to similar feature dimension
h = self.linear_node(x) # linear transformation (project from node dimension to hidden dimension)
e = self.linear_edge(e) # linear transformation
h_list = [h]
for layer in range(self.num_layers): # for each GNN layer
if self.network != 'gin': # default is GIN, so this part can be ignored
h = self.lin_layers[layer](h_list[layer])
h = self.layers[layer](h_list[layer], edge_index, e) # Passing data to one layer of GNN
h = self.drop(F.leaky_relu(h, negative_slope=0.2)) # Take node embeddings, activation, drop-out
h_list.append(h)
# take node embeddings of all layers and take sum (residual network - uses skip connections)
h_list = [h.unsqueeze_(0) for h in h_list]
h = torch.sum(torch.cat(h_list), 0)
# if the previous two lines are commented out, h is just the embedding from the last layer
if self.hier_dim > 0:
h = self.output_layer(torch.cat((h, c), dim=-1))
else:
h = self.output_layer(h)
return h
class HierarchicalGNN(nn.Module):
def __init__(self, node_dim, edge_dim, hid_dim, num_class_l1, num_class_l2, num_class_l3, num_layers, network):
super(HierarchicalGNN, self).__init__()
self.gnn_level1 = GraphNN(node_dim, edge_dim, hid_dim, 0, num_class_l1, num_layers, network)
self.gnn_level2 = GraphNN(node_dim, edge_dim, hid_dim, num_class_l1, num_class_l2, num_layers, network)
self.gnn_level3 = GraphNN(node_dim, edge_dim, hid_dim, num_class_l2, num_class_l3, num_layers, network)
def forward(self, x, edge_index, e, y1, y2):
# gnn.forward() parameters:
# x = batch.x.float() - node attributes
# edge_index = batch.edge_index - 2 nodes to each edge
# e = batch.e.float() - edge attributes
# y1 = F.one_hot(batch.y1, self.num_class_l1) - tier 1 function
# y2 = F.one_hot(batch.y2, self.num_class_l2) - tier 2 function
# GraphNN forward() parameters:
# x = x - node attributes
# edge = edge_index - 2 nodes to each edge
# e = e - edge attributes
# c = 0/y1/y2 - ground truth input predictions (Teacher forcing)
# compute tier predictions (softmax?)
# The first level has no predecessor, so a zero vector is used as its input predictions
# TODO: are these the MLPs?
yp_l1 = self.gnn_level1(x, edge_index, e, 0)
yp_l2 = self.gnn_level2(x, edge_index, e, y1)
yp_l3 = self.gnn_level3(x, edge_index, e, y2)
return yp_l1, yp_l2, yp_l3 # returns tier predictions
@torch.no_grad()
def predict(self, x, edge_index, e):
yp_l1 = F.softmax(self.gnn_level1(x, edge_index, e, 0), dim=-1)
yp_l2 = F.softmax(self.gnn_level2(x, edge_index, e, yp_l1), dim=-1)
yp_l3 = F.softmax(self.gnn_level3(x, edge_index, e, yp_l2), dim=-1)
return yp_l1, yp_l2, yp_l3
class CustomizedGNN(nn.Module):
def __init__(self, node_dim, edge_dim, hid_dim, num_materials, num_layers, network):
super(CustomizedGNN, self).__init__()
# TODO: num_materials for GraphNN, only one gnn level needed - done
self.gnn_level1 = GraphNN(node_dim, edge_dim, hid_dim, 0, num_materials, num_layers, network)
def forward(self, x, edge_index, e):
# TODO: acquire the material predictions, only one gnn layer - done
material_predictions = self.gnn_level1(x, edge_index, e, 0) # 0 here emulate input predictions
return material_predictions
@torch.no_grad()
def predict(self, x, edge_index, e):
# TODO: acquire the material predictions, only one gnn layer - done
material_predictions = F.softmax(self.gnn_level1(x, edge_index, e, 0), dim=-1)
return material_predictions
class MLP(nn.Module):
def __init__(self, node_dim, hid_dim, num_class_l1, num_class_l2, num_class_l3):
super(MLP, self).__init__()
self.mlp_level1 = nn.Sequential(
nn.Linear(node_dim, hid_dim),
nn.ReLU(),
nn.Dropout(p=0.1),
nn.Linear(hid_dim, num_class_l1)
)
self.mlp_level2 = nn.Sequential(
nn.Linear(node_dim + num_class_l1, hid_dim),
nn.ReLU(),
nn.Dropout(p=0.1),
nn.Linear(hid_dim, num_class_l2)
)
self.mlp_level3 = nn.Sequential(
nn.Linear(node_dim + num_class_l2, hid_dim),
nn.ReLU(),
nn.Dropout(p=0.1),
nn.Linear(hid_dim, num_class_l3)
)
def forward(self, x, y1, y2):
yp_l1 = self.mlp_level1(x)
yp_l2 = self.mlp_level2(torch.cat((x, y1), dim=-1))
yp_l3 = self.mlp_level3(torch.cat((x, y2), dim=-1))
return yp_l1, yp_l2, yp_l3
@torch.no_grad()
def predict(self, x):
yp_l1 = F.softmax(self.mlp_level1(x), dim=-1)
yp_l2 = F.softmax(self.mlp_level2(torch.cat((x, yp_l1), dim=-1)), dim=-1)
yp_l3 = F.softmax(self.mlp_level3(torch.cat((x, yp_l2), dim=-1)), dim=-1)
return yp_l1, yp_l2, yp_l3
class Linear(nn.Module):
def __init__(self, node_dim, hid_dim, num_class_l1, num_class_l2, num_class_l3):
super(Linear, self).__init__()
self.linear_l1 = nn.Linear(node_dim, num_class_l1)
self.linear_l2 = nn.Linear(node_dim + num_class_l1, num_class_l2)
self.linear_l3 = nn.Linear(node_dim + num_class_l2, num_class_l3)
def forward(self, x, y1, y2):
yp_l1 = self.linear_l1(x)
yp_l2 = self.linear_l2(torch.cat((x, y1), dim=-1))
yp_l3 = self.linear_l3(torch.cat((x, y2), dim=-1))
return yp_l1, yp_l2, yp_l3
@torch.no_grad()
def predict(self, x):
yp_l1 = F.softmax(self.linear_l1(x), dim=-1)
yp_l2 = F.softmax(self.linear_l2(torch.cat((x, yp_l1), dim=-1)), dim=-1)
yp_l3 = F.softmax(self.linear_l3(torch.cat((x, yp_l2), dim=-1)), dim=-1)
return yp_l1, yp_l2, yp_l3
if __name__ == "__main__":
pass
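# --- Added usage sketch (not part of the original OSDR-GNN module) --------------------
# Builds a tiny random graph and runs one GraphNN forward pass; all sizes below are
# illustrative. Requires torch and torch_geometric, as imported above.
def _graphnn_demo():
    num_nodes, num_edges = 6, 10
    node_dim, edge_dim, hid_dim, num_class = 8, 4, 16, 3
    x = torch.randn(num_nodes, node_dim)
    e = torch.randn(num_edges, edge_dim)
    edge_index = torch.randint(0, num_nodes, (2, num_edges))
    model = GraphNN(node_dim, edge_dim, hid_dim, hier_dim=0, num_class=num_class,
                    num_layers=2, network='gin')
    model.eval()  # BatchNorm layers use running stats, so a single small batch is fine
    with torch.no_grad():
        logits = model(x, edge_index, e, 0)  # c is ignored when hier_dim == 0
    assert logits.shape == (num_nodes, num_class)
    return logits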
| avg_line_length: 40.407547 | max_line_length: 115 | alphanum_fraction: 0.631117 |

hexsha: 253ae8541403b78175a056e3981770d02ec2b4a9 | size: 17,584 | ext: py | lang: Python
max_stars_repo: patrickstar/core/preprocess.py | zhuzilin/PatrickStar | 72daf8dbded07e03d911db6369b075c9bfcd5245 | ["BSD-3-Clause"] | count: null | events: null
max_issues_repo: patrickstar/core/preprocess.py | zhuzilin/PatrickStar | 72daf8dbded07e03d911db6369b075c9bfcd5245 | ["BSD-3-Clause"] | count: 2 | events: 2022-03-15T06:57:11.000Z to 2022-03-15T07:55:31.000Z
max_forks_repo: patrickstar/core/preprocess.py | zhuzilin/PatrickStar | 72daf8dbded07e03d911db6369b075c9bfcd5245 | ["BSD-3-Clause"] | count: null | events: null
# BSD 3-Clause License
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the psutil authors nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import contextlib
import functools
import torch
from patrickstar.core import PatrickStarClient, AccessType, ChunkType
from patrickstar.core import register_param, is_param_registered, ParamType
from patrickstar.manager import _runtime_config
from patrickstar.ops import Embedding
from patrickstar.utils import logger, print_rank, get_rank, get_world_size
from patrickstar.utils import see_memory_usage
_orig_torch_empty = torch.empty
def empty_cpu_tensor_half(*size, **kwargs):
if "device" not in kwargs.keys():
kwargs["device"] = torch.device("cpu:0")
tensor = _orig_torch_empty(*size, **kwargs)
if tensor.is_floating_point():
return tensor.half()
else:
return tensor
def new_cpu_tensor_half(cls, *args):
device = torch.device("cpu:0")
tensor = torch.ones((1, 1), device=device).new_empty(*args).half()
print_rank(
f"During model initialization, a new tensor of shape {tensor.shape} is created."
)
if tensor.is_floating_point():
return tensor.half()
else:
return tensor
def empty_cpu_tensor(*size, **kwargs):
if "device" not in kwargs.keys():
kwargs["device"] = torch.device("cpu:0")
tensor = _orig_torch_empty(*size, **kwargs)
return tensor
def new_cpu_tensor(cls, *args):
device = torch.device("cpu:0")
tensor = torch.ones((1, 1), device=device).new_empty(*args)
return tensor
@contextlib.contextmanager
def torch_scope(do_allreduce=True):
r"""All parameters initialized in this scope will not be managed in chunks."""
_runtime_config.push()
_runtime_config.config["use_chunk"] = False
_runtime_config.config["do_allreduce"] = do_allreduce
yield
_runtime_config.pop()
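# Illustrative usage sketch (not part of the original file): how torch_scope might be
# used to keep a submodule out of chunk management. `MyTorchManagedHead` is a
# hypothetical nn.Module used only for illustration.
#
#     with torch_scope(do_allreduce=False):
#         head = MyTorchManagedHead(hidden_size, num_labels)
#
# Parameters created inside the `with` block are registered as TORCH_BASED params
# (see `_post_init_method` below) rather than being placed into chunks.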
def cast_forward(module, dtype):
if not isinstance(dtype, torch.dtype):
        raise ValueError("dtype should be a torch.dtype.")
old_forward = module.forward
def forward(*args, **kwargs):
casted_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and torch.is_floating_point(arg):
casted_args.append(arg.to(dtype))
else:
casted_args.append(arg)
casted_kwargs = {}
for k, v in kwargs.items():
if isinstance(v, torch.Tensor) and torch.is_floating_point(v):
casted_kwargs[k] = v.to(dtype)
else:
casted_kwargs[k] = v
return old_forward(*casted_args, **casted_kwargs)
module.forward = forward
# Inserts _post_init_method at the end of init method
# for all sub classes of torch.nn.Module
class InsertPostInitMethodToModuleSubClasses(object):
def __init__(self, config=None, dtype=None):
self._set_dtype(config, dtype)
assert self.dtype in [
torch.half,
torch.float,
], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.float]"
def __enter__(self):
def preprocess_after(f):
@functools.wraps(f)
def wrapper(module, *args, **kwargs):
print_rank(
f"Before initializing {module.__class__.__name__}", force=False
)
f(module, *args, **kwargs)
self._post_init_method(module)
print_rank(
f"After initializing followed by post init for {module.__class__.__name__}",
force=False,
)
return wrapper
def _enable_class(cls):
cls._old_init = cls.__init__
cls.__init__ = preprocess_after(cls.__init__)
def _init_subclass(cls, **kwargs):
cls.__init__ = preprocess_after(cls.__init__)
# Replace .__init__() for all existing subclasses of torch.nn.Module
for subclass in torch.nn.modules.module.Module.__subclasses__():
if subclass == torch.nn.Embedding:
_enable_class(Embedding)
else:
_enable_class(subclass)
        # holding on to the current __init_subclass__ for exit
torch.nn.modules.module.Module._old_init_subclass = (
torch.nn.modules.module.Module.__init_subclass__
)
torch.Tensor.__old_new__ = torch.Tensor.__new__
# Replace .__init__() for future subclasses of torch.nn.Module
torch.nn.modules.module.Module.__init_subclass__ = classmethod(_init_subclass)
if self.dtype == torch.half:
torch.Tensor.__new__ = new_cpu_tensor_half
torch.empty = empty_cpu_tensor_half
else:
torch.Tensor.__new__ = new_cpu_tensor
torch.empty = empty_cpu_tensor
self._pre_context_exec()
def __exit__(self, exc_type, exc_value, traceback):
def _disable_class(cls):
cls.__init__ = cls._old_init
# Replace .__init__() for all existing subclasses of torch.nn.Module
for subclass in torch.nn.modules.module.Module.__subclasses__():
if subclass == torch.nn.Embedding:
_disable_class(Embedding)
else:
_disable_class(subclass)
# Replace .__init__() for future subclasses of torch.nn.Module
torch.nn.modules.module.Module.__init_subclass__ = (
torch.nn.modules.module.Module._old_init_subclass
)
torch.Tensor.__new__ = torch.Tensor.__old_new__
torch.empty = _orig_torch_empty
self._post_context_exec()
        # Now that we have cleaned up the metaclass injection, let any exception propagate (returning False re-raises it).
if exc_type is not None:
return False
# To be implemented by inheriting classes
def _post_init_method(self, module):
pass
def _pre_context_exec(self):
pass
def _post_context_exec(self):
pass
def _set_dtype(self, ds_config, dtype):
if dtype is None:
self.dtype = torch.half
else:
self.dtype = dtype
class PSPreProcessCtx(InsertPostInitMethodToModuleSubClasses):
"""
    A context manager used to initialize the model.
"""
def __init__(
self,
client: PatrickStarClient,
release_after_init=False,
use_cpu_embedding=False,
dtype=None,
):
super().__init__(config=None, dtype=dtype)
self.rank = get_rank()
self.world_size = get_world_size()
self.client = client
self.dummy_param_list = []
self.param_idx = 0
self.release_after_init = release_after_init
self.use_cpu_embedding = use_cpu_embedding
self.submodule_id = -1
def _pre_context_exec(self):
Embedding.use_cpu = self.use_cpu_embedding
def _new(cls, *args, **kwargs):
embedding = object.__new__(Embedding)
return embedding
torch.nn.Embedding.__new__ = _new
def _post_context_exec(self):
"""The callback function when the context exits.
1. Copy param.data to fp16 and fp32 chunk based params.
        2. Append dummy chunks so that the number of chunks is an integer multiple of
           the number of processes.
3. Add a dummy param at the start of CPU Embedding for huggingface.
"""
logger.info("Post Model Init Context")
def _origin_new(cls, *arg, **kwargs):
return object.__new__(cls)
torch.nn.Embedding.__new__ = _origin_new
if Embedding.use_cpu:
for instance in Embedding.instances:
                # A workaround for huggingface.
                # Huggingface uses the dtype of the first parameter as the dtype
                # of the module, and we need the module to be identified as fp16
                # for mixed precision training in patrickstar.
                # However, when use_cpu_embedding is True, the embedding weight
                # remains fp32 (otherwise it causes errors on older versions of pytorch).
                # As the embedding is usually the first submodule, we insert a
                # dummy fp16 Parameter as a placeholder.
#
# TODO(zilinzhu) Figure out why dummy in the __init__ of Embedding will
# cause numeric error.
instance.dummy = torch.nn.Parameter(
torch.tensor([], dtype=torch.half), requires_grad=False
)
register_param(
instance.dummy,
ParamType.TORCH_BASED,
torch.half,
"embedding_dummy",
)
instance._parameters.move_to_end("dummy", last=False)
            # Clear these class members so that their elements can be garbage collected.
Embedding.instances = []
Embedding.use_cpu = False
chunk_num = 0
for param_fp16_chunk_id, param_fp32_chunk_id in zip(
self.client.chunk_ids_generator(ChunkType.PARAM_FP16),
self.client.chunk_ids_generator(ChunkType.PARAM_FP32),
):
if self.client.chunk_tensor_index.is_local_chunk(param_fp16_chunk_id):
for param_fp16, param_fp32 in zip(
self.client.chunk_tensor_index.params_generator(
param_fp16_chunk_id
),
self.client.chunk_tensor_index.params_generator(
param_fp32_chunk_id
),
):
if is_param_registered(param_fp32) and is_param_registered(
param_fp16
):
ps_data_fp16 = self.client.access_data(
param_fp16, torch.device("cpu:0")
)
ps_data_fp32 = self.client.access_data(
param_fp32, torch.device("cpu:0")
)
# Here the dtype of param_fp16 is actually fp32.
ps_data_fp16.copy_(param_fp16.data)
ps_data_fp32.copy_(param_fp16.data)
self.client.release_data(param_fp16)
self.client.release_data(param_fp32)
param_fp16 = param_fp16.to(torch.half)
else:
for param_fp16 in self.client.chunk_tensor_index.params_generator(
param_fp16_chunk_id
):
assert not self.client.is_local_param(param_fp16, AccessType.DATA)
# When release_after_init is True, we will release the remote
# param tensor here.
# When release_after_init is False, this will help cast dtype of
# remote params to torch.half (See the NOTE below).
param_fp16.data = torch.tensor(
[], dtype=torch.half, device=param_fp16.device
)
chunk_num += 1
world_size = get_world_size()
logger.info(f"param fp16 chunk num {chunk_num}")
while chunk_num % world_size != 0:
self.client.append_dummy_chunk(torch.half, ChunkType.PARAM_FP16)
chunk_num += 1
def _post_init_method(self, module):
r"""The function to call at the end of the constructor of each nn.Module.
The main functionality is registering the params to chunks and
        removing the remote tensor if `release_after_init` is False.
"""
self.submodule_id += 1
see_memory_usage(
f"Before converting parmas in {module.__class__.__name__}", force=False
)
if self.use_cpu_embedding:
# In CPU embedding optimization,
# the embedding is managed by torch instead of chunk.
if module.__class__.__name__ == "Embedding":
logger.debug(
f"** Converting Maintain PyTorch Params in {module.__class__.__name__}"
)
for name, param in module.named_parameters(recurse=False):
param_fp32 = torch.nn.Parameter(param.data.clone())
register_param(
param, ParamType.TORCH_BASED, torch.float, f"embedding_{name}"
)
self.client.torch_param_allreduce_list.append(param)
return
print_rank(f"Converting Params in {module.__class__.__name__}", force=False)
if not _runtime_config.use_chunk:
for name, param in module.named_parameters(recurse=False):
name = f"{module.__class__.__name__}.{name}_{self.param_idx}"
register_param(param, ParamType.TORCH_BASED, torch.float, name)
if _runtime_config.do_allreduce:
self.client.torch_param_allreduce_list.append(param)
# We need to cast the inputs to fp32 for the unmanaged modules.
cast_forward(module, torch.float)
return
# The granularity of `post_init_method` is nn.Module, e.g. BertAttention.
# For every process, we will initialize the params.
# NOTE() The order of params in model initialization is a bit different from the
# the one in optimizer parameter group.
param_fp16_list = []
param_fp32_list = []
for name, param in module.named_parameters(recurse=False):
name = f"{module.__class__.__name__}.{name}_{self.param_idx}"
register_param(param, ParamType.CHUNK_BASED, torch.half, name)
self.param_idx += 1
param_fp16_list.append(param)
logger.debug(
f"** Converting Params {name} in module id {self.submodule_id}"
)
# Append a tensor to the param fp32 chunk list.
# Before that, we have to build a fp32 param.
param_fp32 = torch.nn.Parameter(
torch.tensor([], dtype=torch.float, device=torch.device("cpu:0")),
requires_grad=False,
)
param_fp32_list.append(param_fp32)
register_param(
param_fp32, ParamType.CHUNK_BASED, torch.float, f"{name}_fp32"
)
param_fp32.ps_attr.reset_shape(param.shape)
self.client.param_fp16_to_param_fp32_map[param] = param_fp32
self.client.chunk_based_param_fp16.append(param)
self.client.append_tensor(
param_fp16_list, torch.half, AccessType.DATA, ChunkType.PARAM_FP16
)
self.client.append_tensor(
param_fp32_list, torch.float, AccessType.DATA, ChunkType.PARAM_FP32
)
for param_fp16, param_fp32 in zip(param_fp16_list, param_fp32_list):
# Delete the memory of non local tensors
if not self.client.is_local_param(param_fp16, AccessType.DATA):
param_fp16.ps_attr._is_local = False
param_fp32.ps_attr._is_local = False
                # TODO(jiaruifang) fix the distributed init bug.
                # Correctness checks will fail when release_after_init is False,
                # because releasing the tensor here makes the random seed generator
                # behave differently (fewer random numbers are generated).
                # NOTE(why dtype is torch.float rather than torch.half):
                # PyTorch versions lower than 1.5 cannot initialize torch.half
                # tensors on CPU. So although param_fp16 is an fp16 param,
                # its pytorch dtype is still float.
if not self.release_after_init:
# Here we use a non-empty tensor for huggingface. Because it
# needs to initialize the weight for padding_idx.
param_fp16.data = torch.tensor(
[0], dtype=torch.float, device=param_fp16.device
)
else:
param_fp16.ps_attr._is_local = True
param_fp32.ps_attr._is_local = True
cast_forward(module, torch.half)
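# Illustrative usage sketch (not part of the original file): constructing a model under
# PSPreProcessCtx so that its parameters are registered into chunks at init time. The
# client construction arguments and `MyModel` are hypothetical placeholders.
#
#     client = PatrickStarClient(...)  # assumed to be configured elsewhere
#     with PSPreProcessCtx(client, release_after_init=False, use_cpu_embedding=True):
#         model = MyModel(config)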
| 40.422989
| 96
| 0.618574
|
f6338d6b5b80188124ec43f4abcef9d222552160
| 2,370
|
py
|
Python
|
track_model/train_track_model.py
|
QUVA-Lab/lang-tracker
|
6cb3630471765565b6f2d34a160f0cd51d95a082
|
[
"BSD-2-Clause-FreeBSD"
] | 31
|
2017-09-13T13:40:59.000Z
|
2022-01-25T16:55:19.000Z
|
track_model/train_track_model.py
|
zhenyangli/lang-tracker
|
dddd808a22582573ab0a5e4c3dbf0ba054e42d61
|
[
"BSD-3-Clause"
] | 4
|
2017-09-14T01:56:58.000Z
|
2021-01-28T00:58:58.000Z
|
track_model/train_track_model.py
|
QUVA-Lab/lang-tracker
|
6cb3630471765565b6f2d34a160f0cd51d95a082
|
[
"BSD-2-Clause-FreeBSD"
] | 9
|
2017-09-28T03:22:08.000Z
|
2021-01-19T10:56:44.000Z
|
import caffe
import numpy as np
import os
import sys
import track_model_train as track_model
import train_config
def compute_accuracy(scores, labels):
is_pos = (labels != 0)
is_neg = np.logical_not(is_pos)
num_all = labels.shape[0]
num_pos = np.sum(is_pos)
num_neg = num_all - num_pos
is_correct = np.logical_xor(scores < 0, is_pos)
accuracy_all = np.sum(is_correct) / float(num_all)
accuracy_pos = np.sum(is_correct[is_pos]) / float(num_pos)
accuracy_neg = np.sum(is_correct[is_neg]) / float(num_neg)
return accuracy_all, accuracy_pos, accuracy_neg
def train(config):
with open('./track_model/proto_train.prototxt', 'w') as f:
f.write(str(track_model.generate_model('train', config)))
caffe.set_device(config.gpu_id)
caffe.set_mode_gpu()
solver = caffe.get_solver('./track_model/solver_sgd.prototxt')
solver.net.copy_from(config.weights)
#solver.net.save('./snapshots/track_model/_iter_0.caffemodel')
#solver.restore('./snapshots/track_model/_iter_50000.solverstate')
loss_avg = 0.0
avg_accuracy_all, avg_accuracy_pos, avg_accuracy_neg = 0, 0, 0
decay = 0.99
for it in range(config.max_iter):
solver.step(1)
loss_val = solver.net.blobs['loss'].data
#scores_val = solver.net.blobs['fcn_scaled_scores'].data.copy()
#label_val = solver.net.blobs['label'].data.copy()
#import ipdb; ipdb.set_trace()
loss_avg = decay*loss_avg + (1-decay)*loss_val
if it % config.iter_display == 0:
print('\titer = %d, cls_loss (cur) = %f, cls_loss (avg) = %f'
% (it, loss_val, loss_avg))
# Accuracy
#accuracy_all, accuracy_pos, accuracy_neg = compute_accuracy(scores_val, label_val)
#avg_accuracy_all = decay*avg_accuracy_all + (1-decay)*accuracy_all
#avg_accuracy_pos = decay*avg_accuracy_pos + (1-decay)*accuracy_pos
#avg_accuracy_neg = decay*avg_accuracy_neg + (1-decay)*accuracy_neg
#print('\titer = %d, accuracy (cur) = %f (all), %f (pos), %f (neg)'
# % (it, accuracy_all, accuracy_pos, accuracy_neg))
#print('\titer = %d, accuracy (avg) = %f (all), %f (pos), %f (neg)'
# % (it, avg_accuracy_all, avg_accuracy_pos, avg_accuracy_neg))
if __name__ == '__main__':
config = train_config.Config()
train(config)
| 35.909091
| 91
| 0.662447
|
e1dcda2647bcaf756ef5188b8343f9bd78166957
| 55,989
|
py
|
Python
|
python/ccxt/liquid.py
|
ive-a-dream/ccxt
|
1c6ec43f6d577f95610f5e4c01354bb91e8633c1
|
[
"MIT"
] | null | null | null |
python/ccxt/liquid.py
|
ive-a-dream/ccxt
|
1c6ec43f6d577f95610f5e4c01354bb91e8633c1
|
[
"MIT"
] | null | null | null |
python/ccxt/liquid.py
|
ive-a-dream/ccxt
|
1c6ec43f6d577f95610f5e4c01354bb91e8633c1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class liquid(Exchange):
def describe(self):
return self.deep_extend(super(liquid, self).describe(), {
'id': 'liquid',
'name': 'Liquid',
'countries': ['JP', 'CN', 'TW'],
'version': '2',
'rateLimit': 1000,
'has': {
'CORS': None,
'spot': True,
'margin': None, # has but not fully implemented
'swap': None, # has but not fully implemented
'future': False,
'option': False,
'cancelOrder': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/45798859-1a872600-bcb4-11e8-8746-69291ce87b04.jpg',
'api': 'https://api.liquid.com',
'www': 'https://www.liquid.com',
'doc': [
'https://developers.liquid.com',
],
'fees': 'https://help.liquid.com/getting-started-with-liquid/the-platform/fee-structure',
'referral': 'https://www.liquid.com/sign-up/?affiliate=SbzC62lt30976',
},
'api': {
'public': {
'get': [
'currencies',
'products',
'products/{id}',
'products/{id}/price_levels',
'executions',
'ir_ladders/{currency}',
'fees', # add fetchFees, fetchTradingFees, fetchFundingFees
],
},
'private': {
'get': [
'accounts', # undocumented https://github.com/ccxt/ccxt/pull/7493
'accounts/balance',
'accounts/main_asset',
'accounts/{id}',
'accounts/{currency}/reserved_balance_details',
'crypto_accounts', # add fetchAccounts
'crypto_withdrawal',
'crypto_withdrawals',
'crypto_withdrawals/crypto_networks',
'executions/me',
'fiat_accounts', # add fetchAccounts
'fund_infos', # add fetchDeposits
'loan_bids',
'loans',
'orders',
'orders/{id}',
'orders/{id}/trades', # add fetchOrderTrades
'trades',
'trades/{id}/loans',
'trading_accounts',
'trading_accounts/{id}',
'transactions',
'withdrawals', # add fetchWithdrawals
'user/fee_tier',
'user/fees',
'trading_accounts/{id}',
'bank_accounts',
'accounts/{currency}/reserved_balance_details',
],
'post': [
'crypto_withdrawals',
'fund_infos',
'fiat_accounts',
'loan_bids',
'orders',
'withdrawals',
'fees/estimate',
],
'put': [
'crypto_withdrawal/{id}/cancel',
'loan_bids/{id}/close',
'loans/{id}',
'orders/{id}', # add editOrder
'orders/{id}/cancel',
'trades/{id}',
'trades/{id}/adjust_margin',
'trades/{id}/close',
'trades/close_all',
'trading_accounts/{id}',
'withdrawals/{id}/cancel',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.0030,
'maker': 0.0000,
'tiers': {
'perpetual': {
'maker': [
[0, 0.0000],
[25000, 0.0000],
[50000, -0.00025],
[100000, -0.00025],
[1000000, -0.00025],
[10000000, -0.00025],
[25000000, -0.00025],
[50000000, -0.00025],
[75000000, -0.00025],
[100000000, -0.00025],
[200000000, -0.00025],
[300000000, -0.00025],
],
'taker': [
[0, 0.00120],
[25000, 0.00115],
[50000, 0.00110],
[100000, 0.00105],
[1000000, 0.00100],
[10000000, 0.00095],
[25000000, 0.00090],
[50000000, 0.00085],
[75000000, 0.00080],
[100000000, 0.00075],
[200000000, 0.00070],
[300000000, 0.00065],
],
},
'spot': {
'taker': [
[0, 0.003],
[10000, 0.0029],
[20000, 0.0028],
[50000, 0.0026],
[100000, 0.0020],
[1000000, 0.0016],
[5000000, 0.0012],
[10000000, 0.0010],
[25000000, 0.0009],
[50000000, 0.0008],
[100000000, 0.0007],
[200000000, 0.0006],
[500000000, 0.0004],
[1000000000, 0.0003],
],
'maker': [
[0, 0.0000],
[10000, 0.0020],
[20000, 0.0019],
[50000, 0.0018],
[100000, 0.0016],
[1000000, 0.0008],
[5000000, 0.0007],
[10000000, 0.0005],
[25000000, 0.0000],
[50000000, 0.0000],
[100000000, 0.0000],
[200000000, 0.0000],
[500000000, 0.0000],
[1000000000, 0.0000],
],
},
},
},
},
'precisionMode': TICK_SIZE,
'exceptions': {
'API rate limit exceeded. Please retry after 300s': DDoSProtection,
'API Authentication failed': AuthenticationError,
'Nonce is too small': InvalidNonce,
'Order not found': OrderNotFound,
'Can not update partially filled order': InvalidOrder,
'Can not update non-live order': OrderNotFound,
'not_enough_free_balance': InsufficientFunds,
'must_be_positive': InvalidOrder,
'less_than_order_size': InvalidOrder,
'price_too_high': InvalidOrder,
'price_too_small': InvalidOrder, # {"errors":{"order":["price_too_small"]}}
'product_disabled': BadSymbol, # {"errors":{"order":["product_disabled"]}}
},
'commonCurrencies': {
'BIFI': 'BIFIF',
'HOT': 'HOT Token',
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
'P-BTC': 'BTC',
'TON': 'Tokamak Network',
},
'options': {
'cancelOrderException': True,
'networks': {
'ETH': 'ERC20',
'TRX': 'TRC20',
'XLM': 'Stellar',
'ALGO': 'Algorand',
},
'swap': {
'fetchMarkets': {
'settlementCurrencies': ['BTC', 'ETH', 'XRP', 'QASH', 'USD', 'JPY', 'EUR', 'SGD', 'AUD'],
},
},
},
})
def fetch_currencies(self, params={}):
response = self.publicGetCurrencies(params)
#
# [
# {
# currency_type: 'fiat',
# currency: 'USD',
# symbol: '$',
# assets_precision: 2,
# quoting_precision: 5,
# minimum_withdrawal: '15.0',
# withdrawal_fee: 5,
# minimum_fee: null,
# minimum_order_quantity: null,
# display_precision: 2,
# depositable: True,
# withdrawable: True,
# discount_fee: 0.5,
# credit_card_fundable: False,
# lendable: False,
# position_fundable: True,
# has_memo: False,
# stable_currency: null,
# root_currency: 'USD',
# minimum_loan_bid_quantity: '0.0',
# maximum_order_taker_quantity: null,
# name: 'United States Dollar'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'name')
depositable = self.safe_value(currency, 'depositable')
withdrawable = self.safe_value(currency, 'withdrawable')
active = depositable and withdrawable
amountPrecision = self.safe_integer(currency, 'assets_precision')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': active,
'deposit': depositable,
'withdraw': withdrawable,
'fee': self.safe_number(currency, 'withdrawal_fee'),
'precision': amountPrecision,
'limits': {
'amount': {
'min': math.pow(10, -amountPrecision),
'max': math.pow(10, amountPrecision),
},
'withdraw': {
'min': self.safe_number(currency, 'minimum_withdrawal'),
'max': None,
},
},
}
return result
def fetch_markets(self, params={}):
spot = self.publicGetProducts(params)
#
# [
# {
# "id":"637",
# "product_type":"CurrencyPair",
# "code":"CASH",
# "name":null,
# "market_ask":"0.00000797",
# "market_bid":"0.00000727",
# "indicator":null,
# "currency":"BTC",
# "currency_pair_code":"TFTBTC",
# "symbol":null,
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_tftbtc_637",
# "taker_fee":"0.0",
# "maker_fee":"0.0",
# "low_market_bid":"0.00000685",
# "high_market_ask":"0.00000885",
# "volume_24h":"3696.0755956",
# "last_price_24h":"0.00000716",
# "last_traded_price":"0.00000766",
# "last_traded_quantity":"1748.0377978",
# "average_price":null,
# "quoted_currency":"BTC",
# "base_currency":"TFT",
# "tick_size":"0.00000001",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":false,
# "last_event_timestamp":"1596962820.000797146",
# "timestamp":"1596962820.000797146",
# "multiplier_up":"9.0",
# "multiplier_down":"0.1",
# "average_time_interval":null
# },
# ]
#
perpetual = self.publicGetProducts({'perpetual': '1'})
#
# [
# {
# "id":"604",
# "product_type":"Perpetual",
# "code":"CASH",
# "name":null,
# "market_ask":"11721.5",
# "market_bid":"11719.0",
# "indicator":null,
# "currency":"USD",
# "currency_pair_code":"P-BTCUSD",
# "symbol":"$",
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_p-btcusd_604",
# "taker_fee":"0.0012",
# "maker_fee":"0.0",
# "low_market_bid":"11624.5",
# "high_market_ask":"11859.0",
# "volume_24h":"0.271",
# "last_price_24h":"11621.5",
# "last_traded_price":"11771.5",
# "last_traded_quantity":"0.09",
# "average_price":"11771.5",
# "quoted_currency":"USD",
# "base_currency":"P-BTC",
# "tick_size":"0.5",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":true,
# "last_event_timestamp":"1596963309.418853092",
# "timestamp":"1596963309.418853092",
# "multiplier_up":null,
# "multiplier_down":"0.1",
# "average_time_interval":300,
# "index_price":"11682.8124",
# "mark_price":"11719.96781",
# "funding_rate":"0.00273",
# "fair_price":"11720.2745"
# },
# ]
#
currencies = self.fetch_currencies()
currenciesByCode = self.index_by(currencies, 'code')
result = []
markets = self.array_concat(spot, perpetual)
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quoted_currency')
productType = self.safe_string(market, 'product_type')
swap = (productType == 'Perpetual')
type = 'swap' if swap else 'spot'
spot = not swap
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
disabled = self.safe_value(market, 'disabled', False)
baseCurrency = self.safe_value(currenciesByCode, base)
minAmount = None
if baseCurrency is not None:
minAmount = self.safe_number(baseCurrency['info'], 'minimum_order_quantity')
lastPrice = self.safe_number(market, 'last_traded_price')
minPrice = None
maxPrice = None
if lastPrice:
multiplierDown = self.safe_number(market, 'multiplier_down')
multiplierUp = self.safe_number(market, 'multiplier_up')
if multiplierDown is not None:
minPrice = lastPrice * multiplierDown
if multiplierUp is not None:
maxPrice = lastPrice * multiplierUp
margin = self.safe_value(market, 'margin_enabled')
symbol = base + '/' + quote
maker = self.fees['trading']['maker']
taker = self.fees['trading']['taker']
parsedMarket = {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': type,
'spot': spot,
'margin': spot and margin,
'swap': swap,
'future': False,
'option': False,
'active': not disabled,
'contract': swap,
'linear': None,
'inverse': None,
'taker': taker,
'maker': maker,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number('0.00000001'),
'price': self.safe_number(market, 'tick_size'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': minPrice,
'max': maxPrice,
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
}
if swap:
settlementCurrencies = self.options['fetchMarkets']['settlementCurrencies']
for i in range(0, len(settlementCurrencies)):
settle = settlementCurrencies[i]
parsedMarket['settle'] = settle
parsedMarket['symbol'] = symbol + ':' + settle
parsedMarket['linear'] = quote == settle
parsedMarket['inverse'] = base == settle
parsedMarket['taker'] = self.safe_number(market, 'taker_fee', taker)
parsedMarket['maker'] = self.safe_number(market, 'maker_fee', maker)
parsedMarket['contractSize'] = self.parse_number('1')
result.append(parsedMarket)
else:
result.append(parsedMarket)
return result
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
crypto = self.safe_value(response, 'crypto_accounts', [])
fiat = self.safe_value(response, 'fiat_accounts', [])
for i in range(0, len(crypto)):
balance = crypto[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'reserved_balance')
result[code] = account
for i in range(0, len(fiat)):
balance = fiat[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'reserved_balance')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetAccounts(params)
#
# {
# crypto_accounts: [
# {
# id: 2221179,
# currency: 'USDT',
# balance: '0.0',
# reserved_balance: '0.0',
# pusher_channel: 'user_xxxxx_account_usdt',
# lowest_offer_interest_rate: null,
# highest_offer_interest_rate: null,
# address: '0',
# currency_symbol: 'USDT',
# minimum_withdraw: null,
# currency_type: 'crypto'
# },
# ],
# fiat_accounts: [
# {
# id: 1112734,
# currency: 'USD',
# balance: '0.0',
# reserved_balance: '0.0',
# pusher_channel: 'user_xxxxx_account_usd',
# lowest_offer_interest_rate: null,
# highest_offer_interest_rate: null,
# currency_symbol: '$',
# send_to_btc_address: null,
# exchange_rate: '1.0',
# currency_type: 'fiat'
# }
# ]
# }
#
return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'id': self.market_id(symbol),
}
response = self.publicGetProductsIdPriceLevels(self.extend(request, params))
return self.parse_order_book(response, symbol, None, 'buy_price_levels', 'sell_price_levels')
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
last = None
if 'last_traded_price' in ticker:
if ticker['last_traded_price']:
length = len(ticker['last_traded_price'])
if length > 0:
last = self.safe_string(ticker, 'last_traded_price')
marketId = self.safe_string(ticker, 'id')
market = self.safe_market(marketId, market)
symbol = market['symbol']
baseId = self.safe_string(ticker, 'base_currency')
quoteId = self.safe_string(ticker, 'quoted_currency')
if (baseId is not None) and (quoteId is not None):
symbol = self.safe_currency_code(baseId) + '/' + self.safe_currency_code(quoteId)
open = self.safe_string(ticker, 'last_price_24h')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high_market_ask'),
'low': self.safe_string(ticker, 'low_market_bid'),
'bid': self.safe_string(ticker, 'market_bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'market_ask'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'volume_24h'),
'quoteVolume': None,
'info': ticker,
}, market, False)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetProducts(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
response = self.publicGetProductsId(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
# { id: 12345,
# quantity: "6.789",
# price: "98765.4321",
# taker_side: "sell",
# created_at: 1512345678,
# my_side: "buy" }
timestamp = self.safe_timestamp(trade, 'created_at')
orderId = self.safe_string(trade, 'order_id')
# 'taker_side' gets filled for both fetchTrades and fetchMyTrades
takerSide = self.safe_string(trade, 'taker_side')
# 'my_side' gets filled for fetchMyTrades only and may differ from 'taker_side'
mySide = self.safe_string(trade, 'my_side')
side = mySide if (mySide is not None) else takerSide
takerOrMaker = None
if mySide is not None:
takerOrMaker = 'taker' if (takerSide == mySide) else 'maker'
price = self.safe_string(trade, 'price')
amount = self.safe_string(trade, 'quantity')
id = self.safe_string(trade, 'id')
market = self.safe_market(None, market)
return self.safe_trade({
'info': trade,
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': None,
'fee': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['id'],
}
if limit is not None:
request['limit'] = limit
if since is not None:
# timestamp should be in seconds, whereas we use milliseconds in since and everywhere
request['timestamp'] = int(since / 1000)
response = self.publicGetExecutions(self.extend(request, params))
result = response if (since is not None) else response['models']
return self.parse_trades(result, market, since, limit)
def fetch_trading_fee(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
response = self.publicGetProductsId(self.extend(request, params))
#
# {
# "id":"637",
# "product_type":"CurrencyPair",
# "code":"CASH",
# "name":null,
# "market_ask":"0.00000797",
# "market_bid":"0.00000727",
# "indicator":null,
# "currency":"BTC",
# "currency_pair_code":"TFTBTC",
# "symbol":null,
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_tftbtc_637",
# "taker_fee":"0.0",
# "maker_fee":"0.0",
# "low_market_bid":"0.00000685",
# "high_market_ask":"0.00000885",
# "volume_24h":"3696.0755956",
# "last_price_24h":"0.00000716",
# "last_traded_price":"0.00000766",
# "last_traded_quantity":"1748.0377978",
# "average_price":null,
# "quoted_currency":"BTC",
# "base_currency":"TFT",
# "tick_size":"0.00000001",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":false,
# "last_event_timestamp":"1596962820.000797146",
# "timestamp":"1596962820.000797146",
# "multiplier_up":"9.0",
# "multiplier_down":"0.1",
# "average_time_interval":null
# }
#
return self.parse_trading_fee(response, market)
def parse_trading_fee(self, fee, market=None):
marketId = self.safe_string(fee, 'id')
symbol = self.safe_symbol(marketId, market)
return {
'info': fee,
'symbol': symbol,
'maker': self.safe_number(fee, 'maker_fee'),
'taker': self.safe_number(fee, 'taker_fee'),
'percentage': True,
'tierBased': True,
}
def fetch_trading_fees(self, params={}):
self.load_markets()
spot = self.publicGetProducts(params)
#
# [
# {
# "id":"637",
# "product_type":"CurrencyPair",
# "code":"CASH",
# "name":null,
# "market_ask":"0.00000797",
# "market_bid":"0.00000727",
# "indicator":null,
# "currency":"BTC",
# "currency_pair_code":"TFTBTC",
# "symbol":null,
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_tftbtc_637",
# "taker_fee":"0.0",
# "maker_fee":"0.0",
# "low_market_bid":"0.00000685",
# "high_market_ask":"0.00000885",
# "volume_24h":"3696.0755956",
# "last_price_24h":"0.00000716",
# "last_traded_price":"0.00000766",
# "last_traded_quantity":"1748.0377978",
# "average_price":null,
# "quoted_currency":"BTC",
# "base_currency":"TFT",
# "tick_size":"0.00000001",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":false,
# "last_event_timestamp":"1596962820.000797146",
# "timestamp":"1596962820.000797146",
# "multiplier_up":"9.0",
# "multiplier_down":"0.1",
# "average_time_interval":null
# },
# ]
#
perpetual = self.publicGetProducts({'perpetual': '1'})
#
# [
# {
# "id":"604",
# "product_type":"Perpetual",
# "code":"CASH",
# "name":null,
# "market_ask":"11721.5",
# "market_bid":"11719.0",
# "indicator":null,
# "currency":"USD",
# "currency_pair_code":"P-BTCUSD",
# "symbol":"$",
# "btc_minimum_withdraw":null,
# "fiat_minimum_withdraw":null,
# "pusher_channel":"product_cash_p-btcusd_604",
# "taker_fee":"0.0012",
# "maker_fee":"0.0",
# "low_market_bid":"11624.5",
# "high_market_ask":"11859.0",
# "volume_24h":"0.271",
# "last_price_24h":"11621.5",
# "last_traded_price":"11771.5",
# "last_traded_quantity":"0.09",
# "average_price":"11771.5",
# "quoted_currency":"USD",
# "base_currency":"P-BTC",
# "tick_size":"0.5",
# "disabled":false,
# "margin_enabled":false,
# "cfd_enabled":false,
# "perpetual_enabled":true,
# "last_event_timestamp":"1596963309.418853092",
# "timestamp":"1596963309.418853092",
# "multiplier_up":null,
# "multiplier_down":"0.1",
# "average_time_interval":300,
# "index_price":"11682.8124",
# "mark_price":"11719.96781",
# "funding_rate":"0.00273",
# "fair_price":"11720.2745"
# },
# ]
#
markets = self.array_concat(spot, perpetual)
result = {}
for i in range(0, len(markets)):
market = markets[i]
marketId = self.safe_string(market, 'id')
symbol = self.safe_symbol(marketId, market)
result[symbol] = self.parse_trading_fee(market)
return result
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
# the `with_details` param is undocumented - it adds the order_id to the results
request = {
'product_id': market['id'],
'with_details': True,
}
if limit is not None:
request['limit'] = limit
response = self.privateGetExecutionsMe(self.extend(request, params))
return self.parse_trades(response['models'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
params = self.omit(params, ['clientOrderId', 'client_order_id'])
request = {
'order_type': type,
'product_id': self.market_id(symbol),
'side': side,
'quantity': self.amount_to_precision(symbol, amount),
}
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
if (type == 'limit') or (type == 'limit_post_only') or (type == 'market_with_range') or (type == 'stop'):
request['price'] = self.price_to_precision(symbol, price)
response = self.privatePostOrders(self.extend(request, params))
#
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "client_order_id": null,
# }
#
return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privatePutOrdersIdCancel(self.extend(request, params))
order = self.parse_order(response)
if order['status'] == 'closed':
if self.options['cancelOrderException']:
raise OrderNotFound(self.id + ' order closed already: ' + self.json(response))
return order
def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if price is None:
raise ArgumentsRequired(self.id + ' editOrder() requires the price argument')
request = {
'order': {
'quantity': self.amount_to_precision(symbol, amount),
'price': self.price_to_precision(symbol, price),
},
'id': id,
}
response = self.privatePutOrdersId(self.extend(request, params))
return self.parse_order(response)
def parse_order_status(self, status):
statuses = {
'live': 'open',
'filled': 'closed',
'cancelled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0"
# "client_order_id": null,
# }
#
# fetchOrder, fetchOrders, fetchOpenOrders, fetchClosedOrders
#
# {
# "id": 2157479,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.01",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "filled",
# "leverage_level": 2,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "executions": [
# {
# "id": 4566133,
# "quantity": "0.01",
# "price": "500.0",
# "taker_side": "buy",
# "my_side": "sell",
# "created_at": 1465396785
# }
# ]
# }
#
orderId = self.safe_string(order, 'id')
timestamp = self.safe_timestamp(order, 'created_at')
marketId = self.safe_string(order, 'product_id')
market = self.safe_value(self.markets_by_id, marketId)
status = self.parse_order_status(self.safe_string(order, 'status'))
amount = self.safe_number(order, 'quantity')
filled = self.safe_number(order, 'filled_quantity')
price = self.safe_number(order, 'price')
type = self.safe_string(order, 'order_type')
tradeCost = 0
tradeFilled = 0
average = self.safe_number(order, 'average_price')
trades = self.parse_trades(self.safe_value(order, 'executions', []), market, None, None, {
'order': orderId,
'type': type,
})
numTrades = len(trades)
for i in range(0, numTrades):
            # php copies values upon assignment instead of referencing them
            # todo: rewrite this (shortly)
trade = trades[i]
trade['order'] = orderId
trade['type'] = type
tradeFilled = self.sum(tradeFilled, trade['amount'])
tradeCost = self.sum(tradeCost, trade['cost'])
cost = None
lastTradeTimestamp = None
if numTrades > 0:
lastTradeTimestamp = trades[numTrades - 1]['timestamp']
if not average and (tradeFilled > 0):
average = tradeCost / tradeFilled
if cost is None:
cost = tradeCost
if filled is None:
filled = tradeFilled
remaining = None
if amount is not None and filled is not None:
remaining = amount - filled
side = self.safe_string(order, 'side')
clientOrderId = self.safe_string(order, 'client_order_id')
return {
'id': orderId,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'type': type,
'timeInForce': None,
'postOnly': None,
'status': status,
'symbol': market['symbol'],
'side': side,
'price': price,
'stopPrice': None,
'amount': amount,
'filled': filled,
'cost': cost,
'remaining': remaining,
'average': average,
'trades': trades,
'fee': {
'currency': market['quote'],
'cost': self.safe_number(order, 'order_fee'),
},
'info': order,
}
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrdersId(self.extend(request, params))
return self.parse_order(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
# 'funding_currency': market['quoteId'], # filter orders based on "funding" currency(quote currency)
# 'product_id': market['id'],
# 'status': 'live', # 'filled', 'cancelled'
# 'trading_type': 'spot', # 'margin', 'cfd'
'with_details': 1, # return full order details including executions
}
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
if limit is not None:
request['limit'] = limit
response = self.privateGetOrders(self.extend(request, params))
#
# {
# "models": [
# {
# "id": 2157474,
# "order_type": "limit",
# "quantity": "0.01",
# "disc_quantity": "0.0",
# "iceberg_total_quantity": "0.0",
# "side": "sell",
# "filled_quantity": "0.0",
# "price": "500.0",
# "created_at": 1462123639,
# "updated_at": 1462123639,
# "status": "live",
# "leverage_level": 1,
# "source_exchange": "QUOINE",
# "product_id": 1,
# "product_code": "CASH",
# "funding_currency": "USD",
# "currency_pair_code": "BTCUSD",
# "order_fee": "0.0",
# "executions": [], # optional
# }
# ],
# "current_page": 1,
# "total_pages": 1
# }
#
orders = self.safe_value(response, 'models', [])
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {'status': 'live'}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {'status': 'filled'}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
# 'auth_code': '', # optional 2fa code
'crypto_withdrawal': {
'currency': currency['id'],
'address': address,
'amount': amount,
# 'payment_id': tag, # for XRP only
# 'memo_type': 'text', # 'text', 'id' or 'hash', for XLM only
# 'memo_value': tag, # for XLM only
},
}
if tag is not None:
if code == 'XRP':
request['crypto_withdrawal']['payment_id'] = tag
elif code == 'XLM':
request['crypto_withdrawal']['memo_type'] = 'text' # overrideable via params
request['crypto_withdrawal']['memo_value'] = tag
else:
raise NotSupported(self.id + ' withdraw() only supports a tag along the address for XRP or XLM')
networks = self.safe_value(self.options, 'networks', {})
paramsCwArray = self.safe_value(params, 'crypto_withdrawal', {})
        network = self.safe_string_upper(paramsCwArray, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string(networks, network, network) # handle ERC20>ETH alias
if network is not None:
request['crypto_withdrawal']['network'] = network
params['crypto_withdrawal'] = self.omit(params['crypto_withdrawal'], 'network')
response = self.privatePostCryptoWithdrawals(self.deep_extend(request, params))
#
# {
# "id": 1353,
# "address": "1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2",
# "amount": 1.0,
# "state": "pending",
# "currency": "BTC",
# "withdrawal_fee": 0.0,
# "created_at": 1568016450,
# "updated_at": 1568016450,
# "payment_id": null
# }
#
return self.parse_transaction(response, currency)
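    # Illustrative usage sketch (not part of the generated file): calling withdraw()
    # with a destination tag. The credentials, address and tag values below are
    # placeholders only.
    #
    #     exchange = liquid({'apiKey': '...', 'secret': '...'})
    #     tx = exchange.withdraw('XRP', 25, 'rEXAMPLEADDRESS', tag='12345')
    #
    # For XLM the tag is sent as memo_type/memo_value, and supplying a tag for any
    # other currency raises NotSupported (see the branching above).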
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# state: 'processed', # optional: pending, filed, cancelled, processing, processed, reverted to_be_reviewed, declined, broadcasted
}
currency = None
if code is not None:
currency = self.currency(code)
response = self.privateGetCryptoWithdrawals(self.extend(request, params))
#
# {
# models: [
# {
# id: '2',
# address: '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2',
# amount: '0.01',
# state: 'processed',
# currency: 'BTC',
# withdrawal_fee: '0.0005',
# created_at: '1614718276',
# updated_at: '1614720926',
# payment_id: null,
# transaction_hash: 'xxxxxxxx...',
# broadcasted_at: '1614720762',
# wallet_label: 'btc',
# chain_name: 'Bitcoin',
# network: null
# },
# ],
# current_page: '1',
# total_pages: '1'
# }
#
transactions = self.safe_value(response, 'models', [])
return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'pending': 'pending',
'cancelled': 'canceled',
'approved': 'ok',
'processing': 'pending',
'processed': 'ok',
'reverted': 'failed',
'to_be_reviewed': 'pending',
'declined': 'failed',
'broadcasted': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# id: '1',
# address: '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2',
# amount: '0.01',
# state: 'pending',
# currency: 'BTC',
# withdrawal_fee: '0.0007',
# created_at: '1626000533',
# updated_at: '1626000533',
# payment_id: null,
# transaction_hash: null,
# broadcasted_at: null,
# wallet_label: null,
# chain_name: 'Bitcoin',
# network: null
# },
#
# fetchWithdrawals
#
# {
# id: '2',
# address: '1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2',
# amount: '0.01',
# state: 'processed',
# currency: 'BTC',
# withdrawal_fee: '0.0005',
# created_at: '1614718276',
# updated_at: '1614720926',
# payment_id: '',
# transaction_hash: 'xxxxxxxx...',
# broadcasted_at: '1614720762',
# wallet_label: 'btc',
# chain_name: 'Bitcoin',
# network: null
# },
#
# fetchDeposits
#
# ...
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
tag = self.safe_string_2(transaction, 'payment_id', 'memo_value')
txid = self.safe_string(transaction, 'transaction_hash')
currencyId = self.safe_string_2(transaction, 'currency', 'asset')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.safe_timestamp(transaction, 'created_at')
updated = self.safe_timestamp(transaction, 'updated_at')
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
amountString = self.safe_string(transaction, 'amount')
feeCostString = self.safe_string(transaction, 'withdrawal_fee')
amount = self.parse_number(Precise.string_sub(amountString, feeCostString))
network = self.safe_string(transaction, 'chain_name')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': network,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': self.parse_number(feeCostString),
},
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
headers = {
'X-Quoine-API-Version': self.version,
'Content-Type': 'application/json',
}
if api == 'private':
self.check_required_credentials()
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif query:
body = self.json(query)
nonce = self.nonce()
request = {
'path': url,
'token_id': self.apiKey,
'iat': int(math.floor(nonce / 1000)), # issued at
}
if not ('client_order_id' in query):
request['nonce'] = nonce
headers['X-Quoine-Auth'] = self.jwt(request, self.encode(self.secret))
else:
if query:
url += '?' + self.urlencode(query)
url = self.urls['api'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code >= 200 and code < 300:
return
if code == 401:
# expected non-json response
self.throw_exactly_matched_exception(self.exceptions, body, body)
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if response is None:
return
feedback = self.id + ' ' + body
message = self.safe_string(response, 'message')
errors = self.safe_value(response, 'errors')
if message is not None:
#
# {"message": "Order not found"}
#
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
elif errors is not None:
#
# {"errors": {"user": ["not_enough_free_balance"]}}
# {"errors": {"quantity": ["less_than_order_size"]}}
# {"errors": {"order": ["Can not update partially filled order"]}}
#
types = list(errors.keys())
for i in range(0, len(types)):
type = types[i]
errorMessages = errors[type]
for j in range(0, len(errorMessages)):
message = errorMessages[j]
self.throw_exactly_matched_exception(self.exceptions, message, feedback)
else:
raise ExchangeError(feedback)
| 41.229013
| 143
| 0.452089
|
48fd769824ccc88a2917568eb9b92054c317dde7
| 1,319
|
py
|
Python
|
micropython/examples/pico_display/demo.py
|
jpwsutton/pimoroni-pico
|
2dd70029deca9e19362f4ad6134b8361915f6129
|
[
"MIT"
] | null | null | null |
micropython/examples/pico_display/demo.py
|
jpwsutton/pimoroni-pico
|
2dd70029deca9e19362f4ad6134b8361915f6129
|
[
"MIT"
] | null | null | null |
micropython/examples/pico_display/demo.py
|
jpwsutton/pimoroni-pico
|
2dd70029deca9e19362f4ad6134b8361915f6129
|
[
"MIT"
] | null | null | null |
import time, random
import picodisplay as display
WIDTH = 240
HEIGHT = 135
display_buffer = bytearray(WIDTH * HEIGHT)
display.init(display_buffer)
display.set_backlight(1.0)
i = 0
width = display.get_width()
height = display.get_height()
class Ball:
def __init__(self, x, y, r, dx, dy, pen):
self.x = x
self.y = y
self.r = r
self.dx = dx
self.dy = dy
self.pen = pen
# initialise shapes
balls = []
for i in range(0, 100):
balls.append(
Ball(
random.randint(0, width),
random.randint(0, height),
random.randint(0, 10) + 3,
random.randint(0, 255) / 128,
random.randint(0, 255) / 128,
display.create_pen(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
)
)
while True:
display.set_pen(120, 40, 60)
display.clear()
for ball in balls:
ball.x += ball.dx
ball.y += ball.dy
if ball.x < 0 or ball.x > width:
ball.dx *= -1
if ball.y < 0 or ball.y > height:
ball.dy *= -1
display.set_pen(ball.pen)
display.circle(int(ball.x), int(ball.y), int(ball.r))
display.update()
time.sleep(0.01)
| 24.425926
| 115
| 0.526914
|
7259396d5574ceea129c1b009bcdf6fcc2691416
| 14,757
|
py
|
Python
|
cengal/io/net_io/versions/v_0/net_io_abstract.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | 3
|
2018-07-23T18:48:58.000Z
|
2021-07-18T14:17:20.000Z
|
cengal/io/net_io/versions/v_0/net_io_abstract.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | null | null | null |
cengal/io/net_io/versions/v_0/net_io_abstract.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Copyright © 2016 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import errno
import copy
import enum
from contextlib import contextmanager
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = "ButenkoMS <gtalk@butenkoms.space>"
__copyright__ = "Copyright © 2016 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>"
__credits__ = ["ButenkoMS <gtalk@butenkoms.space>", ]
__license__ = "Apache License, Version 2.0"
__version__ = "1.0.0"
__maintainer__ = "ButenkoMS <gtalk@butenkoms.space>"
__email__ = "gtalk@butenkoms.space"
# __status__ = "Prototype"
__status__ = "Development"
# __status__ = "Production"
class LoopIsAlreadyBegun(Exception):
"""
    You cannot run NetIOUserApi.start() if it was already started (and is still running).
"""
pass
class WrongConnectionType(Exception):
"""
    You cannot run NetIOUserApi.make_connection() for a ConnectionType.active_accepted connection. Connections of
    this kind are made only from inside the IO loop and its logic.
"""
pass
class CanNotMakeConnection(Exception):
"""
    Currently not used. If an exception occurs on the connect() call, it will be raised without any changes.
"""
pass
class ConnectionType(enum.Enum):
passive = 0 # passive socket (bind())
active_accepted = 1 # active accepted socket (accept())
active_connected = 2 # active connected socket (connect())
class ConnectionState(enum.Enum):
not_connected_yet = 0 # socket is not in connection process
waiting_for_connection = 1 # socket is in connection process (async connection is delayed)
connected = 2 # socket is successfully connected
worker_fault = 3 # there was unhandled exception from one of the WorkerBase callbacks
io_fault = 4 # there was some IO trouble
waiting_for_disconnection = 5 # connection was marked as "should be closed" but was not closed yet
disconnected = 6 # socket is closed
class ConnectionInfo:
def __init__(self,
worker_obj,
connection_type: ConnectionType,
socket_address=None,
socket_family=socket.AF_INET,
socket_type=socket.SOCK_STREAM,
socket_protocol=0,
socket_fileno=None,
backlog=0):
"""
:param worker_obj: constructed worker object (see WorkerBase for more info). If this is a passive
connection, the worker_obj will be inherited by the descendant active_accepted connections
via a copy.copy() call (see the WorkerBase.__copy__() method for more info)
:param connection_type: see ConnectionType() description
:param socket_address: see socket.bind()/socket.connect() docs
:param socket_family: see socket.socket() docs
:param socket_type: see socket.socket() docs
:param socket_protocol: see socket.socket() docs
:param socket_fileno: see socket.socket() docs
:param backlog: see socket.listen() docs
"""
self.worker_obj = worker_obj
self.connection_type = connection_type
self.socket_address = socket_address
self.socket_family = socket_family
self.socket_type = socket_type
self.socket_protocol = socket_protocol
self.socket_fileno = socket_fileno
self.backlog = backlog
class Connection:
"""
Connection class. Usually created by IO loop or by IO API. But you can also create it by yourself
"""
def __init__(self,
connection_id,
connection_info: ConnectionInfo,
connection_and_address_pair: tuple,
connection_state: ConnectionState,
connection_name=None,
):
"""
:param connection_id: unique connection identifier (unique within the IO object). You may use some random
GUID if you are creating the connection yourself.
:param connection_info: new connection will be created using information provided in connection_info object.
See ConnectionInfo() for more information
:param connection_and_address_pair: (conn, address) tuple, where conn is a connected socket (or a socket
that is still in the process of connecting, but only if it was created by the IO loop).
:param connection_state: see ConnectionState for more information
:param connection_name: name of the connection (if it was provided)
"""
self.connection_id = connection_id
self.connection_info = connection_info
self.conn, self.address = connection_and_address_pair
self.connection_state = connection_state
self.connection_name = connection_name
self.worker_obj = connection_info.worker_obj
self.read_data = b'' # already read data
self.must_be_written_data = memoryview(b'') # this data should be written
self.force_write_call = False
def add_must_be_written_data(self, data):
"""
Use this method to add data to output buffers
:param data: some new output data to be send through this connection
:return:
"""
self.must_be_written_data = memoryview(bytes(self.must_be_written_data) + data)
class NetIOUserApi:
"""
You may rely on and freely use the methods of this base class from inside your program or from inside your
worker (WorkerBase).
"""
def __init__(self):
super().__init__()
self.all_connections = set()
self.passive_connections = set()
self.connection_by_id = dict()
self.connection_by_name = dict()
self.connection_by_fileno = dict()
def start(self, destroy_on_finish=True):
"""
Will start IO loop
:param destroy_on_finish: if True - destroy() will be called from inside of this method
:return:
"""
raise NotImplementedError()
def stop(self):
"""
Will initiate IO loop stop process
:return:
"""
raise NotImplementedError()
def make_connection(self, connection_info: ConnectionInfo = None, name=None)->Connection:
"""
Will create a connection from the given connection_info object. Then the connection will be established,
either immediately or with a delay, depending on the connection type:
- ConnectionType.passive - immediate;
- ConnectionType.active_connected - delayed.
In both cases WorkerBase.on_connect() will be called right after the connection is successfully
established (if it is successfully established at all).
:param connection_info: new connection will be created using information provided in connection_info object.
See ConnectionInfo() for more information
:param name: name of the connection. If you provide it, you will be able to find this connection in the
NetIOUserApi.connection_by_name dictionary by its name.
:return:
"""
raise NotImplementedError()
def add_connection(self, connection: Connection):
"""
Will register an already established connection. Use this method, for example, when you already have a
connected socket.
:param connection:
:return:
"""
raise NotImplementedError()
def remove_connection(self, connection: Connection):
"""
Will close and remove connection
:param connection:
:return:
"""
raise NotImplementedError()
def check_is_connection_need_to_sent_data(self, connection: Connection):
"""
Will check the connection for pending output data. It is automatically called by the IO loop after EACH
WorkerBase callback call. But if you are filling another connection's output buffer, you will need to make
this call for that connection yourself.
:param connection:
:return:
"""
raise NotImplementedError()
class NetIOCallbacks:
"""
Callbacks from this class will be called from inside (and by) IOMethodBase loop.
"""
def __init__(self):
super().__init__()
def on_accept_connection(self, connection: Connection):
raise NotImplementedError()
def on_connected(self, connection: Connection):
raise NotImplementedError()
def on_read(self, connection: Connection):
raise NotImplementedError()
def on_write(self, connection: Connection):
raise NotImplementedError()
def on_close(self, connection: Connection):
raise NotImplementedError()
class NetIOBase(NetIOUserApi, NetIOCallbacks):
"""
Base class for any IO implementation (Linux, BSD, Windows, cross platform, etc.).
"""
def __init__(self, transport):
"""
:param transport: class of the desired IOMethod. Instance (object) will be created by NetIOBase itself
"""
super().__init__()
self.method = transport(self)
def destroy(self):
raise NotImplementedError()
class WorkerBase:
"""
Base class for all workers.
on_* callbacks will be called by the IO loop.
General info:
You can read input data from self.connection at any time (see the "Caution" section of the __init__ docstring)
from any callback.
You can write output data (to be sent) to self.connection at any time (see "Caution") from any callback.
"""
def __init__(self, api: NetIOUserApi=None, connection: Connection=None):
"""
Caution:
Please do not rely on self.api and self.connection inside your __init__ constructor: they are guaranteed to
be set before any callback call, but not during construction.
:param api: link to the constructed network io object
:param connection: link to the constructed connection object
"""
self.api = api
self.connection = connection
def on_connect(self):
"""
Will be called after connection successfully established
:return:
"""
pass
def on_read(self):
"""
Will be called if there is some NEW data in the connection input buffer
:return:
"""
pass
def on_no_more_data_to_write(self):
"""
Will be called after all data has been sent.
Normally it is called once (a single shot after each portion of outgoing data is sent).
If you set self.connection.force_write_call to True, this callback may be called continuously
(but this is not guaranteed: it depends on the IOMethod implementation used)
:return:
"""
pass
def on_connection_lost(self):
"""
Will be called AFTER the connection socket has actually been closed and removed from the IOMethod checking list.
At this time, self.connection.connection_state is already set to ConnectionState.disconnected.
:return:
"""
pass
def __copy__(self):
"""
This method SHOULD be implemented. It should create a new instance and copy some global (shared) data from
the current object to that new instance. It is called when a new peer connects to an existing passive
connection, so this is the way to give every new connection links to shared global data held by the worker
object of the passive connection.
:return:
"""
raise NotImplementedError()
@contextmanager
def net_io(net_io_obj: NetIOBase):
"""
Context manager.
Usage:
main_io = NetIOBase()
with(net_io(main_io)) as io:
print('Preparing connections')
connection1 = io.make_connection(...)
connection2 = io.make_connection(...)
...
connectionN = io.make_connection(...)
print('Starting IO loop')
print('IO loop was finished properly')
:param net_io_obj: constructed IO instance (object)
:return:
"""
try:
yield net_io_obj
net_io_obj.start(destroy_on_finish=False)
finally:
net_io_obj.destroy()
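# --- Editorial usage sketch (not part of the original module) -------------------------------------
# A minimal echo worker built on WorkerBase, showing add_must_be_written_data() and __copy__().
# `SomeNetIO` / `SomeIOMethod` in the wiring comment are hypothetical stand-ins for a concrete
# NetIOBase / IOMethodBase implementation; they are assumptions, not names from this module.
class EchoWorker(WorkerBase):
    def on_read(self):
        # Echo everything read so far back to the peer and clear the input buffer.
        self.connection.add_must_be_written_data(self.connection.read_data)
        self.connection.read_data = b''

    def __copy__(self):
        # Each peer accepted on a passive connection gets its own worker instance;
        # copy any shared state here.
        return EchoWorker(self.api, None)

# Typical wiring (hypothetical concrete classes):
#   io = SomeNetIO(SomeIOMethod)
#   with net_io(io) as loop:
#       loop.make_connection(ConnectionInfo(EchoWorker(), ConnectionType.passive,
#                                           ('0.0.0.0', 8080), backlog=10))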
class IOMethodBase:
"""
Base class for all IOMethod implementations (select, epoll, overlapped IO, kqueue, etc.).
All its methods are called by the NetIOBase instance.
"""
def __init__(self, interface: NetIOBase):
self.interface = interface
self.should_be_closed = set()
pass
def loop_iteration(self, timeout=None):
"""
Single IO loop iteration.
This method holds all IOMethod logic.
:return:
"""
raise NotImplementedError()
def destroy(self):
"""
Initiates destruction process
:return:
"""
raise NotImplementedError()
def set__can_read(self, conn: socket.socket, state=True):
"""
Will allow (True) or disallow (False) "socket available to read" checks for socket
:param conn: target socket
:param state: True - allow; False - disallow
:return:
"""
raise NotImplementedError()
def set__need_write(self, conn: socket.socket, state=True):
"""
Will allow (True) or disallow (False) "socket available to write" checks for socket
:param conn: target socket
:param state: True - allow; False - disallow
:return:
"""
raise NotImplementedError()
def set__should_be_closed(self, conn: socket.socket):
"""
Mark socket as "should be closed"
:param conn: target socket
:return:
"""
raise NotImplementedError()
def add_connection(self, conn: socket.socket):
"""
Will add socket to the internal connections list
:param conn: target socket
:return:
"""
raise NotImplementedError()
def remove_connection(self, conn: socket.socket):
"""
Will remove socket from the internal connections list
:param conn: target socket
:return:
"""
raise NotImplementedError()
| 34.969194
| 120
| 0.659213
|
58232ae7d6fdc1ebc32a23850fc3b2016e22af2b
| 2,037
|
py
|
Python
|
webtool/server/migrations/0027_booklet.py
|
wodo/WebTool3
|
1582a03d619434d8a6139f705a1b5860e9b5b8b8
|
[
"BSD-2-Clause"
] | 13
|
2018-12-16T21:01:24.000Z
|
2019-07-03T06:23:41.000Z
|
webtool/server/migrations/0027_booklet.py
|
dav-kempten/WebTool3
|
859f39df67cb0f853c7fe33cb5d08b999d8692fc
|
[
"BSD-2-Clause"
] | 26
|
2019-07-07T06:44:06.000Z
|
2021-09-07T07:28:34.000Z
|
webtool/server/migrations/0027_booklet.py
|
dav-kempten/WebTool3
|
859f39df67cb0f853c7fe33cb5d08b999d8692fc
|
[
"BSD-2-Clause"
] | 3
|
2017-06-18T06:22:52.000Z
|
2019-07-03T06:21:05.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-11 07:33
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('server', '0026_auto_20181211_0526'),
]
operations = [
migrations.CreateModel(
name='Booklet',
fields=[
('updated', models.DateTimeField(auto_now=True)),
('deprecated', models.BooleanField(default=False, verbose_name='Element gilt als gelöscht')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('references', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=7), help_text='Welche Veranstaltungen kommen in das Booklet', size=None, verbose_name='Inhalt')),
('format', models.CharField(choices=[('desktop', 'Desktop'), ('print', 'Print')], default='desktop', help_text='Formatierungsvorgaben', max_length=10, verbose_name='Formatvorlage')),
('header', models.CharField(blank=True, default='', help_text='Das ist der Text für das Titelblatt', max_length=125, verbose_name='Titel')),
('sub_header', models.CharField(blank=True, default='', help_text='Das ist der Untertitel für das Titelblatt', max_length=125, verbose_name='Untertitel')),
('main_header', models.CharField(blank=True, default='', help_text='Das ist der Haupttitel für das Titelblatt', max_length=125, verbose_name='Haupttitel')),
('status', models.CharField(choices=[('pending', 'Pending'), ('failed', 'Failed'), ('done', 'Done')], default='pending', max_length=10)),
],
options={
'verbose_name': 'Programmheft',
'verbose_name_plural': 'Programmhefte',
'ordering': ('updated',),
'get_latest_by': 'updated',
},
),
]
| 53.605263
| 209
| 0.633775
|
6c3a3d4ae4f4c46fc9dafb815c9cd2a3e33c81da
| 6,730
|
py
|
Python
|
main.py
|
royd1990/DD2424-Project
|
2665f81d1ecdbaca257ef4ac18c59b64dd16ae5a
|
[
"MIT"
] | null | null | null |
main.py
|
royd1990/DD2424-Project
|
2665f81d1ecdbaca257ef4ac18c59b64dd16ae5a
|
[
"MIT"
] | null | null | null |
main.py
|
royd1990/DD2424-Project
|
2665f81d1ecdbaca257ef4ac18c59b64dd16ae5a
|
[
"MIT"
] | null | null | null |
import argparse
import os
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import utils
from model import Model
def train(net, data_loader, train_optimizer, temperature, loss_type, tau_plus):
net.train()
total_loss = 0
total_count = 0
train_progress = tqdm(data_loader)
for pos_1, pos_2, target in train_progress:
pos_1 = pos_1.cuda(non_blocking=True)
pos_2 = pos_2.cuda(non_blocking=True)
feature_1, out_1 = net(pos_1)
feature_2, out_2 = net(pos_2)
# neg score
out = torch.cat([out_1, out_2], dim=0)
neg = torch.exp(torch.mm(out, out.t().contiguous()) / temperature)
mask = utils.get_negative_mask(batch_size).cuda()
neg = neg.masked_select(mask).view(2 * batch_size, -1)
# pos score
pos = torch.exp(torch.sum(out_1 * out_2, dim=-1) / temperature)
pos = torch.cat([pos, pos], dim=0)
# estimator g()
if loss_type:
N = batch_size * 2 - 2
Ng = (-tau_plus * N * pos + neg.sum(dim = -1)) / (1 - tau_plus)
# constrain (optional)
Ng = torch.clamp(Ng, min = N * np.e**(-1 / temperature))
else:
Ng = neg.sum(dim=-1)
# contrastive loss
loss = (- torch.log(pos / (pos + Ng) )).mean()
train_optimizer.zero_grad()
loss.backward()
train_optimizer.step()
total_count += batch_size
total_loss += loss.item() * batch_size
current_loss = total_loss / total_count
train_progress.set_description('Train Epoch: [{}/{}] Loss: {:.4f}'.format(epoch, epochs, current_loss))
training_loss = total_loss / total_count
return training_loss
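# --- Editorial sketch (not part of the original script) -------------------------------------------
# The `loss_type` branch above implements the debiased negative estimator Ng; the helper below
# reproduces it on made-up tensors so it can be sanity-checked in isolation. The constants mirror
# the CLI defaults; shapes and values are arbitrary illustrations.
def _debiased_ng_example(bs=4, temperature=0.5, tau_plus=0.1):
    pos = torch.rand(2 * bs).exp()              # stand-in positive similarity scores
    neg = torch.rand(2 * bs, 2 * bs - 2).exp()  # stand-in negative similarity scores
    N = bs * 2 - 2
    Ng = (-tau_plus * N * pos + neg.sum(dim=-1)) / (1 - tau_plus)
    Ng = torch.clamp(Ng, min=N * np.e ** (-1 / temperature))
    return (-torch.log(pos / (pos + Ng))).mean()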
# test for one epoch, use weighted knn to find the most similar images' label to assign the test image
def test(network, memory_data_loader, test_set_loader):
network.eval()
total_top1 = 0
total_top5 = 0
total_num = 0
feature_bank = []
with torch.no_grad():
# generate feature bank
for data, _, target in tqdm(memory_data_loader, desc='Extract Features'):
feature, out = network(data.cuda(non_blocking=True))
feature_bank.append(feature)
# [D, N]
feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
# [N]
feature_targets = torch.tensor(memory_data_loader.dataset.labels, device=feature_bank.device)
# loop test data to predict the label by weighted knn search
test_progress = tqdm(test_set_loader)
for data, _, target in test_progress:
data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
feature, out = network(data)
total_num += data.size(0)
# compute cos similarity between each feature vector and feature bank -> [B, N]
sim_matrix = torch.mm(feature, feature_bank)
# [B, K]
sim_weight, sim_indices = sim_matrix.topk(k=k, dim=-1)
# [B, K]
sim_labels = torch.gather(feature_targets.expand(data.size(0), -1), dim=-1, index=sim_indices)
sim_weight = (sim_weight / temperature).exp()
# counts for each class
one_hot_label = torch.zeros(data.size(0) * k, c, device=sim_labels.device)
# [B*K, C]
one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1).long(), value=1.0)
# weighted score -> [B, C]
pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.unsqueeze(dim=-1), dim=1)
pred_labels = pred_scores.argsort(dim=-1, descending=True)
total_top1 += torch.sum((pred_labels[:, :1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
total_top5 += torch.sum((pred_labels[:, :5] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
test_progress.set_description('KNN Test Epoch: [{}/{}] Acc@1:{:.2f}% Acc@5:{:.2f}%'
.format(epoch, epochs, total_top1 / total_num * 100, total_top5 / total_num * 100))
return total_top1 / total_num * 100, total_top5 / total_num * 100
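# --- Editorial sketch (not part of the original script) -------------------------------------------
# The weighted-kNN vote used in test(), reduced to toy shapes: scatter one-hot neighbour labels,
# weight them by similarity, and rank classes per sample. Sizes below are arbitrary.
def _weighted_knn_vote_example(B=2, K=3, C=4):
    sim_weight = torch.rand(B, K)                     # similarity of each of the K neighbours
    sim_labels = torch.randint(0, C, (B, K))          # their class labels
    one_hot = torch.zeros(B * K, C).scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)
    scores = torch.sum(one_hot.view(B, K, C) * sim_weight.unsqueeze(dim=-1), dim=1)
    return scores.argsort(dim=-1, descending=True)    # class ranking per sample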
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train SimCLR')
parser.add_argument('--input_dim', default=128, type=int, help='Input dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')
parser.add_argument('--tau_plus', default=0.1, type=float, help='Positive class prior')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used in prediction')
parser.add_argument('--batch_size', default=32, type=int, help='Number of images in a mini-batch')
parser.add_argument('--epochs', default=500, type=int, help='Number of sweeps over the train set')
parser.add_argument('--loss_type', default=True, type=bool, help='Debiased contrastive loss or standard loss')
# args parse
args = parser.parse_args()
input_dim = args.input_dim
temperature = args.temperature
tau_plus = args.tau_plus
k = args.k
batch_size = args.batch_size
epochs = args.epochs
loss_type = args.loss_type
# data prepare
#train_data = utils.STL10Pair(root='data', split='train+unlabeled', transform=utils.train_transform)
train_data = utils.MNISTPair(root='data', train=True, transform=utils.train_transform, download=True)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, pin_memory=True,
drop_last=True)
memory_data = utils.MNISTPair(root='data', train=True, transform=utils.test_transform)
memory_loader = DataLoader(memory_data, batch_size=batch_size, shuffle=False, pin_memory=True)
test_data = utils.MNISTPair(root='data', train=False, transform=utils.test_transform)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, pin_memory=True)
# model setup and optimizer config
model = Model(input_dim).cuda()
model = nn.DataParallel(model)
optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
c = len(memory_data.classes)
print('# Classes: {}'.format(c))
# training loop
if not os.path.exists('results'):
os.mkdir('results')
for epoch in range(1, epochs + 1):
train_loss = train(model, train_loader, optimizer, temperature, loss_type, tau_plus)
if epoch % 25 == 0:
test_acc_1, test_acc_5 = test(model, memory_loader, test_loader)
torch.save(model.state_dict(), 'results/model_{}.pth'.format(epoch))
| 43.419355
| 125
| 0.645468
|
daf182375541e1f4c66c579ab0d5d361a3779fab
| 5,143
|
py
|
Python
|
tf_agents/bandits/policies/categorical_policy.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | 2
|
2021-10-30T16:57:37.000Z
|
2021-11-17T10:21:17.000Z
|
tf_agents/bandits/policies/categorical_policy.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/bandits/policies/categorical_policy.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | 2
|
2020-06-05T18:38:16.000Z
|
2020-07-08T14:41:42.000Z
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy that chooses actions based on a categorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.policies import tf_policy
from tf_agents.trajectories import policy_step
from tf_agents.utils import common
from tf_agents.utils import nest_utils
tfd = tfp.distributions
def _validate_weights(weights):
if len(weights.shape) != 1:
raise ValueError(
'Expected a 1D `Tensor` of weights; got {}.'.format(weights))
class CategoricalPolicy(tf_policy.TFPolicy):
"""Policy that chooses an action based on a categorical distribution.
The distribution is specified by a set of weights for each action and an
inverse temperature. The unnormalized probability distribution is given by
`exp(weight * inv_temp)`. Weights and inverse temperature are typically
maintained as `Variable`s, and are updated by an `Agent`.
Note that this policy does not make use of `time_step.observation` at all.
That is, it is a non-contextual bandit policy.
"""
def __init__(self,
weights,
time_step_spec,
action_spec,
inverse_temperature=1.,
emit_log_probability=True,
name=None):
"""Initializes `CategoricalPolicy`.
The `weights` and `inverse_temperature` arguments may be either `Tensor`s or
`tf.Variable`s. If they are variables, then any assignments to those
variables will be reflected in the output of the policy. The shape of
`weights` is used to determine the action_spec for this policy;
`action_spec.maximum = weights.shape[0]`.
If `emit_log_probability=True`, the info field of the `PolicyStep` returned
will be a `PolicyInfo` tuple with log-probability that can be accessed
using `policy_step.get_log_probability(step.info)`.
Args:
weights: a vector of weights, corresponding to the unscaled log
probabilities of a categorical distribution.
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A `tensor_spec` of action specification.
inverse_temperature: a float value used to scale `weights`. Lower values
will induce a more uniform distribution over actions; higher values will
result in a sharper distribution.
emit_log_probability: Whether to emit log probabilities or not.
name: The name of this policy.
Raises:
ValueError: If the number of actions specified by the action_spec does not
match the dimension of weights.
"""
_validate_weights(weights)
self._weights = weights
self._inverse_temperature = inverse_temperature
if action_spec.maximum + 1 != tf.compat.dimension_value(weights.shape[0]):
raise ValueError(
'Number of actions ({}) does not match weights dimension ({}).'
.format(action_spec.maximum + 1,
tf.compat.dimension_value(weights.shape[0])))
super(CategoricalPolicy, self).__init__(
time_step_spec=time_step_spec,
action_spec=action_spec,
emit_log_probability=emit_log_probability,
name=name)
def _variables(self):
return [v for v in [self._weights, self._inverse_temperature]
if isinstance(v, tf.Variable)]
def _distribution(self, time_step, policy_state):
"""Implementation of `distribution`. Returns a `Categorical` distribution.
The returned `Categorical` distribution has (unnormalized) probabilities
`exp(inverse_temperature * weights)`.
Args:
time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
policy_state: Unused in `CategoricalPolicy`. It is simply passed through.
Returns:
A `PolicyStep` named tuple containing:
`action`: An (optionally nested) tfp.distributions.Distribution
capturing the distribution of next actions.
`state`: A policy state tensor for the next call to distribution.
`info`: Optional side information such as action log probabilities.
"""
outer_shape = nest_utils.get_outer_shape(time_step, self._time_step_spec)
logits = (
self._inverse_temperature *
common.replicate(self._weights, outer_shape))
action_distribution = tfd.Independent(
tfd.Categorical(logits=logits, dtype=self._action_spec.dtype))
return policy_step.PolicyStep(action_distribution, policy_state)
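# --- Editorial usage sketch (not part of TF-Agents) ------------------------------------------------
# Constructing and sampling from a CategoricalPolicy over three actions. The spec helpers come from
# tf_agents.specs / tf_agents.trajectories; the weight and temperature values are arbitrary.
def _example_categorical_policy():
  from tf_agents.specs import tensor_spec
  from tf_agents.trajectories import time_step as ts
  obs_spec = tf.TensorSpec(shape=(1,), dtype=tf.float32)
  act_spec = tensor_spec.BoundedTensorSpec(shape=(), dtype=tf.int32, minimum=0, maximum=2)
  policy = CategoricalPolicy(weights=tf.constant([1.0, 2.0, 3.0]),
                             time_step_spec=ts.time_step_spec(obs_spec),
                             action_spec=act_spec,
                             inverse_temperature=2.0)
  return policy.action(ts.restart(tf.zeros([1, 1], dtype=tf.float32), batch_size=1))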
| 40.496063
| 80
| 0.722341
|
ef5e3d6c3d7165ee77824c43334cfc873c07267d
| 1,621
|
py
|
Python
|
tests/_tests_scripts/z_distributed_12.py
|
pdanilov/catalyst
|
2ede5354b2a02807b3c1c69fc0718351382c8301
|
[
"Apache-2.0"
] | null | null | null |
tests/_tests_scripts/z_distributed_12.py
|
pdanilov/catalyst
|
2ede5354b2a02807b3c1c69fc0718351382c8301
|
[
"Apache-2.0"
] | null | null | null |
tests/_tests_scripts/z_distributed_12.py
|
pdanilov/catalyst
|
2ede5354b2a02807b3c1c69fc0718351382c8301
|
[
"Apache-2.0"
] | 1
|
2018-12-21T15:56:21.000Z
|
2018-12-21T15:56:21.000Z
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset
from catalyst import dl
class Projector(nn.Module):
"""
Simple neural network example - Projector.
"""
def __init__(self, input_size: int):
"""
Init method.
Args:
input_size(int): number of features in projected space.
"""
super().__init__()
self.linear = nn.Linear(input_size, 1)
def forward(self, X: torch.Tensor) -> torch.Tensor:
"""
Forward method.
Args:
X(torch.Tensor): input tensor.
Returns:
(torch.Tensor): output projection.
"""
return self.linear(X).squeeze(-1)
def datasets_fn(num_features: int):
"""
Datasets closure.
Args:
num_features: number of features for dataset creation.
"""
X = torch.rand(int(1e4), num_features)
y = torch.rand(X.shape[0])
dataset = TensorDataset(X, y)
return {"train": dataset, "valid": dataset}
num_features = 10
model = Projector(num_features)
# example 12 - distributed training with datasets closure
runner = dl.SupervisedRunner()
runner.train(
model=model,
# loaders={"train": loader, "valid": loader},
datasets={
"batch_size": 32,
"num_workers": 1,
"get_datasets_fn": datasets_fn,
"num_features": num_features,
},
criterion=nn.MSELoss(),
optimizer=optim.Adam(model.parameters()),
logdir="logs/log_example_12",
num_epochs=10,
verbose=True,
check=True,
fp16=False,
distributed=True,
)
| 22.205479
| 67
| 0.613819
|
ab83b84922c1fc225ef942f80a56792bbc817273
| 6,558
|
py
|
Python
|
superset/common/utils.py
|
ekmixon/superset
|
540277ebb108ccaf7937a173f6cfde90f2e72813
|
[
"Apache-2.0"
] | 2
|
2021-09-13T07:20:49.000Z
|
2021-11-14T20:06:47.000Z
|
superset/common/utils.py
|
ekmixon/superset
|
540277ebb108ccaf7937a173f6cfde90f2e72813
|
[
"Apache-2.0"
] | 62
|
2020-05-06T22:51:53.000Z
|
2022-03-28T20:49:17.000Z
|
superset/common/utils.py
|
ekmixon/superset
|
540277ebb108ccaf7937a173f6cfde90f2e72813
|
[
"Apache-2.0"
] | 1
|
2022-02-07T18:59:01.000Z
|
2022-02-07T18:59:01.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any, Dict, Optional
from flask_caching import Cache
from pandas import DataFrame
from superset import app
from superset.constants import CacheRegion
from superset.exceptions import CacheLoadError
from superset.extensions import cache_manager
from superset.models.helpers import QueryResult
from superset.stats_logger import BaseStatsLogger
from superset.utils.cache import set_and_log_cache
from superset.utils.core import error_msg_from_exception, get_stacktrace, QueryStatus
config = app.config
stats_logger: BaseStatsLogger = config["STATS_LOGGER"]
logger = logging.getLogger(__name__)
_cache: Dict[CacheRegion, Cache] = {
CacheRegion.DEFAULT: cache_manager.cache,
CacheRegion.DATA: cache_manager.data_cache,
}
class QueryCacheManager:
"""
Class to manage query-cache getting and setting
"""
# pylint: disable=too-many-instance-attributes,too-many-arguments
def __init__(
self,
df: DataFrame = DataFrame(),
query: str = "",
annotation_data: Optional[Dict[str, Any]] = None,
status: Optional[str] = None,
error_message: Optional[str] = None,
is_loaded: bool = False,
stacktrace: Optional[str] = None,
is_cached: Optional[bool] = None,
cache_dttm: Optional[str] = None,
cache_value: Optional[Dict[str, Any]] = None,
) -> None:
self.df = df
self.query = query
self.annotation_data = {} if annotation_data is None else annotation_data
self.status = status
self.error_message = error_message
self.is_loaded = is_loaded
self.stacktrace = stacktrace
self.is_cached = is_cached
self.cache_dttm = cache_dttm
self.cache_value = cache_value
# pylint: disable=too-many-arguments
def set_query_result(
self,
key: str,
query_result: QueryResult,
annotation_data: Optional[Dict[str, Any]] = None,
force_query: Optional[bool] = False,
timeout: Optional[int] = None,
datasource_uid: Optional[str] = None,
region: CacheRegion = CacheRegion.DEFAULT,
) -> None:
"""
Set the dataframe of a query result in the specified cache region
"""
try:
self.status = query_result.status
self.query = query_result.query
self.error_message = query_result.error_message
self.df = query_result.df
self.annotation_data = {} if annotation_data is None else annotation_data
if self.status != QueryStatus.FAILED:
stats_logger.incr("loaded_from_source")
if not force_query:
stats_logger.incr("loaded_from_source_without_force")
self.is_loaded = True
value = {
"df": self.df,
"query": self.query,
"annotation_data": self.annotation_data,
}
if self.is_loaded and key and self.status != QueryStatus.FAILED:
self.set(
key=key,
value=value,
timeout=timeout,
datasource_uid=datasource_uid,
region=region,
)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
if not self.error_message:
self.error_message = str(ex)
self.status = QueryStatus.FAILED
self.stacktrace = get_stacktrace()
@classmethod
def get(
cls,
key: Optional[str],
region: CacheRegion = CacheRegion.DEFAULT,
force_query: Optional[bool] = False,
force_cached: Optional[bool] = False,
) -> "QueryCacheManager":
"""
Initialize QueryCacheManager by query-cache key
"""
query_cache = cls()
if not key or not _cache[region] or force_query:
return query_cache
cache_value = _cache[region].get(key)
if cache_value:
logger.info("Cache key: %s", key)
stats_logger.incr("loading_from_cache")
try:
query_cache.df = cache_value["df"]
query_cache.query = cache_value["query"]
query_cache.annotation_data = cache_value.get("annotation_data", {})
query_cache.status = QueryStatus.SUCCESS
query_cache.is_loaded = True
query_cache.is_cached = cache_value is not None
query_cache.cache_dttm = (
cache_value["dttm"] if cache_value is not None else None
)
query_cache.cache_value = cache_value
stats_logger.incr("loaded_from_cache")
except KeyError as ex:
logger.exception(ex)
logger.error(
"Error reading cache: %s",
error_msg_from_exception(ex),
exc_info=True,
)
logger.info("Serving from cache")
if force_cached and not query_cache.is_loaded:
logger.warning(
"force_cached (QueryContext): value not found for key %s", key
)
raise CacheLoadError("Error loading data from cache")
return query_cache
@staticmethod
def set(
key: Optional[str],
value: Dict[str, Any],
timeout: Optional[int] = None,
datasource_uid: Optional[str] = None,
region: CacheRegion = CacheRegion.DEFAULT,
) -> None:
"""
Set a value in the specified cache region; a proxy for `set_and_log_cache`
"""
if key:
set_and_log_cache(_cache[region], key, value, timeout, datasource_uid)
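# --- Editorial usage sketch (not part of the Superset codebase) ------------------------------------
# Typical read-through flow around QueryCacheManager. `compute_query_result` is a hypothetical
# callable standing in for the datasource query; key and timeout values are illustrative only.
#
#   cache = QueryCacheManager.get(cache_key, region=CacheRegion.DATA, force_query=False)
#   if not cache.is_loaded:
#       cache.set_query_result(cache_key, compute_query_result(), timeout=600,
#                              region=CacheRegion.DATA)
#   df = cache.df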
| 36.433333
| 85
| 0.619396
|
fe647177db346bfa38f1a0e6396789fab73fdbb2
| 7,889
|
py
|
Python
|
main.py
|
iCurlmyster/Nerual-Network-with-Human-Resource-Analytics
|
a5d94354be6e9cd4f2c24289e257247d367eef89
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
iCurlmyster/Nerual-Network-with-Human-Resource-Analytics
|
a5d94354be6e9cd4f2c24289e257247d367eef89
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
iCurlmyster/Nerual-Network-with-Human-Resource-Analytics
|
a5d94354be6e9cd4f2c24289e257247d367eef89
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import pandas as pd
import numpy as np
import sys
is_verbose = False
do_plot = False
do_relu = False
do_full = False
if len(sys.argv) > 1:
if "-v" in sys.argv:
is_verbose = True
if "-plot" in sys.argv:
do_plot = True
if "-relu" in sys.argv:
do_relu = True
if "-full" in sys.argv:
do_full = True
np.random.seed(7)
tf.set_random_seed(7)
init_data = pd.read_csv("./HR_comma_sep.csv")
if is_verbose:
# take a look at the info
# we see no rows are missing in any column. We also see "sales" and "salary" are object types.
print(init_data.info())
# let's take a look at the first 5 entries of the object data
print("Sales: {0}".format(init_data["sales"][:5]))
print("Salary: {0}".format(init_data["salary"][:5]))
# We can see that sales looks like nominal data and salary looks like ordinal data
# Let's see how many unique entries there are
sales_unique_n = init_data["sales"].nunique()
salary_unique_n = init_data["salary"].nunique()
## 10 unique
print("Unique sale categories: {0}".format(sales_unique_n))
## 3 unique
print("Unique salary categories: {0}".format(salary_unique_n))
# Now let's break these categories out into easily digestible features for our model
sales_unique_feature_names = init_data["sales"].unique()
salary_unique_feature_names = init_data["salary"].unique()
# Function to break down a category into individual binary features
def break_down_features(feature_list, category, orig_data):
for name in feature_list:
orig_data[category+"_"+name] = [1 if x == name else 0 for _, x in enumerate(orig_data[category])]
break_down_features(sales_unique_feature_names, "sales", init_data)
break_down_features(salary_unique_feature_names, "salary", init_data)
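# --- Illustrative check (editorial sketch, not in the original script) -----------------------------
# On a tiny frame, break_down_features expands a categorical column into one binary indicator
# column per value:
#   _demo = pd.DataFrame({"salary": ["low", "high", "low"]})
#   break_down_features(["low", "high"], "salary", _demo)
#   # _demo now contains salary_low = [1, 0, 1] and salary_high = [0, 1, 0]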
# Now that we have added our new categories we can drop our old object ones.
init_data = init_data.drop(["sales", "salary"], axis=1)
if is_verbose:
# Now let's look at the percentage of people who left vs. those who stayed
print(init_data["left"].value_counts() / len(init_data["left"])) ## 0: 0.761917; 1: 0.238083
# Now let's create a stratified splitting method to ensure the split preserves the same percentages
# of our feature of interest as the original data set as a whole.
def stratified_split_data(data, ratio):
# Grab the data into its own category
stayed_data = data.loc[data["left"] == 0]
left_data = data.loc[data["left"] == 1]
# mix up the data
stayed_data = stayed_data.iloc[np.random.permutation(len(stayed_data))]
left_data = left_data.iloc[np.random.permutation(len(left_data))]
test_stayed_set_size = int(len(stayed_data) * ratio)
test_left_set_size = int(len(left_data) * ratio)
# Concatenate the partitioned data
train_set = pd.concat([stayed_data[test_stayed_set_size:], left_data[test_left_set_size:]], ignore_index=True)
test_set = pd.concat([stayed_data[:test_stayed_set_size], left_data[:test_left_set_size]], ignore_index=True)
# Now mix up the concatenated data
train_shuffled_indices = np.random.permutation(len(train_set))
test_shuffled_indices = np.random.permutation(len(test_set))
return train_set.iloc[train_shuffled_indices], test_set.iloc[test_shuffled_indices]
train_set, test_set = stratified_split_data(init_data, 0.2)
if is_verbose:
# Now let's make sure we still have the same percentages
print(train_set["left"].value_counts() / len(train_set["left"]))
data = (train_set.drop("left", axis=1)).values
data_labels = train_set["left"].values
data_labels = data_labels.reshape([len(data_labels), 1])
num_features = data.shape[1]
n_samples = data.shape[0]
X_init = tf.placeholder(tf.float32, [None, num_features])
Y_init = tf.placeholder(tf.float32, [None, 1])
w_1 = tf.Variable(tf.truncated_normal([num_features, 10], stddev=0.01))
b_1 = tf.Variable(tf.truncated_normal([10], stddev=0.01))
layer_1 = tf.nn.elu(tf.add(tf.matmul(X_init, w_1), b_1))
if do_relu:
layer_1 = tf.nn.relu(tf.add(tf.matmul(X_init, w_1), b_1))
w_2 = tf.Variable(tf.truncated_normal([10, 8], stddev=0.01))
b_2 = tf.Variable(tf.truncated_normal([8], stddev=0.01))
layer_2 = tf.nn.elu(tf.add(tf.matmul(layer_1, w_2), b_2))
if do_relu:
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, w_2), b_2))
w_3 = tf.Variable(tf.truncated_normal([8, 1], stddev=0.01))
b_3 = tf.Variable(tf.truncated_normal([1], stddev=0.01))
output_layer = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, w_3), b_3))
cost = -tf.reduce_mean(tf.multiply(Y_init, tf.log(output_layer)) + (1 - Y_init)*tf.log(1 - output_layer) )
optimizer = tf.train.AdamOptimizer(1e-3).minimize(cost)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
loss_values = []
num_epochs = 600
if do_full:
num_epochs = 7000
batch_size = 50
count = len(data)
for epoch in range(num_epochs):
start_n = 0
c = None
o = None
if do_full:
_, c = sess.run([optimizer, cost], feed_dict={X_init:data, Y_init:data_labels})
else:
while start_n < count:
sess.run(optimizer, feed_dict={X_init:data[start_n:(start_n + batch_size)], Y_init:data_labels[start_n:(start_n + batch_size)]})
start_n += batch_size
c = sess.run(cost, feed_dict={X_init:data, Y_init:data_labels})
loss_values.append(c)
sys.stdout.write("Epoch: {0}/{1} cost: {2} \r".format(epoch+1, num_epochs, c))
sys.stdout.flush()
if is_verbose:
print("Final cost = {0} weights: {1} biases: {2}".format(sess.run(cost, feed_dict={X_init:data, Y_init:data_labels}), sess.run(w_2), sess.run(b_2) ) )
else:
print("Final cost = {0}".format(sess.run(cost, feed_dict={X_init:data, Y_init:data_labels})) )
# Let's take a look at the confusion matrix
predictions = sess.run(output_layer, feed_dict={X_init:data})
if is_verbose:
print("Displaying first 5 predictions and first 5 labels.")
print(predictions[:5])
print(data_labels[:5])
# define our confusion matrix function to spit out all of the data we want to see.
def confusion_matrix(pred_data, act_data, threshold=0.7):
stayed_true = 0
stayed_false = 0
left_true = 0
left_false = 0
for i in range(len(pred_data)):
if pred_data[i][0] >= threshold and act_data[i][0] == 1:
left_true += 1
elif pred_data[i][0] < threshold and act_data[i][0] == 1:
left_false += 1
elif pred_data[i][0] >= threshold and act_data[i][0] == 0:
stayed_false += 1
elif pred_data[i][0] < threshold and act_data[i][0] == 0:
stayed_true += 1
precision = left_true/np.max([1e-5, (left_true + left_false)])
recall = left_true/np.max([1e-5, (left_true + stayed_false)])
f1_score = 2*((precision*recall)/(precision+recall))
print("Stayed True: {0}\nStayed False: {1}\nLeft True: {2}\nLeft False: {3}".format(stayed_true, stayed_false, left_true, left_false))
print("Precision = {0}".format(precision))
print("Recall = {0}".format(recall))
print("F1 score = {0}".format(f1_score))
print("Total Accuracy = {0}".format((stayed_true+left_true)/(len(pred_data))) )
confusion_matrix(predictions, data_labels, 0.33)
if is_verbose:
## The test set is getting comparable numbers as well.
## However, let's check the precision and recall to see what is happening.
test_data = (test_set.drop("left", axis=1)).values
test_data_labels = test_set["left"].values
test_data_labels = test_data_labels.reshape([len(test_data_labels), 1])
print("Cost for test data: {0}".format(sess.run(cost, feed_dict={X_init:test_data, Y_init:test_data_labels}) ) )
test_predictions = sess.run(output_layer, feed_dict={X_init:test_data})
confusion_matrix(test_predictions, test_data_labels, 0.33)
if do_plot:
import matplotlib.pyplot as plt
plt.plot(loss_values)
plt.show()
sess.close()
| 38.671569
| 154
| 0.699708
|
e5e9f7077e8df48cebaddce82e4c3ff48e23328b
| 23,373
|
py
|
Python
|
category_encoders/tests/test_encoders.py
|
deil87/categorical-encoding
|
c867aa09343b55cbcb600958542d6e8b0c405d86
|
[
"BSD-3-Clause"
] | null | null | null |
category_encoders/tests/test_encoders.py
|
deil87/categorical-encoding
|
c867aa09343b55cbcb600958542d6e8b0c405d86
|
[
"BSD-3-Clause"
] | null | null | null |
category_encoders/tests/test_encoders.py
|
deil87/categorical-encoding
|
c867aa09343b55cbcb600958542d6e8b0c405d86
|
[
"BSD-3-Clause"
] | null | null | null |
import doctest
import math
import os
import random
import sklearn
import pandas as pd
import numpy as np
from datetime import timedelta
from sklearn.utils.estimator_checks import check_transformer_general, check_transformers_unfitted
from unittest2 import TestSuite, TextTestRunner, TestCase # or `from unittest import ...` if on Python 3.4+
import category_encoders as encoders
__author__ = 'willmcginnis'
# subroutines
def create_array(n_rows=1000, extras=False, has_none=True):
"""
Creates a numpy dataset with some categorical variables
:return:
"""
ds = [[
random.random(),
random.random(),
random.choice(['A', 'B', 'C']),
random.choice(['A', 'B', 'C', 'D']) if extras else random.choice(['A', 'B', 'C']),
random.choice(['A', 'B', 'C', None, np.nan]) if has_none else random.choice(['A', 'B', 'C']),
random.choice(['A'])
] for _ in range(n_rows)]
return np.array(ds)
def create_dataset(n_rows=1000, extras=False, has_none=True):
"""
Creates a dataset with some categorical variables
"""
ds = [[
random.random(), # Floats
random.choice([float('nan'), float('inf'), float('-inf'), -0, 0, 1, -1, math.pi]), # Floats with edge scenarios
row, # Unique integers
str(row), # Unique strings
random.choice(['A']), # Invariant
random.choice(['A', 'B_b', 'C_c_c']), # Strings with underscores to test reverse_dummies()
random.choice(['A', 'B', 'C', None]) if has_none else random.choice(['A', 'B', 'C']), # None
random.choice(['A', 'B', 'C', 'D']) if extras else random.choice(['A', 'B', 'C']), # With a new string value
random.choice([12, 43, -32]) # Number in the column name
] for row in range(n_rows)]
df = pd.DataFrame(ds, columns=['float', 'float_edge', 'unique_int', 'unique_str', 'invariant', 'underscore', 'none', 'extra', 321])
return df
def verify_numeric(X_test):
for dt in X_test.dtypes:
numeric = False
if np.issubdtype(dt, np.dtype(int)) or np.issubdtype(dt, np.dtype(float)):
numeric = True
assert numeric
def verify_inverse_transform(x, x_inv):
"""
Verify x is equal to x_inv. The test returns true for NaN.equals(NaN) as it should.
"""
assert x.equals(x_inv)
# data definitions
np_X = create_array(n_rows=100)
np_X_t = create_array(n_rows=50, extras=True)
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = create_dataset(n_rows=100)
X_t = create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)
# this class utilises parametrised tests where we loop over different encoders
# tests that are applicable to only one encoder are the end of the class
class TestEncoders(TestCase):
def test_np(self):
for encoder_name in encoders.__all__:
with self.subTest(encoder_name=encoder_name):
# Encode a numpy array
enc = getattr(encoders, encoder_name)()
enc.fit(np_X, np_y)
verify_numeric(enc.transform(np_X_t))
def test_classification(self):
for encoder_name in encoders.__all__:
with self.subTest(encoder_name=encoder_name):
cols = ['unique_str', 'underscore', 'extra', 'none', 'invariant', 321]
enc = getattr(encoders, encoder_name)(cols=cols)
enc.fit(X, np_y)
verify_numeric(enc.transform(X_t))
enc = getattr(encoders, encoder_name)(verbose=1)
enc.fit(X, np_y)
verify_numeric(enc.transform(X_t))
enc = getattr(encoders, encoder_name)(drop_invariant=True)
enc.fit(X, np_y)
verify_numeric(enc.transform(X_t))
enc = getattr(encoders, encoder_name)(return_df=False)
enc.fit(X, np_y)
self.assertTrue(isinstance(enc.transform(X_t), np.ndarray))
self.assertEqual(enc.transform(X_t).shape[0], X_t.shape[0], 'Row count must not change')
# documented in issue #122
# when we use the same encoder on two different datasets, it should not explode
# X_a = pd.DataFrame(data=['1', '2', '2', '2', '2', '2'], columns=['col_a'])
# X_b = pd.DataFrame(data=['1', '1', '1', '2', '2', '2'], columns=['col_b']) # different values and name
# y_dummy = [True, False, True, False, True, False]
# enc = getattr(encoders, encoder_name)()
# enc.fit(X_a, y_dummy)
# enc.fit(X_b, y_dummy)
# verify_numeric(enc.transform(X_b))
def test_impact_encoders(self):
for encoder_name in ['LeaveOneOutEncoder', 'TargetEncoder', 'WOEEncoder']:
with self.subTest(encoder_name=encoder_name):
# encode a numpy array and transform with the help of the target
enc = getattr(encoders, encoder_name)()
enc.fit(np_X, np_y)
verify_numeric(enc.transform(np_X_t, np_y_t))
# target is a DataFrame
enc = getattr(encoders, encoder_name)()
enc.fit(X, y)
verify_numeric(enc.transform(X_t, y_t))
# when we run transform(X, y) and there is a new value in X, something is wrong and we raise an error
enc = getattr(encoders, encoder_name)(impute_missing=True, handle_unknown='error', cols=['extra'])
enc.fit(X, y)
self.assertRaises(ValueError, enc.transform, (X_t, y_t))
def test_error_handling(self):
for encoder_name in encoders.__all__:
with self.subTest(encoder_name=encoder_name):
# we exclude some columns
X = create_dataset(n_rows=100)
X = X.drop(['unique_str', 'none'], axis=1)
X_t = create_dataset(n_rows=50, extras=True)
X_t = X_t.drop(['unique_str', 'none'], axis=1)
# illegal state, we have to first train the encoder...
enc = getattr(encoders, encoder_name)()
with self.assertRaises(ValueError):
enc.transform(X)
# wrong count of attributes
enc = getattr(encoders, encoder_name)()
enc.fit(X, y)
with self.assertRaises(ValueError):
enc.transform(X_t.iloc[:, 0:3])
# no cols
enc = getattr(encoders, encoder_name)(cols=[])
enc.fit(X, y)
self.assertTrue(enc.transform(X_t).equals(X_t))
def test_handle_unknown_error(self):
# BaseN has problems with None -> ignore None
X = create_dataset(n_rows=100, has_none=False)
X_t = create_dataset(n_rows=50, extras=True, has_none=False)
for encoder_name in (set(encoders.__all__) - {'HashingEncoder'}): # HashingEncoder supports new values by design -> excluded
with self.subTest(encoder_name=encoder_name):
# new value during scoring
enc = getattr(encoders, encoder_name)(handle_unknown='error')
enc.fit(X, y)
with self.assertRaises(ValueError):
_ = enc.transform(X_t)
def test_sklearn_compliance(self):
for encoder_name in encoders.__all__:
with self.subTest(encoder_name=encoder_name):
# in sklearn < 0.19.0, these methods require classes,
# in sklearn >= 0.19.0, these methods require instances
if sklearn.__version__ < '0.19.0':
encoder = getattr(encoders, encoder_name)
else:
encoder = getattr(encoders, encoder_name)()
check_transformer_general(encoder_name, encoder)
check_transformers_unfitted(encoder_name, encoder)
def test_inverse_transform(self):
# we do not allow None in these data (but "none" column without any None is ok)
X = create_dataset(n_rows=100, has_none=False)
X_t = create_dataset(n_rows=50, has_none=False)
X_t_extra = create_dataset(n_rows=50, extras=True, has_none=False)
cols = ['underscore', 'none', 'extra', 321]
for encoder_name in ['BaseNEncoder', 'BinaryEncoder', 'OneHotEncoder', 'OrdinalEncoder']:
with self.subTest(encoder_name=encoder_name):
# simple run
enc = getattr(encoders, encoder_name)(verbose=1, cols=cols)
enc.fit(X)
verify_inverse_transform(X_t, enc.inverse_transform(enc.transform(X_t)))
# when a new value is encountered, do not raise an exception
enc = getattr(encoders, encoder_name)(verbose=1, cols=cols)
enc.fit(X, y)
_ = enc.inverse_transform(enc.transform(X_t_extra))
def test_types(self):
X = pd.DataFrame({
'Int': [1, 2, 1, 2],
'Float': [1.1, 2.2, 3.3, 4.4],
'Complex': [3.45J, 3.45J, 3.45J, 3.45J],
'None': [None, None, None, None],
'Str': ['a', 'c', 'c', 'd'],
'PdTimestamp': [pd.Timestamp('2012-05-01'), pd.Timestamp('2012-05-02'), pd.Timestamp('2012-05-03'), pd.Timestamp('2012-05-06')],
'PdTimedelta': [pd.Timedelta('1 days'), pd.Timedelta('2 days'), pd.Timedelta('1 days'), pd.Timedelta('1 days')],
'TimeDelta': [timedelta(-9999), timedelta(-9), timedelta(-1), timedelta(999)],
'Bool': [False, True, True, False],
'Tuple': [('a', 'tuple'), ('a', 'tuple'), ('a', 'tuple'), ('b', 'tuple')],
# 'Categorical': pd.Categorical(list('bbea'), categories=['e', 'a', 'b'], ordered=True),
# 'List': [[1,2], [2,3], [3,4], [4,5]],
# 'Dictionary': [{1: "a", 2: "b"}, {1: "a", 2: "b"}, {1: "a", 2: "b"}, {1: "a", 2: "b"}],
# 'Set': [{'John', 'Jane'}, {'John', 'Jane'}, {'John', 'Jane'}, {'John', 'Jane'}],
# 'Array': [array('i'), array('i'), array('i'), array('i')]
})
y = [1, 0, 0, 1]
for encoder_name in encoders.__all__:
encoder = getattr(encoders, encoder_name)()
encoder.fit_transform(X, y)
# encoder specific tests
def test_binary_bin(self):
data = np.array(['a', 'ba', 'ba'])
out = encoders.BinaryEncoder().fit_transform(data)
self.assertTrue(pd.DataFrame([[0, 1], [1, 0], [1, 0]], columns=['0_0', '0_1']).equals(out))
def test_binary_dist(self):
data = np.array(['apple', 'orange', 'peach', 'lemon'])
encoder = encoders.BinaryEncoder()
encoder.fit(data)
# split dataframe into two transforms and recombine
a = encoder.transform(data[:1])
b = encoder.transform(data[1:])
split = pd.concat([a, b])
split = split.reset_index(drop=True)
# run all at once
c = encoder.transform(data)
# make sure they are the same
self.assertTrue(split.equals(c))
def test_leave_one_out(self):
enc = encoders.LeaveOneOutEncoder(verbose=1, randomized=True, sigma=0.1)
enc.fit(X, y)
verify_numeric(enc.transform(X_t))
verify_numeric(enc.transform(X_t, y_t))
def test_leave_one_out_values(self):
df = pd.DataFrame({
'color': ["a", "a", "a", "b", "b", "b"],
'outcome': [1, 0, 0, 1, 0, 1]})
X = df.drop('outcome', axis=1)
y = df.drop('color', axis=1)
ce_leave = encoders.LeaveOneOutEncoder(cols=['color'], randomized=False)
obtained = ce_leave.fit_transform(X, y['outcome'])
self.assertEquals([0.0, 0.5, 0.5, 0.5, 1.0, 0.5], list(obtained['color']))
def test_leave_one_out_fit_callTwiceOnDifferentData_ExpectRefit(self):
x_a = pd.DataFrame(data=['1', '2', '2', '2', '2', '2'], columns=['col_a'])
x_b = pd.DataFrame(data=['1', '1', '1', '2', '2', '2'], columns=['col_b']) # different values and name
y_dummy = [True, False, True, False, True, False]
encoder = encoders.LeaveOneOutEncoder()
encoder.fit(x_a, y_dummy)
encoder.fit(x_b, y_dummy)
mapping = encoder.mapping
self.assertEqual(1, len(mapping))
col_b_mapping = mapping[0]
self.assertEqual('col_b', col_b_mapping['col']) # the model must get updated
self.assertEqual({'sum': 2.0, 'count': 3, 'mean': 2.0/3.0}, col_b_mapping['mapping']['1'])
self.assertEqual({'sum': 1.0, 'count': 3, 'mean': 01.0/3.0}, col_b_mapping['mapping']['2'])
def test_one_hot(self):
enc = encoders.OneHotEncoder(verbose=1, return_df=False)
enc.fit(X)
self.assertEqual(enc.transform(X_t).shape[1],
enc.transform(X_t[X_t['extra'] != 'A']).shape[1],
'We have to get the same count of columns')
enc = encoders.OneHotEncoder(verbose=1, return_df=True, impute_missing=True)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_-1', out.columns.values)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, impute_missing=True, handle_unknown='ignore')
enc.fit(X)
out = enc.transform(X_t)
self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, impute_missing=True, handle_unknown='error')
enc.fit(X)
with self.assertRaises(ValueError):
out = enc.transform(X_t)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='ignore', use_cat_names=True)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_A', out.columns.values)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_-1', out.columns.values)
# test inverse_transform
X_i = create_dataset(n_rows=100, has_none=False)
X_i_t = create_dataset(n_rows=50, has_none=False)
X_i_t_extra = create_dataset(n_rows=50, extras=True, has_none=False)
cols = ['underscore', 'none', 'extra', 321]
enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols)
enc.fit(X_i)
obtained = enc.inverse_transform(enc.transform(X_i_t))
obtained[321] = obtained[321].astype('int64') # numeric columns are incorrectly typed as object...
verify_inverse_transform(X_i_t, obtained)
def test_ordinal(self):
enc = encoders.OrdinalEncoder(verbose=1, return_df=True, impute_missing=True)
enc.fit(X)
out = enc.transform(X_t)
self.assertEqual(len(set(out['extra'].values)), 4)
self.assertIn(0, set(out['extra'].values))
self.assertFalse(enc.mapping is None)
self.assertTrue(len(enc.mapping) > 0)
enc = encoders.OrdinalEncoder(verbose=1, mapping=enc.mapping, return_df=True, impute_missing=True)
enc.fit(X)
out = enc.transform(X_t)
self.assertEqual(len(set(out['extra'].values)), 4)
self.assertIn(0, set(out['extra'].values))
self.assertTrue(len(enc.mapping) > 0)
enc = encoders.OrdinalEncoder(verbose=1, return_df=True, impute_missing=True, handle_unknown='ignore')
enc.fit(X)
out = enc.transform(X_t)
out_cats = [x for x in set(out['extra'].values) if np.isfinite(x)]
self.assertEqual(len(out_cats), 3)
self.assertFalse(enc.mapping is None)
def test_ordinal_dist(self):
data = np.array([
['apple', None],
['peach', 'lemon']
])
encoder = encoders.OrdinalEncoder(impute_missing=True)
encoder.fit(data)
a = encoder.transform(data)
self.assertEqual(a.values[0, 1], 0)
self.assertEqual(a.values[1, 1], 1)
encoder = encoders.OrdinalEncoder(impute_missing=False)
encoder.fit(data)
a = encoder.transform(data)
self.assertTrue(np.isnan(a.values[0, 1]))
self.assertEqual(a.values[1, 1], 1)
def test_target_encoder(self):
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2)
enc.fit(X, y)
verify_numeric(enc.transform(X_t))
verify_numeric(enc.transform(X_t, y_t))
def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self):
k = 2
f = 10
binary_cat_example = pd.DataFrame(
{'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'],
'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
encoder.fit(binary_cat_example, binary_cat_example['target'])
trend_mapping = encoder.mapping[0]['mapping']
self.assertAlmostEqual(0.4125, trend_mapping['DOWN']['smoothing'], delta=1e-4)
self.assertEqual(0.5, trend_mapping['FLAT']['smoothing'])
self.assertAlmostEqual(0.5874, trend_mapping['UP']['smoothing'], delta=1e-4)
def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self):
k = 2
f = 10
binary_cat_example = pd.DataFrame(
{'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'],
'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
values = result['Trend'].values
self.assertAlmostEqual(0.5874, values[0], delta=1e-4)
self.assertAlmostEqual(0.5874, values[1], delta=1e-4)
self.assertAlmostEqual(0.4125, values[2], delta=1e-4)
self.assertEqual(0.5, values[3])
    def test_woe(self):
        cols = ['unique_str', 'underscore', 'extra', 'none', 'invariant', 321]

        # balanced label with balanced features
        X_balanced = pd.DataFrame(data=['1', '1', '1', '2', '2', '2'], columns=['col1'])
        y_balanced = [True, False, True, False, True, False]
        enc = encoders.WOEEncoder()
        enc.fit(X_balanced, y_balanced)
        X1 = enc.transform(X_balanced)
        self.assertTrue(all(X1.sum() < 0.001), "When the class label is balanced, WoE should sum to 0 in each transformed column")

        enc = encoders.WOEEncoder(cols=cols)
        enc.fit(X, np_y)
        X1 = enc.transform(X_t)
        verify_numeric(X1[cols])
        self.assertTrue(np.isfinite(X1[cols].values).all(), 'There must not be any NaN, inf or -inf in the transformed columns')
        self.assertEqual(len(list(X_t)), len(list(X1)), 'The count of attributes must not change')
        self.assertEqual(len(X_t), len(X1), 'The count of rows must not change')

        X2 = enc.transform(X_t, np_y_t)
        verify_numeric(X2)
        self.assertTrue(np.isfinite(X2[cols].values).all(), 'There must not be any NaN, inf or -inf in the transformed columns')
        self.assertEqual(len(list(X_t)), len(list(X2)), 'The count of attributes must not change')
        self.assertEqual(len(X_t), len(X2), 'The count of rows must not change')

        X3 = enc.transform(X, np_y)
        verify_numeric(X3)
        self.assertTrue(np.isfinite(X3[cols].values).all(), 'There must not be any NaN, inf or -inf in the transformed columns')
        self.assertEqual(len(list(X)), len(list(X3)), 'The count of attributes must not change')
        self.assertEqual(len(X), len(X3), 'The count of rows must not change')
        self.assertTrue(X3['unique_str'].var() < 0.001, 'The unique string column must not be predictive of the label')

        X4 = enc.fit_transform(X, np_y)
        verify_numeric(X4)
        self.assertTrue(np.isfinite(X4[cols].values).all(), 'There must not be any NaN, inf or -inf in the transformed columns')
        self.assertEqual(len(list(X)), len(list(X4)), 'The count of attributes must not change')
        self.assertEqual(len(X), len(X4), 'The count of rows must not change')
        self.assertTrue(X4['unique_str'].var() < 0.001, 'The unique string column must not be predictive of the label')

        enc = encoders.WOEEncoder()
        enc.fit(X, np_y)
        X1 = enc.transform(X_t)
        self.assertEqual(len(list(X_t)), len(list(X1)), 'The count of attributes must not change')
        self.assertEqual(len(X_t), len(X1), 'The count of rows must not change')
        verify_numeric(X1)

        X2 = enc.transform(X_t, np_y_t)
        verify_numeric(X2)
        self.assertEqual(len(list(X_t)), len(list(X2)), 'The count of attributes must not change')
        self.assertEqual(len(X_t), len(X2), 'The count of rows must not change')

        # seed
        enc = encoders.WOEEncoder(cols=cols, random_state=2001, randomized=True)
        enc.fit(X, np_y)
        X1 = enc.transform(X_t, np_y_t)
        X2 = enc.transform(X_t, np_y_t)
        self.assertTrue(X1.equals(X2), "When the seed is given, the results must be identical")
        verify_numeric(X1)
        verify_numeric(X2)

        # invariant target
        y_invariant = [True, True, True, True, True, True]
        enc = encoders.WOEEncoder()
        with self.assertRaises(ValueError):
            enc.fit(X_balanced, y_invariant)

        # branch coverage unit tests - no cols
        enc = encoders.WOEEncoder(cols=[])
        enc.fit(X, np_y)
        self.assertTrue(enc.transform(X_t).equals(X_t))

        # missing values in the target
        y_missing = [True, True, None, True, True, True]
        enc = encoders.WOEEncoder()
        with self.assertRaises(ValueError):
            enc.fit(X_balanced, y_missing)

        # impute missing
        enc = encoders.WOEEncoder(impute_missing=False)
        enc.fit(X, np_y)
        X1 = enc.transform(X_t)
        verify_numeric(X1)
        self.assertTrue(X1.isnull().values.any())
        self.assertEqual(len(list(X_t)), len(list(X1)), 'The count of attributes must not change')
        self.assertEqual(len(X_t), len(X1), 'The count of rows must not change')

        X2 = enc.transform(X_t, np_y_t)
        verify_numeric(X2)
        self.assertTrue(X1.isnull().values.any())
        self.assertEqual(len(list(X_t)), len(list(X2)), 'The count of attributes must not change')
        self.assertEqual(len(X_t), len(X2), 'The count of rows must not change')

    # beware: for some reason doctest does not raise exceptions - you have to read the text output
    def test_doc(self):
        suite = TestSuite()

        for filename in os.listdir('../'):
            if filename.endswith(".py"):
                suite.addTest(doctest.DocFileSuite('../' + filename))

        runner = TextTestRunner(verbosity=2)
        runner.run(suite)
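        # A possible tightening (a sketch, not what the original test does):
        # TextTestRunner.run() returns a unittest.TestResult, so doctest failures
        # could be asserted on instead of only being read from the console output:
        #
        #     result = runner.run(suite)
        #     self.assertEqual([], result.failures)
        #     self.assertEqual([], result.errors)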
| 45.472763 | 148 | 0.591965 |
bbf8ed787af592ca4a5e3d4a3d0585122b0ecab1 | 4,648 | py | Python | tasks/Scrapy/scrapy_official_newspapers/spiders/USFR_1.py | thefirebanks/policy-data-analyzer | 670a4ea72ab71975b84c4a4ec43d573371c4a986 | ["RSA-MD"] | 1 | 2021-05-17T02:30:30.000Z | 2021-05-17T02:30:30.000Z | tasks/Scrapy/scrapy_official_newspapers/spiders/USFR_1.py | thefirebanks/policy-data-analyzer | 670a4ea72ab71975b84c4a4ec43d573371c4a986 | ["RSA-MD"] | 2 | 2022-01-13T03:44:13.000Z | 2022-03-12T00:57:48.000Z | tasks/Scrapy/scrapy_official_newspapers/spiders/USFR_1.py | thefirebanks/policy-data-analyzer | 670a4ea72ab71975b84c4a4ec43d573371c4a986 | ["RSA-MD"] | 1 | 2021-05-16T20:50:38.000Z | 2021-05-16T20:50:38.000Z |
import csv
import datetime
import json

import scrapy

from scrapy_official_newspapers.items import ScrapyOfficialNewspapersItem
from scrapy_official_newspapers.spiders import BaseSpider


class USFR_1(BaseSpider):
    name = "USFR_1"
    country = "USA"
    country_code = "US"  # You can find the ISO3166 country code here: https://gist.github.com/ssskip/5a94bfcd2835bf1dea52
    state_name = "Federal"
    satate_code = ""  # As per the Holidays package; you can find the code here https://pypi.org/project/holidays/ if available.
    source = "Federal Register"
    spider_builder = "Jordi Planas"
    scrapable = "True"
    allowed_domains = ["api.govinfo.gov"]
    start_date = "2015-03-01"
    stop_date = "2021-03-10"
    # API_key_file = 'C:/Users/user/Google Drive/Els_meus_documents/projectes/CompetitiveIntelligence/WRI/Notebooks/credentials/us_gov_api_key.json'
    # API_key_file = 'C:/Users/jordi/Documents/claus/us_gov_api_key.json'
    API_key_file = '/home/propietari/Documents/claus/us_gov_api_key.json'
    # API_key_file = '/home/propietari/Documents/claus/us_gov_api_key_jpc.json'
    def __init__(self):
        # First we import the two dictionaries that we are going to use to filter the policies.
        self.keyword_dict = self.import_json('./keywords_and_dictionaries/keywords_knowledge_domain_EN.json')
        self.negative_keyword_dict = self.import_json('./keywords_and_dictionaries/negative_keywords_knowledge_domain_EN.json')
        # We load the credentials to access the API.
        self.API_key = self.import_json(self.API_key_file)["us gov apikey jp"]
        # This is to set the time span variables.
        self.from_date, self.today = self.create_date_span(self.start_date)
        self.to_date = datetime.datetime.strptime(self.stop_date, '%Y-%m-%d').date()
        # The scraping cannot be done in a single batch: the spider breaks once the API key reaches its
        # request limit, and because items are not retrieved consecutively the output file has gaps.
        # The block below loads the dates that were already scraped so that later runs only fill in the
        # missing days.
        path = "/home/propietari/Documents/GitHub/policy-data-analyzer/tasks/Scrapy/scrapy_official_newspapers/output/"
        file_name = "USFR_20210310.csv"
        file = path + file_name
        self.dates_dict = {}
        with open(file, 'r', errors="surrogateescape") as f:
            reader = csv.reader(f)
            for row in reader:
                self.dates_dict[datetime.datetime.strptime(row[6], '%Y-%m-%d').date()] = 0
    def start_requests(self):
        for day in self.create_date_list(self.from_date, self.stop_date, 1, "days", self.country_code):
            if day not in self.dates_dict:
                print(day)
                start_url = f'https://api.govinfo.gov/packages/FR-{day}/granules?offset=0&pageSize=300&api_key={self.API_key}'
                # print(start_url)
                yield scrapy.Request(start_url, dont_filter=True, callback=self.parse)

    def parse(self, response):
        for granule in json.loads(response.text)["granules"]:
            url = granule["granuleLink"] + f'?api_key={self.API_key}'
            # self.debug(url)
            yield scrapy.Request(url, dont_filter=True, callback=self.parse_other)
    def parse_other(self, response):
        summary_full = response.json()
        # self.debug(summary_full)
        title = summary_full["title"]
        if "summary" in summary_full:
            summary = summary_full["summary"]
        else:
            summary = ""
        text_to_search = summary_full["title"] + " " + summary
        if self.search_keywords(text_to_search, self.keyword_dict, self.negative_keyword_dict):
            item = ScrapyOfficialNewspapersItem()
            item['country'] = self.country
            item['state'] = self.state_name
            item['data_source'] = self.source
            item['law_class'] = summary_full['category']
            item['title'] = summary_full['title']
            item['reference'] = summary_full['granuleId']
            item['authorship'] = summary_full['agencies'][0]['name']
            item['summary'] = summary
            item['publication_date'] = summary_full['dateIssued']
            item['url'] = summary_full['download']['txtLink'].replace('htm', 'summary?api_key=')
            doc_url = summary_full['download']['txtLink'] + f'?api_key={self.API_key}'
            # self.debug(doc_url)
            item['file_urls'] = [doc_url]
            item['doc_name'] = self.HSA1_encoding(summary_full['download']['txtLink'] + f'?api_key={self.API_key}') + ".txt"
            yield item
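

# A typical local invocation for this spider (a sketch; the actual feed/export
# configuration and item pipelines live in the project's Scrapy settings, and
# the output path below is only illustrative):
#
#     scrapy crawl USFR_1 -o output/USFR.csv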
| 49.978495 | 166 | 0.664587 |
26136898f383904bfd389de08d8eb1ccfb55142f | 1,901 | py | Python | naiivedisjointsets.py | orionsring/basic_python | 2556d1b3d95334f41cf688f61d4e65bbf38e701a | ["MIT"] | null | null | null | naiivedisjointsets.py | orionsring/basic_python | 2556d1b3d95334f41cf688f61d4e65bbf38e701a | ["MIT"] | null | null | null | naiivedisjointsets.py | orionsring/basic_python | 2556d1b3d95334f41cf688f61d4e65bbf38e701a | ["MIT"] | null | null | null |
#!/usr/bin/python
import unittest

import disjoint


class Disjoint:
    def __init__(self):
        self._sets = []

    def createSet(self, repr):
        self._sets.append([repr])  # each representative starts in its own singleton list

    def mergeSets(self, repr1, repr2):
        set1 = self.findSet(repr1)
        set2 = self.findSet(repr2)
        if set1 != set2:
            set1.extend(set2)
            self._sets.remove(set2)

    def findSet(self, repr1):
        for oneSet in self._sets:
            if repr1 in oneSet:
                return oneSet

    def getSets(self):  # python convention is to expose attributes directly
        return self._sets
# test class:
class TestSequenceFunctions(unittest.TestCase):
    def setUp(self):
        pass

    def test_empty(self):
        dis = disjoint.Disjoint()
        self.assertEqual([], dis.getSets())
        self.assertEqual(None, dis.findSet(1))

    def test_init(self):
        dis = disjoint.Disjoint()
        for i in range(1, 6):
            dis.createSet(i)
        for i in range(1, 6):
            found = dis.findSet(i)
            self.assertEqual(1, len(found))
            self.assertEqual(i, found[0])
        expected = [[i] for i in range(1, 6)]
        self.assertEqual(expected, dis.getSets())

    def test_simple(self):
        dis = disjoint.Disjoint()
        for i in range(1, 6):
            dis.createSet(i)
        pairs = [[1, 2], [2, 4], [4, 5]]
        for p in pairs:
            p1 = p[0]
            p2 = p[1]
            if dis.findSet(p1) != dis.findSet(p2):
                dis.mergeSets(p1, p2)
        expectedSets = [[1, 2, 4, 5], [3]]
        self.assertEqual(expectedSets, dis.getSets())


if __name__ == '__main__':
    unittest.main()

# Note that there is camelCase here, which is not part of PEP 8.
# Note 2 - the getSets method is unnecessary; the Python convention is to expose the attribute directly.
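

# For contrast, a minimal sketch of the standard union-find structure with path
# compression and union by size (not part of the original naive module; the class
# and attribute names below are illustrative). It answers the same connectivity
# queries in near-constant amortized time, whereas the list-of-lists version
# above pays O(n) per findSet/mergeSets call.
class UnionFind:
    def __init__(self):
        self.parent = {}  # item -> parent item
        self.size = {}    # root item -> number of items in its set

    def createSet(self, item):
        if item not in self.parent:
            self.parent[item] = item
            self.size[item] = 1

    def findSet(self, item):
        # Walk up to the root, then flatten the path behind us (path compression).
        root = item
        while self.parent[root] != root:
            root = self.parent[root]
        while self.parent[item] != root:
            self.parent[item], item = root, self.parent[item]
        return root

    def mergeSets(self, repr1, repr2):
        root1, root2 = self.findSet(repr1), self.findSet(repr2)
        if root1 == root2:
            return
        # Union by size: attach the smaller tree under the larger one.
        if self.size[root1] < self.size[root2]:
            root1, root2 = root2, root1
        self.parent[root2] = root1
        self.size[root1] += self.size[root2]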
| 26.041096 | 88 | 0.533403 |
8f3d090b54ff1fcf7e54a93c88752bb2068ee90c | 831 | py | Python | corehq/apps/sms/handlers/forwarding.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | ["BSD-3-Clause"] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | corehq/apps/sms/handlers/forwarding.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | ["BSD-3-Clause"] | null | null | null | corehq/apps/sms/handlers/forwarding.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | ["BSD-3-Clause"] | null | null | null |
from corehq.apps.sms.models import (
    ForwardingRule, FORWARD_ALL, FORWARD_BY_KEYWORD,
)
from corehq.apps.sms.api import send_sms_with_backend


def forwarding_handler(v, text, msg):
    rules = ForwardingRule.view("sms/forwarding_rule",
        key=[v.domain], include_docs=True).all()
    text_words = text.upper().split()
    keyword_to_match = text_words[0] if len(text_words) > 0 else ""
    for rule in rules:
        matches_rule = False
        if rule.forward_type == FORWARD_ALL:
            matches_rule = True
        elif rule.forward_type == FORWARD_BY_KEYWORD:
            matches_rule = (keyword_to_match == rule.keyword.upper())
        if matches_rule:
            send_sms_with_backend(v.domain, "+%s" % v.phone_number, text,
                rule.backend_id)
            return True
    return False
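

# Illustration of the matching logic above (hypothetical values, not taken from
# the codebase): with a FORWARD_BY_KEYWORD rule whose keyword is "clinic", the
# incoming text "Clinic is out of stock" is forwarded because its first word,
# upper-cased, equals the upper-cased keyword; "Our clinic is out of stock" is
# not, since only the first word is compared. A FORWARD_ALL rule forwards every
# message received for the domain, and the handler returns True as soon as one
# rule matches and the SMS has been re-sent through that rule's backend.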
| 34.625 | 73 | 0.652226 |
26827ada9161bf8a8f937ba561e7037f89c4bae1 | 373 | py | Python | custom/icds_reports/migrations/0144_merge_20191106_1522.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | ["BSD-3-Clause"] | 1 | 2020-07-14T13:00:23.000Z | 2020-07-14T13:00:23.000Z | custom/icds_reports/migrations/0144_merge_20191106_1522.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | ["BSD-3-Clause"] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | custom/icds_reports/migrations/0144_merge_20191106_1522.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-11-06 15:22
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('icds_reports', '0143_remove_group_by_thr_view'),
        ('icds_reports', '0143_add_ucrreconciliationstatus_squashed'),
    ]

    operations = [
    ]
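
# Note: this is an auto-generated merge migration. It declares no operations of
# its own; it only lists both 0143_* migrations as dependencies so that the two
# parallel migration branches are linearized into a single history.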
| 21.941176 | 70 | 0.69437 |