from numpy.polynomial import Polynomial, Chebyshev
poly = Polynomial([1.0, 2.0, 3.0])  # example power-basis polynomial; `poly` was undefined in the original snippet
polyT = poly.convert(kind=Chebyshev)  # re-express the same polynomial in the Chebyshev basis
|
import sys
import logging
import os
import time
import re
import threading
import hashlib
import xml.dom.minidom
import tokens
from datetime import datetime
SPLUNK_HOME = os.environ.get("SPLUNK_HOME")
RESPONSE_HANDLER_INSTANCE = None
SPLUNK_PORT = 8089
STANZA = None
SESSION_TOKEN = None
REGEX_PATTERN = None
# dynamically load in any eggs in /etc/apps/rest_ta/bin
EGG_DIR = SPLUNK_HOME + "/etc/apps/rest_ta/bin/"
for filename in os.listdir(EGG_DIR):
if filename.endswith(".egg"):
sys.path.append(EGG_DIR + filename)
import requests
import json
from requests.auth import HTTPBasicAuth
from requests.auth import HTTPDigestAuth
from requests_oauthlib import OAuth1
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import WebApplicationClient
from requests.auth import AuthBase
from splunklib.client import connect
from splunklib.client import Service
import splunk.entity as entity  # used by get_credentials(); assumes Splunk's bundled Python environment
from croniter import croniter
# set up logging
logging.root.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)s %(message)s')
# with zero args , should go to STD ERR
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
SCHEME = """<scheme>
<title>REST</title>
<description>REST API input for polling data from RESTful endpoints</description>
<use_external_validation>true</use_external_validation>
<streaming_mode>xml</streaming_mode>
<use_single_instance>false</use_single_instance>
<endpoint>
<args>
<arg name="name">
<title>REST input name</title>
<description>Name of this REST input</description>
</arg>
<arg name="endpoint">
<title>Endpoint URL</title>
<description>URL to send the HTTP GET request to</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="http_method">
<title>HTTP Method</title>
<description>HTTP method to use. Defaults to GET. POST and PUT are not strictly RESTful for requesting data from the API, but the option is useful for target APIs that are "REST like"</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="request_payload">
<title>Request Payload</title>
<description>Request payload for POST and PUT HTTP Methods</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="auth_type">
<title>Authentication Type</title>
<description>Authentication method to use : none | basic | digest | oauth1 | oauth2 | custom</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="auth_user">
<title>Authentication User</title>
<description>Authentication user for BASIC or DIGEST auth</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="auth_password">
<title>Authentication Password</title>
<description>Authentication password for BASIC or DIGEST auth</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_client_key">
<title>OAUTH 1 Client Key</title>
<description>OAUTH 1 client key</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_client_secret">
<title>OAUTH 1 Client Secret</title>
<description>OAUTH 1 client secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_access_token">
<title>OAUTH 1 Access Token</title>
<description>OAUTH 1 access token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_access_token_secret">
<title>OAUTH 1 Access Token Secret</title>
<description>OAUTH 1 access token secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_token_type">
<title>OAUTH 2 Token Type</title>
<description>OAUTH 2 token type</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_access_token">
<title>OAUTH 2 Access Token</title>
<description>OAUTH 2 access token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_token">
<title>OAUTH 2 Refresh Token</title>
<description>OAUTH 2 refresh token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_url">
<title>OAUTH 2 Token Refresh URL</title>
<description>OAUTH 2 token refresh URL</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_props">
<title>OAUTH 2 Token Refresh Properties</title>
<description>OAUTH 2 token refresh properties : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_client_id">
<title>OAUTH 2 Client ID</title>
<description>OAUTH 2 client ID</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_client_secret">
<title>OAUTH 2 Client Secret</title>
<description>OAUTH 2 client secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="http_header_propertys">
<title>HTTP Header Properties</title>
<description>Custom HTTP header properties : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="url_args">
<title>URL Arguments</title>
<description>Custom URL arguments : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_type">
<title>Response Type</title>
<description>Rest Data Response Type : json | xml | text</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="streaming_request">
<title>Streaming Request</title>
<description>Whether or not this is a HTTP streaming request : true | false</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="http_proxy">
<title>HTTP Proxy Address</title>
<description>HTTP Proxy Address</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="https_proxy">
<title>HTTPs Proxy Address</title>
<description>HTTPs Proxy Address</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="request_timeout">
<title>Request Timeout</title>
<description>Request Timeout in seconds</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="backoff_time">
<title>Backoff Time</title>
<description>Time in seconds to wait for retry after error or timeout</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="polling_interval">
<title>Polling Interval</title>
<description>Interval time in seconds to poll the endpoint</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="sequential_mode">
<title>Sequential Mode</title>
<description>Whether multiple requests spawned by tokenization are run in parallel or sequentially</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="sequential_stagger_time">
<title>Sequential Stagger Time</title>
<description>An optional stagger time period between sequential requests</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="delimiter">
<title>Delimiter</title>
<description>Delimiter to use for any multi "key=value" field inputs</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="index_error_response_codes">
<title>Index Error Responses</title>
<description>Whether or not to index error response codes : true | false</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_handler">
<title>Response Handler</title>
<description>Python classname of custom response handler</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_handler_args">
<title>Response Handler Arguments</title>
<description>Response Handler arguments string , key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_filter_pattern">
<title>Response Filter Pattern</title>
<description>Python regex pattern; if present, responses must match this pattern to be indexed</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="custom_auth_handler">
<title>Custom Auth Handler</title>
<description>Python classname of custom auth handler</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="custom_auth_handler_args">
<title>Custom Auth Handler Arguments</title>
<description>Custom Authentication Handler arguments string , key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="cookies">
<title>Cookies</title>
<description>Persist cookies in format key=value,key2=value2,...</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
</args>
</endpoint>
</scheme>
"""
def get_current_datetime_for_cron():
current_dt = datetime.now()
# dont need seconds/micros for cron
current_dt = current_dt.replace(second=0, microsecond=0)
return current_dt
def do_validate():
config = get_validation_config()
# TODO
# if there is an error, call print_validation_error and sys.exit(2)
def get_credentials(session_key):
myapp = 'rest_ta'
try:
# list all credentials
entities = entity.getEntities(['admin', 'passwords'], namespace=myapp,
owner='nobody', sessionKey=session_key)
except Exception, e:
raise Exception("Could not get credentials from splunk for app %s. Error: %s"
% (myapp, str(e)))
# return first set of credentials
for i, c in entities.items():
return c['username'], c['clear_password']
raise Exception("No credentials have been found, have you setup the App yet ?")
def do_run(config,endpoint_list):
app_name = "REST API Modular Input"
# setup some globals
server_uri = config.get("server_uri")
global SPLUNK_PORT
global STANZA
global SESSION_TOKEN
global delimiter
SPLUNK_PORT = server_uri[18:]
STANZA = config.get("name")
SESSION_TOKEN = config.get("session_key")
# encrypted_username, encrypted_password = get_credentials(SESSION_TOKEN)
# params
http_method = config.get("http_method", "GET")
request_payload = config.get("request_payload")
# none | basic | digest | oauth1 | oauth2
auth_type = config.get("auth_type", "none")
#Delimiter to use for any multi "key=value" field inputs
delimiter = config.get("delimiter", ",")
#for basic and digest
auth_user = config.get("auth_user")
auth_password = config.get("auth_password")
# for oauth1
oauth1_client_key = config.get("oauth1_client_key")
oauth1_client_secret = config.get("oauth1_client_secret")
oauth1_access_token = config.get("oauth1_access_token")
oauth1_access_token_secret = config.get("oauth1_access_token_secret")
# for oauth2
oauth2_token_type = config.get("oauth2_token_type", "Bearer")
oauth2_access_token = config.get("oauth2_access_token")
oauth2_refresh_token = config.get("oauth2_refresh_token")
oauth2_refresh_url = config.get("oauth2_refresh_url")
oauth2_refresh_props_str = config.get("oauth2_refresh_props")
oauth2_client_id = config.get("oauth2_client_id")
oauth2_client_secret = config.get("oauth2_client_secret")
oauth2_refresh_props = {}
if oauth2_refresh_props_str is not None:
oauth2_refresh_props = dict((k.strip(), v.strip()) for k, v in
(item.split('=', 1) for item in oauth2_refresh_props_str.split(delimiter)))
oauth2_refresh_props['client_id'] = oauth2_client_id
oauth2_refresh_props['client_secret'] = oauth2_client_secret
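# Illustrative: with the default "," delimiter, a value such as "grant_type=refresh_token,scope=read"
# parses to {"grant_type": "refresh_token", "scope": "read"}; client_id/client_secret are then
# added for the token refresh request.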
http_header_propertys = {}
http_header_propertys_str = config.get("http_header_propertys")
if http_header_propertys_str is not None:
http_header_propertys = dict((k.strip(), v.strip()) for k, v in
(item.split('=', 1) for item in http_header_propertys_str.split(delimiter)))
url_args = {}
url_args_str = config.get("url_args")
if url_args_str is not None:
url_args = dict((k.strip(), v.strip()) for k, v in
(item.split('=', 1) for item in url_args_str.split(delimiter)))
# json | xml | text
response_type = config.get("response_type","text")
streaming_request = int(config.get("streaming_request", 0))
http_proxy = config.get("http_proxy")
https_proxy = config.get("https_proxy")
proxies = {}
if http_proxy is not None:
proxies["http"] = http_proxy
if https_proxy is not None:
proxies["https"] = https_proxy
cookies = {}
cookies_str = config.get("cookies")
if cookies_str is not None:
cookies = dict((k.strip(), v.strip()) for k, v in
(item.split('=', 1) for item in cookies_str.split(delimiter)))
request_timeout = int(config.get("request_timeout", 30))
backoff_time = int(config.get("backoff_time", 10))
sequential_stagger_time = int(config.get("sequential_stagger_time", 0))
polling_interval_string = config.get("polling_interval", "60")
if polling_interval_string.isdigit():
polling_type = 'interval'
polling_interval = int(polling_interval_string)
else:
polling_type = 'cron'
cron_start_date = datetime.now()
cron_iter = croniter(polling_interval_string, cron_start_date)
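# Illustrative: a polling_interval such as "*/5 * * * *" is treated as a cron expression;
# cron_iter.get_next(datetime) (used in the polling loop below) returns the next matching firing time.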
index_error_response_codes = int(config.get("index_error_response_codes", 0))
response_filter_pattern = config.get("response_filter_pattern")
if response_filter_pattern:
global REGEX_PATTERN
REGEX_PATTERN = re.compile(response_filter_pattern)
response_handler_args = {}
response_handler_args_str = config.get("response_handler_args")
if response_handler_args_str is not None:
response_handler_args = dict((k.strip(), v.strip()) for k, v in
(item.split('=', 1) for item in response_handler_args_str.split(delimiter)))
response_handler = config.get("response_handler", "DefaultResponseHandler")
module = __import__("responsehandlers")
class_ = getattr(module, response_handler)
global RESPONSE_HANDLER_INSTANCE
RESPONSE_HANDLER_INSTANCE = class_(**response_handler_args)
custom_auth_handler = config.get("custom_auth_handler")
CUSTOM_AUTH_HANDLER_INSTANCE = None
if custom_auth_handler:
module = __import__("authhandlers")
class_ = getattr(module, custom_auth_handler)
custom_auth_handler_args = {}
custom_auth_handler_args_str = config.get("custom_auth_handler_args")
if custom_auth_handler_args_str is not None:
custom_auth_handler_args = dict((k.strip(), v.strip()) for k, v in (item.split('=', 1) for item in custom_auth_handler_args_str.split(delimiter)))
CUSTOM_AUTH_HANDLER_INSTANCE = class_(**custom_auth_handler_args)
try:
auth = None
oauth2 = None
if auth_type == "basic":
auth = HTTPBasicAuth(auth_user, auth_password)
elif auth_type == "digest":
auth = HTTPDigestAuth(auth_user, auth_password)
elif auth_type == "oauth1":
auth = OAuth1(oauth1_client_key, oauth1_client_secret,
oauth1_access_token, oauth1_access_token_secret)
elif auth_type == "oauth2":
token = {}
token["token_type"] = oauth2_token_type
token["access_token"] = oauth2_access_token
token["refresh_token"] = oauth2_refresh_token
token["expires_in"] = "5"
client = WebApplicationClient(oauth2_client_id)
oauth2 = OAuth2Session(client, token=token, auto_refresh_url=oauth2_refresh_url, auto_refresh_kwargs=oauth2_refresh_props, token_updater=oauth2_token_updater)
elif auth_type == "custom" and CUSTOM_AUTH_HANDLER_INSTANCE:
auth = CUSTOM_AUTH_HANDLER_INSTANCE
req_args = {"verify": False, "stream": bool(streaming_request), "timeout": float(request_timeout)}
if auth:
req_args["auth"] = auth
if url_args:
req_args["params"] = url_args
if cookies:
req_args["cookies"] = cookies
if http_header_propertys:
req_args["headers"] = http_header_propertys
if proxies:
req_args["proxies"] = proxies
if request_payload and not http_method == "GET":
req_args["data"]= request_payload
while True:
if polling_type == 'cron':
next_cron_firing = cron_iter.get_next(datetime)
while get_current_datetime_for_cron() != next_cron_firing:
time.sleep(float(10))
for endpoint in endpoint_list:
if "params" in req_args:
req_args_params_current = dictParameterToStringFormat(req_args["params"])
else:
req_args_params_current = ""
if "cookies" in req_args:
req_args_cookies_current = dictParameterToStringFormat(req_args["cookies"])
else:
req_args_cookies_current = ""
if "headers" in req_args:
req_args_headers_current = dictParameterToStringFormat(req_args["headers"])
else:
req_args_headers_current = ""
if "data" in req_args:
req_args_data_current = req_args["data"]
else:
req_args_data_current = ""
try:
if oauth2:
if http_method == "GET":
r = oauth2.get(endpoint, **req_args)
elif http_method == "POST":
r = oauth2.post(endpoint, **req_args)
elif http_method == "PUT":
r = oauth2.put(endpoint, **req_args)
elif http_method == "HEAD":
r = oauth2.head(endpoint, **req_args)
else:
if http_method == "GET":
r = requests.get(endpoint, **req_args)
elif http_method == "POST":
r = requests.post(endpoint, **req_args)
elif http_method == "PUT":
r = requests.put(endpoint, **req_args)
elif http_method == "HEAD":
r = requests.head(endpoint, **req_args)
except requests.exceptions.Timeout, e:
logging.error("HTTP Request Timeout error: %s" % str(e))
time.sleep(float(backoff_time))
continue
except Exception as e:
logging.error("Exception performing request: %s" % str(e))
time.sleep(float(backoff_time))
continue
try:
r.raise_for_status()
if streaming_request:
for line in r.iter_lines():
if line:
handle_output(r, line, response_type, req_args, endpoint)
else:
handle_output(r, r.text, response_type, req_args, endpoint)
except requests.exceptions.HTTPError, e:
error_output = r.text
error_http_code = r.status_code
if index_error_response_codes:
error_event = ""
error_event += 'http_error_code = %s error_message = %s' % (error_http_code, error_output)
print_xml_single_instance_mode(error_event)
sys.stdout.flush()
logging.error("HTTP Request error: %s" % str(e))
time.sleep(float(backoff_time))
continue
if "data" in req_args:
checkParamUpdated(req_args_data_current, req_args["data"], "request_payload")
if "params" in req_args:
checkParamUpdated(req_args_params_current, dictParameterToStringFormat(req_args["params"]), "url_args")
if "headers" in req_args:
checkParamUpdated(req_args_headers_current, dictParameterToStringFormat(req_args["headers"]), "http_header_propertys")
if "cookies" in req_args:
checkParamUpdated(req_args_cookies_current, dictParameterToStringFormat(req_args["cookies"]), "cookies")
if sequential_stagger_time > 0:
time.sleep(float(sequential_stagger_time))
if polling_type == 'interval':
time.sleep(float(polling_interval))
except RuntimeError, e:
logging.error("Looks like an error: %s" % str(e))
sys.exit(2)
def replaceTokens(raw_string):
try:
url_list = [raw_string]
substitution_tokens = re.findall("\$(?:\w+)\$", raw_string)
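# Illustrative: an endpoint like "http://example.com/api/$mytoken$" (hypothetical token name)
# yields the match "$mytoken$"; tokens.mytoken() supplies the replacement value, and a list
# result fans the single endpoint out into several URLs.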
for token in substitution_tokens:
token_response = getattr(tokens, token[1:-1])()
if(isinstance(token_response, list)):
temp_list = []
for token_response_value in token_response:
for url in url_list:
temp_list.append(url.replace(token, token_response_value))
url_list = temp_list
else:
for index, url in enumerate(url_list):
url_list[index] = url.replace(token, token_response)
return url_list
except:
e = sys.exc_info()[1]
logging.error("Looks like an error substituting tokens: %s" % str(e))
def checkParamUpdated(cached, current, rest_name):
if not (cached == current):
try:
args = {'host': 'localhost', 'port': SPLUNK_PORT, 'token': SESSION_TOKEN}
service = Service(**args)
item = service.inputs.__getitem__(STANZA[7:])
item.update(**{rest_name: current})
except RuntimeError, e:
logging.error("Looks like an error updating the modular input parameter %s: %s" % (rest_name, str(e), ))
def dictParameterToStringFormat(parameter):
if parameter:
return ''.join(('{}={}' + delimiter).format(key, val) for key, val in parameter.items())[:-1]
else:
return None
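# Illustrative: {"a": "1", "b": "2"} with the default "," delimiter becomes "a=1,b=2",
# the same string form the modular input stores for these parameters.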
def oauth2_token_updater(token):
try:
args = {'host': 'localhost', 'port': SPLUNK_PORT, 'token': SESSION_TOKEN}
service = Service(**args)
item = service.inputs.__getitem__(STANZA[7:])
item.update(oauth2_access_token=token["access_token"], oauth2_refresh_token=token["refresh_token"])
except RuntimeError,e:
logging.error("Looks like an error updating the oauth2 token: %s" % str(e))
def handle_output(response,output,type,req_args,endpoint):
try:
if REGEX_PATTERN:
search_result = REGEX_PATTERN.search(output)
if search_result is None:
return
RESPONSE_HANDLER_INSTANCE(response, output, type, req_args, endpoint)
sys.stdout.flush()
except RuntimeError, e:
logging.error("Looks like an error handle the response output: %s" % str(e))
# prints validation error data to be consumed by Splunk
def print_validation_error(s):
print "<error><message>%s</message></error>" % encodeXMLText(s)
# prints XML stream
def print_xml_single_instance_mode(s):
print "<stream><event><data>%s</data></event></stream>" % encodeXMLText(s)
# prints simple stream
def print_simple(s):
print "%s\n" % s
def encodeXMLText(text):
text = text.replace("&", "&")
text = text.replace("\"", """)
text = text.replace("'", "'")
text = text.replace("<", "<")
text = text.replace(">", ">")
return text
def usage():
print "usage: %s [--scheme|--validate-arguments]"
logging.error("Incorrect Program Usage")
sys.exit(2)
def do_scheme():
print SCHEME
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_input_config():
config = {}
try:
# read everything from stdin
config_str = sys.stdin.read()
# parse the config XML
doc = xml.dom.minidom.parseString(config_str)
root = doc.documentElement
session_key_node = root.getElementsByTagName("session_key")[0]
if session_key_node and session_key_node.firstChild and session_key_node.firstChild.nodeType == session_key_node.firstChild.TEXT_NODE:
data = session_key_node.firstChild.data
config["session_key"] = data
server_uri_node = root.getElementsByTagName("server_uri")[0]
if server_uri_node and server_uri_node.firstChild and server_uri_node.firstChild.nodeType == server_uri_node.firstChild.TEXT_NODE:
data = server_uri_node.firstChild.data
config["server_uri"] = data
conf_node = root.getElementsByTagName("configuration")[0]
if conf_node:
logging.debug("XML: found configuration")
stanza = conf_node.getElementsByTagName("stanza")[0]
if stanza:
stanza_name = stanza.getAttribute("name")
if stanza_name:
logging.debug("XML: found stanza " + stanza_name)
config["name"] = stanza_name
params = stanza.getElementsByTagName("param")
for param in params:
param_name = param.getAttribute("name")
logging.debug("XML: found param '%s'" % param_name)
if param_name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
data = param.firstChild.data
config[param_name] = data
logging.debug("XML: '%s' -> '%s'" % (param_name, data))
checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
if checkpnt_node and checkpnt_node.firstChild and \
checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE:
config["checkpoint_dir"] = checkpnt_node.firstChild.data
if not config:
raise Exception, "Invalid configuration received from Splunk."
except Exception, e:
raise Exception, "Error getting Splunk configuration via STDIN: %s" % str(e)
return config
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_validation_config():
val_data = {}
# read everything from stdin
val_str = sys.stdin.read()
# parse the validation XML
doc = xml.dom.minidom.parseString(val_str)
root = doc.documentElement
logging.debug("XML: found items")
item_node = root.getElementsByTagName("item")[0]
if item_node:
logging.debug("XML: found item")
name = item_node.getAttribute("name")
val_data["stanza"] = name
params_node = item_node.getElementsByTagName("param")
for param in params_node:
name = param.getAttribute("name")
logging.debug("Found param %s" % name)
if name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
val_data[name] = param.firstChild.data
return val_data
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == "--scheme":
do_scheme()
elif sys.argv[1] == "--validate-arguments":
do_validate()
else:
usage()
else:
config = get_input_config()
original_endpoint = config.get("endpoint")
# token replacement
endpoint_list = replaceTokens(original_endpoint)
sequential_mode = int(config.get("sequential_mode", 0))
if bool(sequential_mode):
do_run(config, endpoint_list)
else: # parallel mode
for endpoint in endpoint_list:
requester = threading.Thread(target=do_run, args=(config, [endpoint]))
requester.start()
sys.exit(0)
|
#!/usr/bin/env python
# by TR
from obspy.signal.cpxtrace import envelope
from sito.noisexcorr import setHIDist
import matplotlib.pyplot as plt
from sito.stations import IPOCStations
from sito.stream import read
from operator import itemgetter
from scipy.optimize import curve_fit
from matplotlib.patches import Polygon
import numpy as np
def no_corr_pairs(stream, stations):
s1 = set()
s2 = set()
for tr in stream:
s1.add(frozenset(tr.stats.station.split('-')))
for st1 in stations:
for st2 in stations:
if st1 != st2:
s2.add(frozenset({st1 + 'Z', st2 + 'Z'}))
return s2 - s1
def get_vel(stream):
ms = stream.copy()
ms.addXcorrSides()
for tr in ms:
tr.data = np.abs(envelope(tr.data))
dists = ms.getHI('dist')
maxi = ms.getMaxima()
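# Fit dist = v * t through the origin: the lag of each cross-correlation envelope maximum
# grows linearly with inter-station distance, so the fitted slope is an apparent propagation velocity.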
v, _ = curve_fit(lambda x, a: x * a, maxi, dists, p0=1)
return v[0]
#ms = read('/home/richter/Results/IPOC/xcorr/FINAL_filter0.01-0.5_1bit_whitening/stack/day_PB0[12345]Z-PB0[12345]Z_stack_all.QHD')
path = '/home/richter/Results/IPOC/xcorr/FINAL_filter0.01-0.5_1bit_whitening'
ms = read(path + '/stack/day_*_stack_all.QHD')
output = path + '/xcorr_vs_dist.pdf'
ipoc = IPOCStations()
setHIDist(ms, ipoc)
print 'no correlation for pairs:', no_corr_pairs(ms, ipoc)
v = get_vel(ms)
#v = 3.03093386
print 'velocity:', v
fig = plt.figure(figsize=(10, 12))
plot = ms.plotXcorrVsDist(-300, 300, scale=10, fig=fig,
figtitle='%d cross-correlations' % len(ms))
plot.ax.plot((-300, 0, 300), (300 * v, 0, 300 * v), 'r')
d = 30
w = 80
xy = np.array([(d, 0), (300 + d, v * 300), (300 + d + w, v * 300), (d + w, 0)])
polygon1 = Polygon(xy, True, alpha=0.4, color='b', zorder=50)
plot.ax.add_patch(polygon1)
xy[:, 0] *= -1
polygon2 = Polygon(xy, True, alpha=0.4, color='b', zorder=50)
plot.ax.add_patch(polygon2)
plot.ax.set_ylim(0, 720)
plt.savefig(output, bbox_inches='tight')
plt.show()
|
def always_satisfied(values: tuple) -> bool:
return True
def never_satisfied(values: tuple) -> bool:
return False
def all_equal_constraint_evaluator(values: tuple) -> bool:
if len(values) == 1:
return True
first_val = values[0]
for val in values:
if val != first_val:
return False
return True
def all_diff_constraint_evaluator(values: tuple) -> bool:
seen_values = set()
for val in values:
if val in seen_values:
return False
seen_values.add(val)
return True
class MaxSum:
def __init__(self, maximum) -> None:
self.__maximum = maximum
def __call__(self, values: tuple) -> bool:
return sum(values) < self.__maximum
class MinSum:
def __init__(self, minimum) -> None:
self.__minimum = minimum
def __call__(self, values: tuple) -> bool:
return self.__minimum < sum(values)
class ExactSum:
def __init__(self, sum_value) -> None:
self.__sum_value = sum_value
def __call__(self, values: tuple) -> bool:
return sum(values) == self.__sum_value
class ExactLengthExactSum:
def __init__(self, number_of_values: int, sum_value) -> None:
self.__number_of_values = number_of_values
self.__sum_value = sum_value
def __call__(self, values: tuple) -> bool:
if len(values) < self.__number_of_values:
return True
if len(values) == self.__number_of_values:
return sum(values) == self.__sum_value
if len(values) > self.__number_of_values:
return False
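# Illustrative behaviour of the evaluators above:
#   all_diff_constraint_evaluator((1, 2, 2)) -> False
#   MaxSum(10)((3, 4)) -> True, since 3 + 4 < 10
#   ExactLengthExactSum(3, 6)((1, 2)) -> True (partial assignment); ((1, 2, 3)) -> True (complete)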
class OnlyiConsistentAssignment:
def __init__(self, i_consistent_assignments: set):
self.__i_consistent_assignments = i_consistent_assignments
def __call__(self, values: tuple) -> bool:
return values in self.__i_consistent_assignments
|
from typing import Union
from kivy.graphics.instructions import RenderContext, InstructionGroup
from primal.engine.sprite import ColorSprite
class HealthBar:
def __init__(self, pos, size, max_health: float = 100):
self.max_health = max_health
self.health = max_health
self.size = size
self.bg = ColorSprite(None, pos, size, (.0, .0, .0, .25))
self.bar = ColorSprite(None, pos, size, (1, .0, .0, 1))
def get_current_width(self) -> float:
if self.health < 0:
return 0.0
if self.health > self.max_health:
return self.size[0]
return self.health * (self.size[0] / self.max_health)
def get_health(self) -> float:
return self.health
def set_health(self, health: float):
self.health = max(.0, min(health, self.max_health))
self.bar.set_size((self.get_current_width(), self.size[1]))
def draw(self, canvas: Union[InstructionGroup, RenderContext]):
self.bg.draw(canvas)
self.bar.draw(canvas)
def set_alpha(self, alpha: float):
self.bg.set_alpha(alpha / 4)
self.bar.set_alpha(alpha)
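# Illustrative usage (the canvas comes from the surrounding primal engine code):
#   bar = HealthBar((10, 10), (200, 16), max_health=100)
#   bar.set_health(42.5)
#   bar.draw(canvas)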
|
#%%
import sys,math,json
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from icecream import ic
#%%
if len(sys.argv) == 3:
filename = sys.argv[1]
input_json = sys.argv[2]
else:
filename = "sample_training_data.txt"
input_json = "classic_reg_input.json"
org_training = np.loadtxt(filename)
ic(filename, np.shape(org_training))
#%%
# format of training data
# 0-12 : input parameters
# N, p_tri, p_r, p_nd, p_ld,
# aging, w_th, w_r, q, F,
# alpha, t_max, seed
# 13-22: outputs
# <k>, stddev(k), <w>, pcc(knn), cc
# pcc(ck), O, pcc(Ow), comm_size, comm_degeneracy
training = org_training[ org_training[:,13] > 0,: ]
ic(np.shape(training), training[0])
#%%
# h_params = {
# "ycol": 0,
# "polynomial_degree": 1,
# "alpha": 0.1,
# "l1_ratio": 0.7,
# }
def load_input_json(json_path):
with open(json_path) as f:
return json.load(f)
h_params = load_input_json(input_json)
ic(h_params)
#%%
# parameters:
x = np.log10(training[:,1:6])
x = np.concatenate([training[:,0:1],x,training[:,7:12]], axis=1 )
ic(np.shape(x))
#%%
def scale_input(data):
#from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
#scaler = StandardScaler()
scaled = scaler.fit_transform(data)
return (scaler, scaled)
scaler, x_all = scale_input(x)
ic(scaler.min_, scaler.scale_)
#%%
y_all = training[:,13 + h_params['ycol']]
if h_params['ycol'] == 2:
y_all = np.log10(y_all) # take logarithm for link weight
ic(y_all.shape)
#%%
# Split data in train set and test set
n_samples = x_all.shape[0]
x_train, y_train = x_all[:n_samples // 2], y_all[:n_samples // 2]
x_test, y_test = x_all[n_samples // 2:], y_all[n_samples // 2:]
#%%
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.linear_model import ElasticNet
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
model = Pipeline([('poly', PolynomialFeatures(degree=3)),
('linear', ElasticNet(alpha=h_params['alpha'], l1_ratio=h_params['l1_ratio'], fit_intercept=False))])
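# PolynomialFeatures(degree=3) expands the scaled inputs into all monomials up to degree 3
# (including the constant column, which is why fit_intercept=False), and ElasticNet then
# fits them with a mixed L1/L2 penalty controlled by alpha and l1_ratio.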
#enet = ElasticNet(alpha=h_params['alpha'], l1_ratio=h_params['l1_ratio'])
y_pred_enet = model.fit(x_train, y_train).predict(x_test)
#r2_score_enet = r2_score(y_test, y_pred_enet)
mse = mean_squared_error(y_test, y_pred_enet)
ic(y_pred_enet[-10:], y_test[-10:], np.sqrt(mse))
# %%
print( json.dumps({"mse": mse}) )
# %%
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, \
unicode_literals
import six
__all__ = ["WordEmbedding", "classify_format"]
import warnings
import numpy as np
from word_embedding_loader import loader, saver
# Mimick namespace
class _glove:
loader = loader.glove
saver = saver.glove
class _word2vec_bin:
loader = loader.word2vec_bin
saver = saver.word2vec_bin
class _word2vec_text:
loader = loader.word2vec_text
saver = saver.word2vec_text
def _select_module(format, binary):
if format == 'glove':
mod = _glove
if binary:
warnings.warn(
b"Argument binary=True for glove loader is ignored.",
UserWarning)
elif format == 'word2vec':
if binary:
mod = _word2vec_bin
else:
mod = _word2vec_text
else:
raise NameError(('Unknown format "%s"' % format).encode('utf-8'))
return mod
def _get_two_lines(f):
"""
Get the first and second lines
Args:
f (filelike): File that is opened for ascii.
Returns:
bytes
"""
l0 = f.readline()
l1 = f.readline()
return l0, l1
def classify_format(f):
"""
Determine the format of word embedding file by their content. This operation
only looks at the first two lines and does not check the sanity of input
file.
Args:
f (Filelike):
Returns:
class
"""
l0, l1 = _get_two_lines(f)
if loader.glove.check_valid(l0, l1):
return _glove
elif loader.word2vec_text.check_valid(l0, l1):
return _word2vec_text
elif loader.word2vec_bin.check_valid(l0, l1):
return _word2vec_bin
else:
raise OSError(b"Invalid format")
class WordEmbedding(object):
"""
Main API for loading and saving of pretrained word embedding files.
.. note:: You do not need to call initializer directly in normal usage.
Instead you should call
:func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.
Args:
vectors (numpy.ndarray): Word embedding representation vectors
vocab (dict): Mapping from words (bytes) to vector
indices (int).
freqs (dict): Mapping from words (bytes) to word frequency counts
(int).
Attributes:
vectors (numpy.ndarray): Word embedding vectors in shape of
``(vocabulary size, feature dimension)``.
vocab (dict): Mapping from words (bytes) to vector indices (int)
freqs (dict or None): Mapping from words (bytes) to frequency counts
(int).
"""
def __init__(self, vectors, vocab, freqs=None):
if not isinstance(vectors, np.ndarray):
raise TypeError(
("Expected numpy.ndarray for vectors, %s found."% type(vectors)
).encode('utf-8'))
if not isinstance(vocab, dict):
raise TypeError(
("Expected dict for vocab, %s found." % type(vectors)
).encode('utf-8'))
if len(vectors) != len(vocab):
warnings.warn(
("vectors and vocab size unmatch (%d != %d)" %
(len(vectors), len(vocab))).encode('utf-8'))
self.vectors = vectors
self.vocab = vocab
self.freqs = freqs
self._load_cond = None
@classmethod
def load(cls, path, vocab=None, dtype=np.float32, max_vocab=None,
format=None, binary=False):
"""
Load pretrained word embedding from a file.
Args:
path (str): Path of file to load.
vocab (str or None): Path to vocabulary file created by word2vec
with ``-save-vocab <file>`` option. If vocab is given,
:py:attr:`~vectors` and :py:attr:`~vocab` is ordered in
descending order of frequency.
dtype (numpy.dtype): Element data type to use for the array.
max_vocab (int): Number of vocabulary to read.
format (str or None): Format of the file. ``'word2vec'`` for file
that was implemented in
`word2vec <https://code.google.com/archive/p/word2vec/>`_,
by Mikolov et al.. ``'glove'`` for file that was implemented in
`GloVe <https://nlp.stanford.edu/projects/glove/>`_, Global
Vectors for Word Representation, by Jeffrey Pennington,
Richard Socher, Christopher D. Manning from Stanford NLP group.
If ``None`` is given, the format is guessed from the content.
binary (bool): Load file as binary file as in word embedding file
created by
`word2vec <https://code.google.com/archive/p/word2vec/>`_ with
``-binary 1`` option. If ``format`` is ``'glove'`` or ``None``,
this argument is simply ignored
Returns:
:class:`~word_embedding_loader.word_embedding.WordEmbedding`
"""
freqs = None
if vocab is not None:
with open(vocab, mode='rb') as f:
freqs = loader.vocab.load_vocab(f)
# Create vocab from freqs
# [:None] gives all the list member
vocab = {k: i for i, (k, v) in enumerate(
sorted(six.iteritems(freqs),
key=lambda k_v: k_v[1], reverse=True)[:max_vocab])}
with open(path, mode='rb') as f:
if format is None:
mod = classify_format(f)
else:
mod = _select_module(format, binary)
with open(path, mode='rb') as f:
if vocab is not None:
arr = mod.loader.load_with_vocab(f, vocab, dtype=dtype)
v = vocab
else:
arr, v = mod.loader.load(f, max_vocab=max_vocab, dtype=dtype)
obj = cls(arr, v, freqs)
obj._load_cond = mod
return obj
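# Illustrative usage (file path and format are assumed, not part of this module):
#   emb = WordEmbedding.load('vectors.txt', format='word2vec', binary=False)
#   dim = emb.size; idx = emb.vocab[b'word']; vec = emb.vectors[idx]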
def save(self, path, format, binary=False, use_load_condition=False):
"""
Save object as word embedding file. For most arguments, you should refer
to :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.
Args:
use_load_condition (bool): If `True`, options from
:func:`~word_embedding_loader.word_embedding.WordEmbedding.load`
is used.
Raises:
ValueError: ``use_load_condition == True`` but the object is not
initialized via
:func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.
"""
if use_load_condition:
if self._load_cond is None:
raise ValueError(
b"use_load_condition was specified but the object is not "
b"loaded from a file")
# Use load condition
mod = self._load_cond
else:
mod = _select_module(format, binary)
if self.freqs is None:
itr = list(
sorted(six.iteritems(self.vocab), key=lambda k_v: k_v[1]))
else:
itr = list(
sorted(six.iteritems(self.vocab),
key=lambda k_v: self.freqs[k_v[0]], reverse=True)
)
with open(path, mode='wb') as f:
mod.saver.save(f, self.vectors, itr)
def __len__(self):
return len(self.vectors)
@property
def size(self):
"""
Feature dimension of the loaded vector.
Returns:
int
"""
return self.vectors.shape[1]
|
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import trustedanalytics as ta
import pytz
utc = pytz.UTC
# show full stack traces
ta.errors.show_details = True
ta.loggers.set_api()
# TODO: port setup should move to a super class
if ta.server.port != 19099:
ta.server.port = 19099
ta.connect()
class DateTimeTests(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
print "define csv file"
csv = ta.CsvFile("/datasets/dates.csv", schema= [('start', ta.datetime),
('id', int),
('stop', ta.datetime),
('color', str)], delimiter=',')
print "create frame"
self.frame = ta.Frame(csv)
def test_sort_datetime(self):
f = self.frame.copy()
#print "original"
#print f.inspect(wrap=10)
data0 = f.take(n=2)
self.assertEqual("2010-05-01T19:09:01.000Z", data0[0][0])
self.assertEqual("2010-08-15T19:00:34.000Z", data0[1][2])
f.sort("stop")
#print "sorted on stop"
#print f.inspect(wrap=10)
data1 = f.take(n=2)
self.assertEqual("2010-09-01T19:01:20.000Z", data1[0][0])
self.assertEqual("2010-06-15T19:00:35.000Z", data1[1][2])
def test_udf_datetime(self):
f = self.frame.copy()
# Do a couple add_columns...
# 1. Do a date diff by seconds
f.add_columns(lambda row: (row.start - row.stop).total_seconds(), ('diff', ta.float64))
#print f.inspect(wrap=10)
data = f.take(n=5, columns=['diff'])
self.assertEqual(-11836313, data[0][0])
self.assertEqual(9417609, data[4][0])
# 2. Add seconds to a datetime value
def add_seconds(seconds, column):
"""Returns a row function which adds seconds to the named column"""
s = seconds
c = column
from datetime import timedelta
def add_s(row):
return row[c] + timedelta(seconds=s)
return add_s
f.add_columns(add_seconds(20, "stop"), ("stop_plus_20", ta.datetime))
print f.inspect(wrap=10)
data = f.take(n=2, columns=['stop_plus_20'])
self.assertEqual("2010-09-15T19:01:14.000Z", data[0][0])
self.assertEqual("2010-08-15T19:00:54.000Z", data[1][0])
# 3. Add a label according to date range
def add_range_label(range_labels, column):
"""
Returns a row function which adds a label to the row according to the value found in the named column
range_labels is a dict of label -> (inclusive_range_start_datetime, exclusive_range_stop_datetime)
"""
labels = range_labels
c = column
def add_s(row):
for label, range_tuple in labels.items():
if range_tuple[0] <= row[c] < range_tuple[1]:
return label
return None
return add_s
def m(year, month):
"""get datetime for the start of the given month"""
return ta.datetime(year, month, 1, tzinfo=utc)
ranges = {
"winter": (m(2010, 1), m(2010, 4)),
"spring": (m(2010, 4), m(2010, 7)),
"summer": (m(2010, 7), m(2010, 10)),
"fall": (m(2010, 10), m(2011, 1)),
}
f.add_columns(add_range_label(ranges, "start"), ("season", str))
print f.inspect(wrap=10, columns=["start", "season"])
data = map(lambda row: row[0], f.take(n=5, columns=['season']))
mismatches = filter(lambda x: x[0] != x[1], zip(data, ['spring', 'spring', "summer", "summer", "summer"]))
self.assertFalse(mismatches, str(mismatches))
def test_download_datetime(self):
df = self.frame.download()
#print "Pandas DF:\n" + repr(df)
self.assertEqual("2010-05-01T19:09:01.000Z", df["start"][0])
self.assertEqual("2010-07-15T19:00:20.000Z", df["stop"][2])
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkcenterlinesmoothing.py,v $
## Language: Python
## Date: $Date: 2006/07/17 09:52:56 $
## Version: $Revision: 1.1 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
import vtk
import vtkvmtk
import sys
import pypes
vmtkcenterlinesmoothing = 'vmtkCenterlineSmoothing'
class vmtkCenterlineSmoothing(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Centerlines = None
self.NumberOfSmoothingIterations = 100
self.SmoothingFactor = 0.1
self.SetScriptName('vmtkcenterlinesmoothing')
self.SetScriptDoc('smooth centerlines with a moving average filter')
self.SetInputMembers([
['Centerlines','i','vtkPolyData',1,'','the input centerlines','vmtksurfacereader'],
['NumberOfSmoothingIterations','iterations','int',1,'(0,)'],
['SmoothingFactor','factor','float',1,'(0.0,)']
])
self.SetOutputMembers([
['Centerlines','o','vtkPolyData',1,'','the output centerlines','vmtksurfacewriter']
])
def Execute(self):
if self.Centerlines is None:
self.PrintError('Error: No input centerlines.')
centerlineSmoothing = vtkvmtk.vtkvmtkCenterlineSmoothing()
centerlineSmoothing.SetInputData(self.Centerlines)
centerlineSmoothing.SetNumberOfSmoothingIterations(self.NumberOfSmoothingIterations)
centerlineSmoothing.SetSmoothingFactor(self.SmoothingFactor)
centerlineSmoothing.Update()
self.Centerlines = centerlineSmoothing.GetOutput()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
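# Illustrative pype usage (file names are hypothetical):
#   vmtkcenterlinesmoothing -ifile centerlines.vtp -iterations 100 -factor 0.1 -ofile smoothed.vtp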
|
"""
ELF (Unix/BSD executable file format) parser.
Author: Victor Stinner, Robert Xiao
Creation date: 08 may 2006
Reference:
- System V Application Binary Interface - DRAFT - 10 June 2013
http://www.sco.com/developers/gabi/latest/contents.html
"""
from hachoir.parser import HachoirParser
from hachoir.field import (RootSeekableFieldSet, FieldSet, Bit, NullBits, RawBits,
UInt8, UInt16, UInt32, UInt64, Enum,
String, RawBytes, Bytes)
from hachoir.core.text_handler import textHandler, hexadecimal
from hachoir.core.endian import LITTLE_ENDIAN, BIG_ENDIAN
class ElfHeader(FieldSet):
MAGIC = b"\x7FELF"
LITTLE_ENDIAN_ID = 1
BIG_ENDIAN_ID = 2
MACHINE_NAME = {
# e_machine, EM_ defines
0: "No machine",
1: "AT&T WE 32100",
2: "SPARC",
3: "Intel 80386",
4: "Motorola 68000",
5: "Motorola 88000",
6: "Intel 80486",
7: "Intel 80860",
8: "MIPS I Architecture",
9: "Amdahl UTS on System/370",
10: "MIPS RS3000 Little-endian",
11: "IBM RS/6000 XXX reserved",
15: "Hewlett-Packard PA-RISC",
16: "NCube XXX reserved",
17: "Fujitsu VPP500",
18: "Enhanced instruction set SPARC",
19: "Intel 80960",
20: "PowerPC 32-bit",
21: "PowerPC 64-bit",
22: "IBM S390",
36: "NEC V800",
37: "Fujitsu FR20",
38: "TRW RH-32",
39: "Motorola RCE",
40: "Advanced RISC Machines (ARM)",
41: "DIGITAL Alpha",
42: "Hitachi Super-H",
43: "SPARC Version 9",
44: "Siemens Tricore",
45: "Argonaut RISC Core",
46: "Hitachi H8/300",
47: "Hitachi H8/300H",
48: "Hitachi H8S",
49: "Hitachi H8/500",
50: "Intel Merced (IA-64) Processor",
51: "Stanford MIPS-X",
52: "Motorola Coldfire",
53: "Motorola MC68HC12",
62: "Advanced Micro Devices x86-64",
75: "DIGITAL VAX",
36902: "used by NetBSD/alpha; obsolete",
}
CLASS_NAME = {
# e_ident[EI_CLASS], ELFCLASS defines
1: "32 bits",
2: "64 bits"
}
TYPE_NAME = {
# e_type, ET_ defines
0: "No file type",
1: "Relocatable file",
2: "Executable file",
3: "Shared object file",
4: "Core file",
0xFF00: "Processor-specific (0xFF00)",
0xFFFF: "Processor-specific (0xFFFF)",
}
OSABI_NAME = {
# e_ident[EI_OSABI], ELFOSABI_ defines
0: "UNIX System V ABI",
1: "HP-UX operating system",
2: "NetBSD",
3: "GNU/Linux",
4: "GNU/Hurd",
5: "86Open common IA32 ABI",
6: "Solaris",
7: "Monterey",
8: "IRIX",
9: "FreeBSD",
10: "TRU64 UNIX",
11: "Novell Modesto",
12: "OpenBSD",
97: "ARM",
255: "Standalone (embedded) application",
}
ENDIAN_NAME = {
# e_ident[EI_DATA], ELFDATA defines
LITTLE_ENDIAN_ID: "Little endian",
BIG_ENDIAN_ID: "Big endian",
}
def createFields(self):
yield Bytes(self, "signature", 4, r'ELF signature ("\x7fELF")')
yield Enum(UInt8(self, "class", "Class"), self.CLASS_NAME)
if self["class"].value == 1:
ElfLongWord = UInt32
else:
ElfLongWord = UInt64
yield Enum(UInt8(self, "endian", "Endian"), self.ENDIAN_NAME)
yield UInt8(self, "file_version", "File version")
yield Enum(UInt8(self, "osabi_ident", "OS/syscall ABI identification"), self.OSABI_NAME)
yield UInt8(self, "abi_version", "syscall ABI version")
yield String(self, "pad", 7, "Pad")
yield Enum(UInt16(self, "type", "File type"), self.TYPE_NAME)
yield Enum(UInt16(self, "machine", "Machine type"), self.MACHINE_NAME)
yield UInt32(self, "version", "ELF format version")
yield textHandler(ElfLongWord(self, "entry", "Entry point"), hexadecimal)
yield ElfLongWord(self, "phoff", "Program header file offset")
yield ElfLongWord(self, "shoff", "Section header file offset")
yield UInt32(self, "flags", "Architecture-specific flags")
yield UInt16(self, "ehsize", "Elf header size (this header)")
yield UInt16(self, "phentsize", "Program header entry size")
yield UInt16(self, "phnum", "Program header entry count")
yield UInt16(self, "shentsize", "Section header entry size")
yield UInt16(self, "shnum", "Section header entry count")
yield UInt16(self, "shstrndx", "Section header string table index")
def isValid(self):
if self["signature"].value != self.MAGIC:
return "Wrong ELF signature"
if self["class"].value not in self.CLASS_NAME:
return "Unknown class"
if self["endian"].value not in self.ENDIAN_NAME:
return "Unknown endian (%s)" % self["endian"].value
return ""
class SectionFlags(FieldSet):
def createFields(self):
field_thunks = (
lambda: Bit(self, "is_writable", "Section contains writable data?"),
lambda: Bit(self, "is_alloc", "Section occupies memory?"),
lambda: Bit(self, "is_exec", "Section contains executable instructions?"),
lambda: NullBits(self, "reserved[]", 1),
lambda: Bit(self, "is_merged", "Section might be merged to eliminate duplication?"),
lambda: Bit(self, "is_strings", "Section contains nul terminated strings?"),
lambda: Bit(self, "is_info_link", "sh_info field of this section header holds section header table index?"),
lambda: Bit(self, "preserve_link_order", "Section requires special ordering for linker?"),
lambda: Bit(self, "os_nonconforming", "Section rqeuires OS-specific processing?"),
lambda: Bit(self, "is_group", "Section is a member of a section group?"),
lambda: Bit(self, "is_tls", "Section contains TLS data?"),
lambda: Bit(self, "is_compressed", "Section contains compressed data?"),
lambda: NullBits(self, "reserved[]", 8),
lambda: RawBits(self, "os_specific", 8, "OS specific flags"),
lambda: RawBits(self, "processor_specific", 4, "Processor specific flags"),
)
if self.root.endian == BIG_ENDIAN:
if self.root.is64bit:
yield RawBits(self, "reserved[]", 32)
for t in reversed(field_thunks):
yield t()
else:
for t in field_thunks:
yield t()
if self.root.is64bit:
yield RawBits(self, "reserved[]", 32)
class SymbolStringTableOffset(UInt32):
def createDisplay(self):
section_index = self['/header/shstrndx'].value
section = self['/section[' + str(section_index) + ']']
text = section.value[self.value:]
text = text.decode('utf-8')
return text.split('\0', 1)[0]
class SectionHeader32(FieldSet):
static_size = 40 * 8
TYPE_NAME = {
# sh_type, SHT_ defines
0: "Inactive",
1: "Program defined information",
2: "Symbol table section",
3: "String table section",
4: "Relocation section with addends",
5: "Symbol hash table section",
6: "Dynamic section",
7: "Note section",
8: "Block started by symbol (BSS) or No space section",
9: "Relocation section without addends",
10: "Reserved - purpose unknown",
11: "Dynamic symbol table section",
}
def createFields(self):
yield SymbolStringTableOffset(self, "name", "Section name (index into section header string table)")
yield Enum(textHandler(UInt32(self, "type", "Section type"), hexadecimal), self.TYPE_NAME)
yield SectionFlags(self, "flags", "Section flags")
yield textHandler(UInt32(self, "VMA", "Virtual memory address"), hexadecimal)
yield textHandler(UInt32(self, "LMA", "Logical memory address (offset in file)"), hexadecimal)
yield textHandler(UInt32(self, "size", "Section size (bytes)"), hexadecimal)
yield UInt32(self, "link", "Index of a related section")
yield UInt32(self, "info", "Type-dependent information")
yield UInt32(self, "addr_align", "Address alignment (bytes)")
yield UInt32(self, "entry_size", "Size of each entry in section")
def createDescription(self):
return "Section header (name: %s, type: %s)" % \
(self["name"].display, self["type"].display)
class SectionHeader64(SectionHeader32):
static_size = 64 * 8
def createFields(self):
yield SymbolStringTableOffset(self, "name", "Section name (index into section header string table)")
yield Enum(textHandler(UInt32(self, "type", "Section type"), hexadecimal), self.TYPE_NAME)
yield SectionFlags(self, "flags", "Section flags")
yield textHandler(UInt64(self, "VMA", "Virtual memory address"), hexadecimal)
yield textHandler(UInt64(self, "LMA", "Logical memory address (offset in file)"), hexadecimal)
yield textHandler(UInt64(self, "size", "Section size (bytes)"), hexadecimal)
yield UInt32(self, "link", "Index of a related section")
yield UInt32(self, "info", "Type-dependent information")
yield UInt64(self, "addr_align", "Address alignment (bytes)")
yield UInt64(self, "entry_size", "Size of each entry in section")
class ProgramFlags(FieldSet):
static_size = 32
FLAGS = (('pf_r', 'readable'), ('pf_w', 'writable'), ('pf_x', 'executable'))
def createFields(self):
if self.root.endian == BIG_ENDIAN:
yield NullBits(self, "padding[]", 29)
for fld, desc in self.FLAGS:
yield Bit(self, fld, "Segment is " + desc)
else:
for fld, desc in reversed(self.FLAGS):
yield Bit(self, fld, "Segment is " + desc)
yield NullBits(self, "padding[]", 29)
def createDescription(self):
attribs = []
for fld, desc in self.FLAGS:
if self[fld].value:
attribs.append(desc)
return 'Segment is ' + ', '.join(attribs)
class ProgramHeader32(FieldSet):
TYPE_NAME = {
# p_type, PT_ defines
0: "Unused program header table entry",
1: "Loadable program segment",
2: "Dynamic linking information",
3: "Program interpreter",
4: "Auxiliary information",
5: "Reserved, unspecified semantics",
6: "Entry for header table itself",
7: "Thread Local Storage segment",
0x70000000: "MIPS_REGINFO",
}
static_size = 32 * 8
def createFields(self):
yield Enum(UInt32(self, "type", "Segment type"), ProgramHeader32.TYPE_NAME)
yield UInt32(self, "offset", "Offset")
yield textHandler(UInt32(self, "vaddr", "V. address"), hexadecimal)
yield textHandler(UInt32(self, "paddr", "P. address"), hexadecimal)
yield UInt32(self, "file_size", "File size")
yield UInt32(self, "mem_size", "Memory size")
yield ProgramFlags(self, "flags")
yield UInt32(self, "align", "Alignment padding")
def createDescription(self):
return "Program Header (%s)" % self["type"].display
class ProgramHeader64(ProgramHeader32):
static_size = 56 * 8
def createFields(self):
yield Enum(UInt32(self, "type", "Segment type"), ProgramHeader32.TYPE_NAME)
yield ProgramFlags(self, "flags")
yield UInt64(self, "offset", "Offset")
yield textHandler(UInt64(self, "vaddr", "V. address"), hexadecimal)
yield textHandler(UInt64(self, "paddr", "P. address"), hexadecimal)
yield UInt64(self, "file_size", "File size")
yield UInt64(self, "mem_size", "Memory size")
yield UInt64(self, "align", "Alignment padding")
class ElfFile(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "elf",
"category": "program",
"file_ext": ("so", ""),
"min_size": 52 * 8, # At least one program header
"mime": (
"application/x-executable",
"application/x-object",
"application/x-sharedlib",
"application/x-executable-file",
"application/x-coredump"),
"magic": ((ElfHeader.MAGIC, 0),),
"description": "ELF Unix/BSD program/library"
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(
self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(ElfHeader.MAGIC)) != ElfHeader.MAGIC:
return "Invalid magic"
err = self["header"].isValid()
if err:
return err
return True
def createFields(self):
# Choose the right endian depending on endian specified in header
if self.stream.readBits(5 * 8, 8, BIG_ENDIAN) == ElfHeader.BIG_ENDIAN_ID:
self.endian = BIG_ENDIAN
else:
self.endian = LITTLE_ENDIAN
# Parse header and program headers
yield ElfHeader(self, "header", "Header")
self.is64bit = (self["header/class"].value == 2)
for index in range(self["header/phnum"].value):
if self.is64bit:
yield ProgramHeader64(self, "prg_header[]")
else:
yield ProgramHeader32(self, "prg_header[]")
self.seekByte(self["header/shoff"].value, relative=False)
for index in range(self["header/shnum"].value):
if self.is64bit:
yield SectionHeader64(self, "section_header[]")
else:
yield SectionHeader32(self, "section_header[]")
for index in range(self["header/shnum"].value):
field = self["section_header[" + str(index) + "]"]
if field['size'].value != 0 and field['type'].value != 8:
# skip NOBITS sections
self.seekByte(field['LMA'].value, relative=False)
yield RawBytes(self, "section[" + str(index) + "]", field['size'].value)
def createDescription(self):
return "ELF Unix/BSD program/library: %s" % (
self["header/class"].display)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Injects failures at specific locations in each of the plan nodes. Currently supports
# two types of failures - cancellation of the query and a failure test hook.
#
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import (
create_single_exec_option_dimension,
create_uncompressed_text_dimension)
from time import sleep
# Test injecting error logs in prepare phase and status::OK(). This tests one of race
# conditions in error reporting (IMPALA-3385).
class TestErrorLogs(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestErrorLogs, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def test_errorlog(self, vector):
query = 'select count(*) from tpch.lineitem;'
action = 'INJECT_ERROR_LOG'
location = 'PREPARE'
# scan node has id=0
node_id = 0
    # Without restarting Impala, the reporting thread reports every 5 seconds.
    # A low MAX_SCAN_RANGE_LENGTH makes the query run for more than 2*5 seconds.
    # Selecting the PREPARE location guarantees that at least one cleared error map
    # will be sent to the coordinator.
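    # The debug_action string below has the form '<node id>:<location>:<action>',
    # e.g. '0:PREPARE:INJECT_ERROR_LOG' for the values chosen above.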
debug_action = '%d:%s:%s' % (node_id, location, action)
vector.get_value('exec_option')['debug_action'] = debug_action
vector.get_value('exec_option')['MAX_SCAN_RANGE_LENGTH'] = '1000'
self.__execute_inject_error_log_action(query, vector)
def __execute_inject_error_log_action(self, query, vector):
try:
handle = self.execute_query_async(query, vector.get_value('exec_option'))
      # Let the query run for a while before cancelling it (it would otherwise run
      # for a long time). The sleep is long enough to further guarantee that at
      # least one cleared error map is sent to the coordinator.
sleep(30)
cancel_result = self.client.cancel(handle)
self.client.close_query(handle)
assert cancel_result.status_code == 0,\
'Unexpected status code from cancel request: %s' % cancel_result
# As long as impala did not crash we are good.
except ImpalaBeeswaxException:
return
|
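# Reads four integers from one line, sorts them ascending, and prints the
# product of the smallest and the third-smallest values.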
x = input().split()
a = int(x[0])
b = int(x[1])
c = int(x[2])
d = int(x[3])
newX = [a,b,c,d]
newX.sort()
print(newX[0] * newX[2])
|
import sys
import socket
import threading
import queue
import base64
import json
import time
# Import specific application methods
import blockchainrpcnetworking
import blockchainrpcprocessing
import blockchainrpcserver
import blockchainrpcbackground
# Create global variables
readheadersjobqueue = queue.Queue()
readheadersjobworkers = []
readheadersjobpusher = threading.Thread(target = blockchainrpcprocessing.blockingreadheaderspusherloop, args = (readheadersjobqueue, readheadersjobworkers))
processheadersjobqueue = queue.Queue()
processheadersjobworkers = []
processheadersjobpusher = threading.Thread(target = blockchainrpcprocessing.blockingprocessheaderspusherloop, args = (processheadersjobqueue, processheadersjobworkers))
readdatajobqueue = queue.Queue()
readdatajobworkers = []
readdatajobpusher = threading.Thread(target = blockchainrpcprocessing.blockingreaddatapusherloop, args = (readdatajobqueue, readdatajobworkers))
servercontentproviderjobqueue = queue.Queue()
servercontentproviderjobpusher = threading.Thread(target = blockchainrpcserver.blockingservercontentprovider, args = (servercontentproviderjobqueue, ))
serverjobqueue = queue.Queue()
serverjobworkers = []
serverjobpusher = threading.Thread(target = blockchainrpcprocessing.blockingserverpusherloop, args = (serverjobqueue, serverjobworkers))
requestrpcjobqueue = queue.Queue()
requestrpcjobworkers = []
requestrpcjobpusher = threading.Thread(target = blockchainrpcprocessing.blockingrequestrpcpusherloop, args = (requestrpcjobqueue, requestrpcjobworkers))
requestrpcsockets = []
acceptthreadlist = []
# Bitcoin Core - addrindex
requestrpcsockets += [(queue.Queue(), (socket.AF_INET, '127.0.0.1', 8332), ('rpcuser', 'rpcpassword'))]
# Bitcoin Unlimited
# requestrpcsockets += [(queue.Queue(), (socket.AF_INET, '127.0.0.1', 8342), ('rpcuser', 'rpcpassword'))]
# Bitcoin Unlimited ipv6
# requestrpcsockets += [(queue.Queue(), (socket.AF_INET6, '0:0:0:0:0:0:0:1', 8342), ('rpcuser', 'rpcpassword'))]
sendresponsejobqueue = queue.Queue()
sendresponsejobworkers = []
sendresponsejobpusher = threading.Thread(target = blockchainrpcprocessing.blockingsendresponsepusherloop, args = (sendresponsejobqueue, sendresponsejobworkers))
incomingsocketrecyclequeue = queue.Queue()
serveraddresslist = []
serveraddresslist += [(socket.AF_INET, '127.0.0.1', 80)]
serveraddresslist += [(socket.AF_INET6, '0:0:0:0:0:0:0:1', 80)]
serveraddresslistlen = len(serveraddresslist)
i = 0
while i < serveraddresslistlen:
serveraddressfamily, serveraddress, serverport = serveraddresslist[i]
acceptthread = threading.Thread(target = blockchainrpcnetworking.blockingacceptloop, args = (serveraddressfamily, serveraddress, serverport, incomingsocketrecyclequeue))
acceptthreadlist += [acceptthread]
i += 1
blockingrpcsyncerloopthread = threading.Thread(target = blockchainrpcbackground.blockingrpcsyncerloop, args = (requestrpcjobqueue, requestrpcjobpusher, requestrpcsockets))
blockingrpcsyncerloopthread.start()
# Start accept connection blocking infinite loop on main thread
while True:
i = 0
acceptthreadlistlen = len(acceptthreadlist)
while i < acceptthreadlistlen:
if not acceptthreadlist[i].is_alive():
try:
acceptthreadlist[i].start()
except RuntimeError:
pass
i += 1
client = incomingsocketrecyclequeue.get()
if not readheadersjobpusher.is_alive():
try:
readheadersjobpusher.start()
except RuntimeError:
pass
print(client.__repr__() + ' on main loop')
readheadersjobqueue.put((client, (processheadersjobqueue, processheadersjobpusher, ((readdatajobqueue, readdatajobpusher), (servercontentproviderjobqueue, servercontentproviderjobpusher, ((serverjobqueue, serverjobpusher, ((requestrpcjobqueue, requestrpcjobpusher, requestrpcsockets),)),)), (sendresponsejobqueue, sendresponsejobpusher, incomingsocketrecyclequeue)))))
#print('readdatajobpusher), (servercontentproviderjobqueue, servercontentproviderjobpusher, ((serverjobqueue, serverjobpusher, ((requestrpcjobqueue, requestrpcjobpusher, requestrpcsockets),)),)), (sendresponsejobqueue, sendresponsejobpusher, incomingsocketrecyclequeue)) = ' + ((readdatajobqueue, readdatajobpusher), (servercontentproviderjobqueue, servercontentproviderjobpusher, ((serverjobqueue, serverjobpusher, ((requestrpcjobqueue, requestrpcjobpusher, requestrpcsockets),)),)), (sendresponsejobqueue, sendresponsejobpusher, incomingsocketrecyclequeue)).__repr__())
|
"""
This module contains any functionality for downloading and
extracting data from any remotes.
"""
import zipfile
import tarfile
import multiprocessing
import requests
from pget.down import Downloader
from audiomate import logutil
logger = logutil.getLogger()
PROGRESS_LOGGER_BYTE_DELAY = 1024 * 1024 * 100
def download_files(url_to_target, num_threads=1):
"""
Download multiple files.
Args:
url_to_target (dict): Dict with mapping from source-url
to target-path.
num_threads (int): Number of threads to use.
"""
dl_items = list(url_to_target.items())
with multiprocessing.pool.ThreadPool(num_threads) as p:
result = list(logger.progress(
p.imap(_download_file, dl_items),
total=len(dl_items),
description='Download Files'
))
return result
def _download_file(item):
""" Helper function to pass (url, target) to ``download_file``. """
return download_file(item[0], item[1])
def download_file(url, target_path, num_threads=1):
"""
Download the file from the given `url` and store it at `target_path`.
Return a tuple x (url, bool, str).
x[0] contains the url.
If download failed x[1] is ``False`` and x[2] contains some error message.
If download was fine x[1] is ``True`` and x[2] contains the target-path.
"""
if num_threads > 1:
return download_file_parallel(
url,
target_path,
num_threads=num_threads
)
r = requests.get(url, stream=True)
if r.status_code != 200:
        return (url, False, 'Failed to download file {} (status {})!'.format(
            url,
            r.status_code
        ))
file_size = int(requests.head(url).headers['Content-Length'])
bytes_loaded = 0
bytes_since_last_log = 0
logger.info('Download file from "%s" with size: %d B', url, file_size)
with open(target_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
                bytes_loaded += len(chunk)
                bytes_since_last_log += len(chunk)
if bytes_since_last_log >= PROGRESS_LOGGER_BYTE_DELAY:
logger.info('Download [%06.2f%%]', bytes_loaded / file_size * 100)
bytes_since_last_log = 0
logger.info('Finished download')
return (url, True, target_path)
def download_file_parallel(url, target_path, num_threads=1):
"""
Download the file from the given `url` and store it at `target_path`.
Return a tuple x (url, bool, str).
x[0] contains the url.
If download failed x[1] is ``False`` and x[2] contains some error message.
If download was fine x[1] is ``True`` and x[2] contains the target-path.
"""
downloader = Downloader(url, target_path, num_threads)
downloader.start()
# Wait until we know file size
while downloader.total_length == 0:
pass
file_size = downloader.total_length
logger.info('Download file from "%s" with size: %d B', url, file_size)
bytes_at_last_log = 0
def callback(x):
nonlocal bytes_at_last_log
if x.total_downloaded - bytes_at_last_log >= PROGRESS_LOGGER_BYTE_DELAY:
logger.info('Download [%06.2f%%]', x.total_downloaded / file_size * 100)
bytes_at_last_log = x.total_downloaded
downloader.subscribe(callback, 10)
downloader.wait_for_finish()
logger.info('Finished download')
return (url, True, target_path)
def extract_zip(zip_path, target_folder):
"""
Extract the content of the zip-file at ``zip_path`` into
``target_folder``.
"""
with zipfile.ZipFile(zip_path) as archive:
archive.extractall(target_folder)
def extract_tar(tar_path, target_folder):
"""
Extract the content of the tar-file at ``tar_path`` into
``target_folder``.
"""
with tarfile.open(tar_path, 'r') as archive:
archive.extractall(target_folder)
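# Hedged usage sketch (not part of audiomate): combines the helpers defined
# above; the URL and paths are hypothetical placeholders.
if __name__ == '__main__':
    targets = {
        'https://example.com/data.zip': '/tmp/data.zip',  # hypothetical URL/path
    }
    for url, ok, info in download_files(targets, num_threads=2):
        if ok:
            extract_zip(info, '/tmp/data')
        else:
            logger.error('Download of %s failed: %s', url, info)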
|
#!/usr/bin/python
import sys
import os
from numpy import *
################################################################################
if len(sys.argv) >= 4:
refFlat_filename = sys.argv[1]
gpd_filename = sys.argv[2]
output_filename = sys.argv[3]
else:
print("usage: python refFlat.txt gpd_filename output_filename")
print("or ./g refFlat.txt gpd_filename output_filename")
sys.exit(1)
################################################################################
def addrefFlat(ref_iso_dt,ref_dt, ref_refFlat_filename):
ref=open(ref_refFlat_filename,'r')
for refline in ref:
ls = refline.strip().split('\t')
gene = ls[0]
ID = ls[1]
chr_name = ls[2]
if ls[8]=="1":
continue
jun_end_ls = ls[9].strip(',').split(',')[1:]
jun_start_ls = ls[10].strip(',').split(',')[:-1]
locus = chr_name+':'+'_'.join(jun_start_ls)+'-'+('_').join(jun_end_ls)
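        # e.g. 'chr1:1100_2200-2000_3000' (hypothetical coordinates): junction start
        # positions joined by '_', then '-', then junction end positions joined by '_'.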
        if locus not in ref_iso_dt:
ref_iso_dt[locus]=[[],[]]
ref_iso_dt[locus][0].append(gene)
ref_iso_dt[locus][1].append(ID)
        if locus not in ref_dt:
ref_dt[locus]=[]
ref_dt[locus].append(refline)
ref.close()
################################################################################
ref_iso_dt = {}
ref_dt = {}
addrefFlat(ref_iso_dt,ref_dt, refFlat_filename)
################################################################################
name_dt={}
tag = open(gpd_filename,'r')
for line in tag:
ls = line.strip().split('\t')
gene = ls[0]
ID = ls[1]
chr_name = ls[2]
if ls[8]=="1":
continue
jun_end_ls = ls[9].strip(',').split(',')[1:]
jun_start_ls = ls[10].strip(',').split(',')[:-1]
locus = chr_name+':'+'_'.join(jun_start_ls)+'-'+('_').join(jun_end_ls)
    if locus in ref_iso_dt:
        if len(ref_iso_dt[locus][0])>1:
            print("multigene for:", ls[0:2], ref_iso_dt[locus][0])
name_dt[ID]=[gene,ref_iso_dt[locus][0][0],ref_iso_dt[locus][1]]
tag.close()
################################################################################
ref=open(refFlat_filename,'r')
jun_transcript_ID_dict = {}
for refline in ref:
refline_list=refline.strip().split('\t')
exon_start_list=refline_list[9].strip(',').split(',')
exon_end_list=refline_list[10].strip(',').split(',')
jun_start = array(exon_end_list[:-1],dtype=int)
jun_end = array(exon_start_list[1:],dtype=int)
gene_id=refline_list[0]
transcript_id = refline_list[1]
chr_name = refline_list[2]
i = 0
for jun_end in exon_start_list[1:]:
jun_start = exon_end_list[i]
jun = chr_name + ":" + jun_start + "_" + jun_end
        if jun not in jun_transcript_ID_dict:
jun_transcript_ID_dict[jun] = []
jun_transcript_ID_dict[jun].append(gene_id)
i+=1
ref.close()
################################################################################
target=open(gpd_filename,'r')
for line in target:
ls = line.strip().split('\t')
exon_start_list=ls[9].strip(',').split(',')
exon_end_list=ls[10].strip(',').split(',')
jun_start = array(exon_end_list[:-1],dtype=int)
jun_end = array(exon_start_list[1:],dtype=int)
gene_id=ls[0]
transcript_id = ls[1]
chr_name = ls[2]
transcript_candidate_ls = []
i = 0
for jun_end in exon_start_list[1:]:
jun_start = exon_end_list[i]
jun = chr_name + ":" + jun_start + "_" + jun_end
        if jun in jun_transcript_ID_dict:
transcript_candidate_ls.extend(jun_transcript_ID_dict[jun])
i+=1
transcript_candidate_set = set(transcript_candidate_ls)
#############################################################################################
if len(transcript_candidate_set) > 0:
best_gene_N = 0
best_gene=""
for item in transcript_candidate_set:
if transcript_candidate_ls.count(item)>best_gene_N:
best_gene_N = transcript_candidate_ls.count(item)
best_gene = item
        # Assign after the loop, using the most frequent candidate gene
        if transcript_id not in name_dt:
            name_dt[transcript_id] = [gene_id,best_gene,["-"]]
else:
name_dt[transcript_id ] = [gene_id,"-",["-"]]
target.close()
output = open(output_filename,'w')
for ID in name_dt:
for refID in name_dt[ID][2]:
output.write(ID+"\t"+"\t".join(name_dt[ID][0:2])+ "\t"+ refID +"\n")
output.close()
|
#
# Autogenerated by Thrift Compiler (0.8.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Method:
GET = 0
PUT = 1
POST = 2
DELETE = 3
HEAD = 4
OPTIONS = 5
_VALUES_TO_NAMES = {
0: "GET",
1: "PUT",
2: "POST",
3: "DELETE",
4: "HEAD",
5: "OPTIONS",
}
_NAMES_TO_VALUES = {
"GET": 0,
"PUT": 1,
"POST": 2,
"DELETE": 3,
"HEAD": 4,
"OPTIONS": 5,
}
class Status:
CONT = 100
SWITCHING_PROTOCOLS = 101
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIED = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
INSUFFICIENT_STORAGE = 506
_VALUES_TO_NAMES = {
100: "CONT",
101: "SWITCHING_PROTOCOLS",
200: "OK",
201: "CREATED",
202: "ACCEPTED",
203: "NON_AUTHORITATIVE_INFORMATION",
204: "NO_CONTENT",
205: "RESET_CONTENT",
206: "PARTIAL_CONTENT",
207: "MULTI_STATUS",
300: "MULTIPLE_CHOICES",
301: "MOVED_PERMANENTLY",
302: "FOUND",
303: "SEE_OTHER",
304: "NOT_MODIFIED",
305: "USE_PROXY",
307: "TEMPORARY_REDIRECT",
400: "BAD_REQUEST",
401: "UNAUTHORIZED",
402: "PAYMENT_REQUIRED",
403: "FORBIDDEN",
404: "NOT_FOUND",
405: "METHOD_NOT_ALLOWED",
406: "NOT_ACCEPTABLE",
407: "PROXY_AUTHENTICATION",
408: "REQUEST_TIMEOUT",
409: "CONFLICT",
410: "GONE",
411: "LENGTH_REQUIRED",
412: "PRECONDITION_FAILED",
413: "REQUEST_ENTITY_TOO_LARGE",
414: "REQUEST_URI_TOO_LONG",
415: "UNSUPPORTED_MEDIA_TYPE",
416: "REQUESTED_RANGE_NOT_SATISFIED",
417: "EXPECTATION_FAILED",
422: "UNPROCESSABLE_ENTITY",
423: "LOCKED",
424: "FAILED_DEPENDENCY",
500: "INTERNAL_SERVER_ERROR",
501: "NOT_IMPLEMENTED",
502: "BAD_GATEWAY",
503: "SERVICE_UNAVAILABLE",
504: "GATEWAY_TIMEOUT",
506: "INSUFFICIENT_STORAGE",
}
_NAMES_TO_VALUES = {
"CONT": 100,
"SWITCHING_PROTOCOLS": 101,
"OK": 200,
"CREATED": 201,
"ACCEPTED": 202,
"NON_AUTHORITATIVE_INFORMATION": 203,
"NO_CONTENT": 204,
"RESET_CONTENT": 205,
"PARTIAL_CONTENT": 206,
"MULTI_STATUS": 207,
"MULTIPLE_CHOICES": 300,
"MOVED_PERMANENTLY": 301,
"FOUND": 302,
"SEE_OTHER": 303,
"NOT_MODIFIED": 304,
"USE_PROXY": 305,
"TEMPORARY_REDIRECT": 307,
"BAD_REQUEST": 400,
"UNAUTHORIZED": 401,
"PAYMENT_REQUIRED": 402,
"FORBIDDEN": 403,
"NOT_FOUND": 404,
"METHOD_NOT_ALLOWED": 405,
"NOT_ACCEPTABLE": 406,
"PROXY_AUTHENTICATION": 407,
"REQUEST_TIMEOUT": 408,
"CONFLICT": 409,
"GONE": 410,
"LENGTH_REQUIRED": 411,
"PRECONDITION_FAILED": 412,
"REQUEST_ENTITY_TOO_LARGE": 413,
"REQUEST_URI_TOO_LONG": 414,
"UNSUPPORTED_MEDIA_TYPE": 415,
"REQUESTED_RANGE_NOT_SATISFIED": 416,
"EXPECTATION_FAILED": 417,
"UNPROCESSABLE_ENTITY": 422,
"LOCKED": 423,
"FAILED_DEPENDENCY": 424,
"INTERNAL_SERVER_ERROR": 500,
"NOT_IMPLEMENTED": 501,
"BAD_GATEWAY": 502,
"SERVICE_UNAVAILABLE": 503,
"GATEWAY_TIMEOUT": 504,
"INSUFFICIENT_STORAGE": 506,
}
class RestRequest:
"""
Attributes:
- method
- uri
- parameters
- headers
- body
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'method', None, None, ), # 1
(2, TType.STRING, 'uri', None, None, ), # 2
(3, TType.MAP, 'parameters', (TType.STRING, None, TType.STRING, None), None, ), # 3
(4, TType.MAP, 'headers', (TType.STRING, None, TType.STRING, None), None, ), # 4
(5, TType.STRING, 'body', None, None, ), # 5
)
def __init__(self, method=None, uri=None, parameters=None, headers=None, body=None,):
self.method = method
self.uri = uri
self.parameters = parameters
self.headers = headers
self.body = body
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.method = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.uri = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.parameters = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in xrange(_size0):
_key5 = iprot.readString()
_val6 = iprot.readString()
self.parameters[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.headers = {}
(_ktype8, _vtype9, _size7) = iprot.readMapBegin()
for _i11 in xrange(_size7):
_key12 = iprot.readString()
_val13 = iprot.readString()
self.headers[_key12] = _val13
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.body = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RestRequest')
if self.method is not None:
oprot.writeFieldBegin('method', TType.I32, 1)
oprot.writeI32(self.method)
oprot.writeFieldEnd()
if self.uri is not None:
oprot.writeFieldBegin('uri', TType.STRING, 2)
oprot.writeString(self.uri)
oprot.writeFieldEnd()
if self.parameters is not None:
oprot.writeFieldBegin('parameters', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter14, viter15 in self.parameters.items():
oprot.writeString(kiter14)
oprot.writeString(viter15)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.headers is not None:
oprot.writeFieldBegin('headers', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))
for kiter16, viter17 in self.headers.items():
oprot.writeString(kiter16)
oprot.writeString(viter17)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.body is not None:
oprot.writeFieldBegin('body', TType.STRING, 5)
oprot.writeString(self.body)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.method is None:
raise TProtocol.TProtocolException(message='Required field method is unset!')
if self.uri is None:
raise TProtocol.TProtocolException(message='Required field uri is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RestResponse:
"""
Attributes:
- status
- headers
- body
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'status', None, None, ), # 1
(2, TType.MAP, 'headers', (TType.STRING, None, TType.STRING, None), None, ), # 2
(3, TType.STRING, 'body', None, None, ), # 3
)
def __init__(self, status=None, headers=None, body=None,):
self.status = status
self.headers = headers
self.body = body
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.status = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.headers = {}
(_ktype19, _vtype20, _size18) = iprot.readMapBegin()
for _i22 in xrange(_size18):
_key23 = iprot.readString()
_val24 = iprot.readString()
self.headers[_key23] = _val24
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.body = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RestResponse')
if self.status is not None:
oprot.writeFieldBegin('status', TType.I32, 1)
oprot.writeI32(self.status)
oprot.writeFieldEnd()
if self.headers is not None:
oprot.writeFieldBegin('headers', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))
for kiter25, viter26 in self.headers.items():
oprot.writeString(kiter25)
oprot.writeString(viter26)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.body is not None:
oprot.writeFieldBegin('body', TType.STRING, 3)
oprot.writeString(self.body)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.status is None:
raise TProtocol.TProtocolException(message='Required field status is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
from django.conf.urls.defaults import *
from alva.models import Idea
idea_info_dict = {
'queryset': Idea.live.all(),
'date_field': 'think_date',
}
urlpatterns = patterns('django.views.generic.date_based',
(r'^$', 'archive_index', idea_info_dict, 'alva_idea_archive_index'),
(r'^(?P<year>\d{4})/$', 'archive_year', idea_info_dict, 'alva_idea_archive_year'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', 'archive_month', idea_info_dict, 'alva_idea_archive_month'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', 'archive_day', idea_info_dict, 'alva_idea_archive_day'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', 'object_detail', idea_info_dict, 'alva_idea_detail'),
)
|
from biosimulators_utils.simulator_registry import data_model
import unittest
class DataModelTestCase(unittest.TestCase):
def test(self):
sim1 = data_model.SimulatorSubmission(
id='tellurium',
version='2.1.6',
specifications_url='https://raw.githubusercontent.com/biosimulators/Biosimulators_tellurium/dev/biosimulators.json',
specifications_patch={'version': '2.1.5'},
)
sim2 = data_model.SimulatorSubmission(
id='tellurium',
version='2.1.6',
specifications_url='https://raw.githubusercontent.com/biosimulators/Biosimulators_tellurium/dev/biosimulators.json',
specifications_patch={'version': '2.1.5'},
)
sim3 = data_model.SimulatorSubmission(
id='tellurium',
version='2.1.5',
specifications_url='https://raw.githubusercontent.com/biosimulators/Biosimulators_tellurium/dev/biosimulators.json',
specifications_patch={'version': '2.1.5'},
)
self.assertEqual(sim1.to_tuple(), (sim1.id, sim1.version, sim1.specifications_url, sim1.specifications_patch,
sim1.validate_image, sim1.commit_simulator,
))
self.assertTrue(sim1.is_equal(sim2))
self.assertFalse(sim1.is_equal(sim3))
|
#!/usr/bin/env python
import os
import sys
from glob import glob
import shutil
THIS_RECIPE = os.getenv('RECIPE_DIR', '')
conda_tools_dir = os.path.join(THIS_RECIPE, '..', 'conda_tools')
print('conda_tools_dir', conda_tools_dir)
sys.path.insert(0, conda_tools_dir)
import utils # conda_tools
import copy_ambertools
def main():
PREFIX = os.getenv('PREFIX')
AMBERHOME = os.getcwd()
os.environ['AMBERHOME'] = AMBERHOME
copy_ambertools.main()
ATPY2 = utils.get_package_dir(
conda_recipe=os.path.join(THIS_RECIPE, '..', 'conda-ambertools-single-python'), py=2.7)
print("ATPY2", ATPY2, 'exists = ', os.path.exists(ATPY2))
utils.tar_xf(ATPY2)
utils.patch(os.path.join(THIS_RECIPE, 'patch'))
utils.update_amber()
utils.set_compiler_env()
utils.run_configure()
os.chdir('AmberTools/src')
utils.make_python_serial()
os.chdir(AMBERHOME)
python_ver = ".".join(map(str, sys.version_info[:2]))
prefix_bin = os.path.join(PREFIX, 'bin')
shutil.copy('{}/bin/pdb4amber'.format(AMBERHOME), prefix_bin)
shutil.copy('{}/bin/parmed'.format(AMBERHOME), prefix_bin)
for fn in glob('{}/lib/*'.format(AMBERHOME)):
# only need some libraries for pytraj/libcpptraj
if os.path.isfile(fn):
shutil.copy(fn, '{}/lib/'.format(PREFIX))
utils.sh('cp -rf {}/lib/python{} {}/lib/'.format(AMBERHOME, python_ver, PREFIX))
shutil.rmtree('./info')
if __name__ == '__main__':
main()
|
from minerva import mutils
from twisted.application import service, strports
from twisted.python.filepath import FilePath
from webmagic.filecache import FileCache
from cardboard.web import site
def makeService(config):
from twisted.internet import reactor
multi = service.MultiService()
domain = config['domain']
mutils.maybeWarnAboutDomain(reactor, domain)
closureLibrary = FilePath(config['closure-library'])
mutils.maybeWarnAboutClosureLibrary(reactor, closureLibrary)
socketPorts = []
for minervaStrport in config['minerva']:
_, _args, _ = strports.parse(minervaStrport, object())
socketPorts.append(_args[0])
fileCache = FileCache(lambda: reactor.seconds(), -1)
stf, httpSite = site.setupMinerva(
reactor, fileCache, socketPorts, domain, closureLibrary
)
httpSite.displayTracebacks = not config["no-tracebacks"]
for httpStrport in config['http']:
httpServer = strports.service(httpStrport, httpSite)
httpServer.setServiceParent(multi)
for minervaStrport in config['minerva']:
minervaServer = strports.service(minervaStrport, stf)
minervaServer.setServiceParent(multi)
return multi
|
import numpy as np
# Mirror all frames in a (N, S, H, W) dataset, returning the augmented data
# axis = 2 for vertical (flip row order), 3 for horizontal (flip column order)
def aug_mirror(dataset, axis):
return np.flip(dataset, axis=axis)
# Rotate 90 degrees CCW, the integer number of times specified
# Rotates 1 time by default
def aug_rotate_90(dataset, times=1):
return np.rot90(dataset, times, axes=(2, 3))
# Helper function for randomly sampling from a range
def random_sample_range(shape, min, max):
# Scale the uniform distribution to sample the specified range
scale = max - min
shift = min
return scale * np.random.random_sample(shape) + shift
# Scale each sequence's values by a random constant
def aug_rand_scale(dataset, min_scale_factor=0.5, max_scale_factor=1.5):
# Sample scale factors and scale dataset
scale_factors = random_sample_range(
dataset.shape[0], min_scale_factor, max_scale_factor
)
scale_factors = scale_factors.reshape((-1, 1, 1, 1))
return scale_factors * dataset
# Offset each sequence's values by a random constant
def aug_rand_offset(dataset, min_offset=-1, max_offset=1):
# Sample offsets and offset dataset
offsets = random_sample_range(dataset.shape[0], min_offset, max_offset)
offsets = offsets.reshape((-1, 1, 1, 1))
return offsets + dataset
# Scale and offsets each sequence by random constants
# clarification: within each sequence, multiply by the same constant
def aug_rand_affine(dataset):
return aug_rand_offset(aug_rand_scale(dataset))
# Random affine transformation
# Transforms the input dataset so that everything is between two randomly
# chosen limits, within the range [0,1)
def aug_random_affine_norm(dataset):
# Generate two limits into which to shift and scale the data
# lower limits row 0, upper limits row 1
rand_limits = np.sort(np.random.random_sample(size=(2, dataset.shape[0])), axis=0)
# Get mins and maxes of each sequence
seq_mins = dataset.min(axis=(1, 2, 3))
seq_maxes = dataset.max(axis=(1, 2, 3))
# Scale
data_spreads = seq_maxes - seq_mins
limit_spreads = rand_limits[1, :] - rand_limits[0, :]
scale_factors = data_spreads / limit_spreads
dataset /= scale_factors.reshape((-1, 1, 1, 1))
# Shift
seq_mins /= scale_factors
shift_factors = rand_limits[0, :] - seq_mins
dataset += shift_factors.reshape((-1, 1, 1, 1))
return dataset
# Randomly chooses pairs of sequences and adds them
# N: number of examples in output dataset
# Returns an augmented dataset the same shape as the input data
def aug_add_random_pairs(dataset, out_size=-1):
# Handle default value
if out_size == -1:
out_size = dataset.shape[0]
    # Get two N-length lists of indices, from [0, N)
indices_0 = np.random.randint(0, dataset.shape[0], size=out_size)
indices_1 = np.random.randint(0, dataset.shape[0], size=out_size)
return dataset[indices_0] + dataset[indices_1]
def augment(dataset, sum_affine_aug, num_sum_aug, rand_seed):
orig_dataset_size = dataset.shape[0]
np.random.seed(rand_seed)
    # Python list containing all augmentations, to concatenate at end
aug_list = []
aug_list.append(dataset.copy())
# Normalized random-affine augmentations
for _ in range(0, sum_affine_aug):
        # Use a copy: aug_random_affine_norm modifies its input in place, so passing
        # the same array each time would append identical augmentations.
        aug_list.append(aug_random_affine_norm(dataset.copy()))  # Random affine
dataset = np.vstack(aug_list)
# Random-pair-sum augmentations, inside normalized random-affine augmentations
dataset = np.vstack((dataset, aug_random_affine_norm(aug_add_random_pairs(dataset, orig_dataset_size * num_sum_aug))))
return dataset
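# Hedged usage sketch: exercises augment() on a small random dataset with the
# (N, S, H, W) layout assumed above; the sizes and seed are arbitrary
# illustration values.
if __name__ == "__main__":
    data = np.random.random_sample((4, 3, 8, 8))  # 4 sequences of 3 frames, 8x8 each
    out = augment(data, sum_affine_aug=2, num_sum_aug=1, rand_seed=0)
    # 4 originals + 2 * 4 affine copies + 4 * 1 pair sums = 16 sequences
    print(out.shape)  # expected: (16, 3, 8, 8)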
|
from django.db.models import Q
from django.test import TestCase
from django.contrib.auth.models import User, Group
from django.db.models.signals import post_save
from guardian.models import UserObjectPermission, GroupObjectPermission
from governor.shortcuts import (get_users_eligible_for_object,
get_groups_eligible_for_object)
from governor.receivers import (setup_users_eligible_for_object,
setup_groups_eligible_for_object, setup_objects_eligible_for_user,
setup_objects_eligible_for_group)
from governor import roles
from .models import Profile, Newspaper, Article
@roles.register('tests.review_article', 'tests.preview_article')
class EditorRole(object):
def users_eligible_for_object(self, obj, perm):
# Global editors..
global_editors = Q(profile__is_editor=True)
# Editors assigned to the newspaper..
newspaper_editors = Q(pk__in=obj.newspaper.editors.all())
# Article editor..
article_editor = Q(pk=obj.editor_id)
# Author of the article..
article_author = Q(pk=obj.author_id)
return User.objects.filter(global_editors | newspaper_editors |
article_editor | article_author)
def groups_eligible_for_object(self, obj, perm):
return Group.objects.filter(name='Editors')
def objects_eligible_for_user(self, user, perm):
articles = Article.objects.all()
        # Global editor or in the editor group..
if user.profile.is_editor or user.groups.filter(name='Editors').exists():
return articles
# Articles the user is the author or editor for..
return articles.filter(Q(author=user) | Q(editor=user))
def objects_eligible_for_group(self, group, perm):
if group.name == 'Editors':
return Article.objects.all()
class RoleTestCase(TestCase):
def setUp(self):
self.group = Group(name='Editors')
self.group.save()
self.user1 = User(username='user1')
self.user1.save()
Profile(user=self.user1, is_editor=False).save()
self.user2 = User(username='user2')
self.user2.save()
Profile(user=self.user2, is_editor=False).save()
self.user3 = User(username='user3')
self.user3.save()
Profile(user=self.user3, is_editor=False).save()
self.user4 = User(username='user4')
self.user4.save()
Profile(user=self.user4, is_editor=False).save()
self.user4.groups.add(self.group)
self.editor1 = User(username='editor1')
self.editor1.save()
Profile(user=self.editor1, is_editor=True).save()
self.editor2 = User(username='editor2')
self.editor2.save()
Profile(user=self.editor2, is_editor=True).save()
self.newspaper = Newspaper(name='NYT')
self.newspaper.save()
self.newspaper.editors.add(self.editor1)
self.article = Article(newspaper=self.newspaper, author=self.user1,
editor=self.editor1)
self.article.save()
def test_registry(self):
self.assertEqual(len(roles.get_perms_for_role(EditorRole)), 2)
def test_shortcuts_noop(self):
users = get_users_eligible_for_object('tests.add_article', self.article)
groups = get_groups_eligible_for_object('tests.add_article', self.article)
self.assertEqual(len(users), 0)
self.assertEqual(len(groups), 0)
def test_shortcuts(self):
users = get_users_eligible_for_object('tests.review_article', self.article)
self.assertEqual(len(users), 3)
self.assertEqual(len(set(users)), 3)
pks = [self.user1.pk, self.editor1.pk, self.editor2.pk]
self.assertTrue(all([u.pk in pks for u in users]))
groups = get_groups_eligible_for_object('tests.review_article', self.article)
self.assertEqual(groups[0], self.group)
def test_receivers(self):
receiver1 = setup_users_eligible_for_object('tests.review_article')
post_save.connect(receiver1, sender=Article)
receiver2 = setup_groups_eligible_for_object('tests.review_article')
post_save.connect(receiver2, sender=Article)
receiver3 = setup_objects_eligible_for_user('tests.review_article')
post_save.connect(receiver3, sender=User)
receiver4 = setup_objects_eligible_for_group('tests.review_article')
post_save.connect(receiver4, sender=Group)
self.assertEqual(UserObjectPermission.objects.count(), 0)
self.assertEqual(GroupObjectPermission.objects.count(), 0)
article = Article(newspaper=self.newspaper, author=self.user1,
editor=self.editor1)
article.save()
self.assertEqual(UserObjectPermission.objects.count(), 3)
self.assertEqual(GroupObjectPermission.objects.count(), 1)
# user4 has permission because of the group..
for u in (self.user1, self.editor1, self.editor2, self.user4):
self.assertTrue(u.has_perm('tests.review_article', article))
article = Article(newspaper=self.newspaper, author=self.user2,
editor=self.editor1)
article.save()
for u in (self.user2, self.editor1, self.editor2, self.user4):
self.assertTrue(u.has_perm('tests.review_article', article))
|
#!/usr/bin/env python3
from .proc_base import ProcBase
class CpuStats:
'''
Represents a single CPU entry from the /proc/stat file.
'''
# Format string for a table entry
table_format_str = '| {0:>6} | {1:>9} | {2:>5} | {3:>11} | {4:>8} |' \
' {5:>11} | {6:>5} | {percent:7.2f} |'
def __init__(self, line):
'''
Parse line read from /proc/stat to get CPU
execution time breakdown.
Keyword arguments:
line -- A single line read from /proc/stat starting with cpu[0-9]*
'''
split = line.split()
        if split[0][3:].isdigit():
            # Use everything after 'cpu' so multi-digit CPU indices (cpu10, ...) work
            self.label = '#' + split[0][3:]
            self.index = int(split[0][3:])
else:
self.label = 'All'
self.index = -1
self._entries = [int(entry) for entry in split[1:]]
def get_total(self):
'''
Returns total amount of CPU time
summed across all activities.
'''
return sum(self._entries)
def dump_table_entry(self, _percent):
'''
Prints a single table line.
Keyword arguments:
        _percent -- The CPU's percentage of the total computation time
'''
print(CpuStats.table_format_str.format(
self.label, *self._entries, percent=_percent))
class ProcStat(ProcBase):
'''Object represents the /proc/stat file.'''
def __init__(self):
'''
Read file by calling base class constructor
then parse the contents.
'''
self.cpus = []
self.stats = []
super().__init__('/proc/stat')
self.read()
def read(self):
'''Parses contents of /proc/stat'''
# Iterate over each line of the file
for line in self.content.split('\n'):
tokens = line.split()
if not tokens:
continue
if line.startswith('cpu'):
# Parse cpu details using CpuStats class
self.cpus.append(CpuStats(line))
elif tokens[0] == 'ctxt':
self.stats.append(('Number of context switches:', tokens[-1]))
elif tokens[0] == 'btime':
self.stats.append(
('Boot time in seconds since epoch:', tokens[-1]))
elif tokens[0] == 'processes':
self.stats.append(
('Number of processes forked since boot:', tokens[-1]))
elif tokens[0] == 'procs_running':
self.stats.append(
('Number of processes in a runnable state:', tokens[-1]))
elif tokens[0] == 'procs_blocked':
self.stats.append(
('Number of processes blocked on I/O:', tokens[-1]))
def _dump_cpu_stat_table(self):
'''Print table of CPU time stats.'''
table_heading_str = '| CPU ID | User Land | Niced | System Land |' \
' Idle | I/O blocked | IRQ | % |'
print(table_heading_str)
print('-' * len(table_heading_str))
total_usage = max([cpu.get_total() for cpu in self.cpus])
for cpu in self.cpus:
percent = (cpu.get_total() / total_usage) * 100
cpu.dump_table_entry(percent)
def dump(self):
'''Print information gathered to stdout.'''
super().dump() # Print file header
for (msg, num) in self.stats:
print(msg, num)
print('\n') # Double new line
self._dump_cpu_stat_table()
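# Hedged usage sketch: with the package providing proc_base.ProcBase available
# (the relative import above requires package context), the class is used as:
#
#     stat = ProcStat()
#     stat.dump()   # prints the gathered counters and the per-CPU time table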
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import distutils.util
import sys
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--role', type=int, default=0, help='role')
parser.add_argument('--server', type=str, default='localhost', help='server ip')
parser.add_argument('--port', type=int, default=12345, help='server port')
parser.add_argument('--epochs', type=int, default=5, help='epochs')
parser.add_argument('--test_epoch', type=int, default=5, help='test_epoch')
parser.add_argument('--dataset_size', type=int, default=6040, help='dataset_size')
parser.add_argument('--batch_size', type=int, default=10, help='batch_size')
parser.add_argument('--batch_num', type=int, default=600, help='batch_num')
parser.add_argument('--use_gpu', type=int, default=0, help='whether using gpu')
parser.add_argument('--mpc_data_dir', type=str, default='./mpc_data/', help='mpc_data_dir')
parser.add_argument('--model_dir', type=str, default='./model_dir/', help='model_dir')
parser.add_argument('--watch_vec_size', type=int, default=64, help='watch_vec_size')
parser.add_argument('--search_vec_size', type=int, default=64, help='search_vec_size')
parser.add_argument('--other_feat_size', type=int, default=32, help='other_feat_size')
parser.add_argument('--output_size', type=int, default=3952, help='output_size')
parser.add_argument('--base_lr', type=float, default=0.004, help='base_lr')
parser.add_argument('--topk', type=int, default=10, help='topk')
args = parser.parse_args()
return args
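# Hedged usage sketch: parse_args() reads sys.argv, so this only illustrates the
# typical call pattern and a few of the resulting attributes.
if __name__ == '__main__':
    args = parse_args()
    print('role=%d, server=%s:%d, epochs=%d, batch_size=%d'
          % (args.role, args.server, args.port, args.epochs, args.batch_size))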
|
from sympy import symbols, lambdify, diff, sqrt, I
from sympy import besselj, hankel1, atan2, exp, pi, tanh
import scipy.special as scp
import numpy as np
from scipy.sparse.linalg import gmres
# gmres iteration counter
# https://stackoverflow.com/questions/33512081/getting-the-number-of-iterations-of-scipys-gmres-iterative-method
class gmres_counter(object):
def __init__(self, disp=True):
self._disp = disp
self.niter = 0
def __call__(self, rk=None):
self.niter += 1
counter = gmres_counter()
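# Illustration: instances of gmres_counter are passed to scipy's gmres via the
# callback argument (as done in bvp() below); after the solve, counter.niter
# holds the number of iterations performed.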
def sommerfeld(k, beta, interval, exp_type):
"""
evaluates Sommerfeld term either full or partial based on argument provided
"""
x_1, y_1, x_2, y_2, ny_1, ny_2 = symbols("x_1, y_1, x_2, y_2, ny_1, ny_2")
som_int_sp = 0
t_max = int(k) + interval
C = 1
for t in range(-t_max, t_max + 1):
lam = t - I * tanh(t)
d_lam = 1 - I * (1 - tanh(t) ** 2)
f = (lam ** 2 - k ** 2) ** 0.5
if exp_type == "full":
term = (
exp(-f * (x_2 + y_2))
* exp(I * lam * (x_1 - y_1))
* (f + I * beta)
/ (f - I * beta)
/ f
* d_lam
/ 4
/ pi
)
else:
term = (
exp(-f * (x_2 + y_2))
* exp(I * lam * (x_1 - y_1))
* exp((-f + I * beta) * C)
/ (f - I * beta)
/ f
* d_lam
* I * beta
/ 2
/ pi
)
if t == -t_max or t == t_max:
som_int_sp += 0.5 * term
else:
som_int_sp += term
som_sp = lambdify([x_1, x_2, y_1, y_2], som_int_sp)
som_int_dp = ny_1 * diff(som_int_sp, y_1) + ny_2 * diff(som_int_sp, y_2)
som_dp = lambdify([x_1, x_2, y_1, y_2, ny_1, ny_2], som_int_dp)
return som_sp, som_dp
def eval_sp_dp_QBX(order, k):
"""
evaluates single layer, double layer and corr QBX coeffs for Helmholtz
calculates image potential for y = -y - eta as well
"""
x_1, y_1, x_2, y_2, eta = symbols("x_1, y_1, x_2, y_2, eta")
nx_1, nx_2, ny_1, ny_2, r = symbols("nx_1, nx_2, ny_1, ny_2, r")
dist = sqrt((x_1 - y_1) ** 2 + (x_2 - y_2) ** 2)
kernel = I / 4 * hankel1(0, k * dist)
single_layer = lambdify([x_1, x_2, y_1, y_2], kernel)
green_normal_der = ny_1 * diff(kernel, y_1) + ny_2 * diff(kernel, y_2)
double_layer = lambdify([x_1, x_2, y_1, y_2, ny_1, ny_2], green_normal_der)
# image in y=0 calculations
image_dist = sqrt((x_1 - y_1) ** 2 + (x_2 + y_2 + eta) ** 2)
image_kernel = I / 4 * hankel1(0, k * image_dist)
image_single_layer = lambdify([x_1, x_2, y_1, y_2, eta], image_kernel)
image_green_normal_der = ny_1 * diff(image_kernel, y_1) + ny_2 * diff(
image_kernel, y_2
)
image_double_layer = lambdify(
[x_1, x_2, y_1, y_2, eta, ny_1, ny_2], image_green_normal_der
)
# Grafs theorem term evaluations
c_1 = x_1 + nx_1 * r
c_2 = x_2 + nx_2 * r
xc = sqrt((x_1 - c_1) ** 2 + (x_2 - c_2) ** 2)
yc = sqrt((y_1 - c_1) ** 2 + (y_2 - c_2) ** 2)
x_theta = atan2((x_2 - c_2), (x_1 - c_1))
y_theta = atan2((y_2 - c_2), (y_1 - c_1))
img_yc = sqrt((y_1 - c_1) ** 2 + (-(y_2 + eta) - c_2) ** 2)
img_y_theta = atan2((-(y_2 + eta) - c_2), (y_1 - c_1))
# single layer expansion zeroth order term
qbx_exp_slp = I / 4 * hankel1(0, k * yc) * besselj(0, k * xc)
img_qbx_exp_slp = I / 4 * hankel1(0, k * img_yc) * besselj(0, k * xc)
for i in range(1, order + 1):
qbx_exp_slp += (
I
/ 4
* (
hankel1(i, k * yc)
* exp(I * i * y_theta)
* besselj(i, k * xc)
* exp(-I * i * x_theta)
)
)
qbx_exp_slp += (
I
/ 4
* (
hankel1(-i, k * yc)
* exp(-I * i * y_theta)
* besselj(-i, k * xc)
* exp(I * i * x_theta)
)
)
img_qbx_exp_slp += (
I
/ 4
* (
hankel1(i, k * img_yc)
* exp(I * i * img_y_theta)
* besselj(i, k * xc)
* exp(-I * i * x_theta)
)
)
img_qbx_exp_slp += (
I
/ 4
* (
hankel1(-i, k * img_yc)
* exp(-I * i * img_y_theta)
* besselj(-i, k * xc)
* exp(I * i * x_theta)
)
)
qbx_exp_dlp = ny_1 * diff(qbx_exp_slp, y_1) + ny_2 * diff(qbx_exp_slp, y_2)
exp_term_slp = lambdify(
[x_1, x_2, y_1, y_2, nx_1, nx_2, ny_1, ny_2, r], qbx_exp_slp
)
exp_term_dlp = lambdify(
[x_1, x_2, y_1, y_2, nx_1, nx_2, ny_1, ny_2, r], qbx_exp_dlp
)
img_qbx_exp_dlp = ny_1 * diff(img_qbx_exp_slp, y_1) + ny_2 * diff(
img_qbx_exp_slp, y_2
)
img_exp_term_slp = lambdify(
[x_1, x_2, y_1, y_2, eta, nx_1, nx_2, ny_1, ny_2, r], img_qbx_exp_slp
)
img_exp_term_dlp = lambdify(
[x_1, x_2, y_1, y_2, eta, nx_1, nx_2, ny_1, ny_2, r], img_qbx_exp_dlp
)
return (
single_layer,
double_layer,
exp_term_slp,
exp_term_dlp,
image_single_layer,
image_double_layer,
img_exp_term_slp,
img_exp_term_dlp,
)
class Images_Integral:
def __init__(self, m, beta, img_sp, img_dp):
self.m = m
self.beta = beta
self.img_sp = img_sp
self.img_dp = img_dp
def eval_integral(self, targets, sources, source_normal_x, source_normal_y):
"""
evaluates the sum of integral of images on (m+1) dyadic intervals
"""
C = 1
dyad = 2 ** np.arange(-self.m, C, dtype=float)
dyadic_int = np.insert(dyad, 0, 0.0)
npoints = 8
ref_info = scp.legendre(npoints).weights
ref_nodes = ref_info[:, 0]
ref_weights = ref_info[:, 2]
image_nodes = np.zeros((self.m + 1, npoints))
image_weights = np.zeros((self.m + 1, npoints))
for i in range(self.m + 1):
a, b = dyadic_int[i : i + 2]
image_nodes[i] = ref_nodes * (b - a) * 0.5 + (b + a) * 0.5
image_weights[i] = 0.5 * (b - a) * ref_weights
image_nodes = image_nodes.reshape(-1)
image_weights = image_weights.reshape(-1)
# Neumann condition image
sp_sum_int = self.img_sp(targets[0], targets[1], sources[0], sources[1], 0)
dp_sum_int = self.img_dp(
targets[0],
targets[1],
sources[0],
sources[1],
0,
source_normal_x,
source_normal_y,
)
for i in range((self.m + 1) * npoints):
sp_sum_int += (
2
* self.beta
* 1j
* self.img_sp(
targets[0], targets[1], sources[0], sources[1], image_nodes[i]
)
* np.exp(1j * self.beta * image_nodes[i])
) * image_weights[i]
dp_sum_int += (
2
* self.beta
* 1j
* self.img_dp(
targets[0],
targets[1],
sources[0],
sources[1],
image_nodes[i],
source_normal_x,
source_normal_y,
)
* np.exp(1j * self.beta * image_nodes[i])
* image_weights[i]
)
return sp_sum_int, dp_sum_int
def bvp(n, k, domain, alpha, qbx_exp_slp, qbx_exp_dlp, rhs, **kwargs):
"""
solves the BVP for density
"""
normals_x, normals_y = domain.normals.reshape(2, -1)
nodes_x, nodes_y = domain.curve_nodes.reshape(2, -1)
# taking exp_radius as panel_length * 5 from QBX paper for controlled precision convg
# qbx_radius = np.repeat(domain.panel_lengths, domain.npoints) ** 0.5
qbx_radius = np.repeat(domain.panel_lengths, domain.npoints) * 0.5
total_points = nodes_x.shape[0]
normal_mat_x = np.broadcast_to(normals_x, (total_points, total_points))
normal_mat_y = np.broadcast_to(normals_y, (total_points, total_points))
node_mat_x = np.broadcast_to(nodes_x, (total_points, total_points))
node_mat_y = np.broadcast_to(nodes_y, (total_points, total_points))
radius_mat = np.broadcast_to(qbx_radius, (total_points, total_points)).T
# take care of normal signs here
D_qbx_int = qbx_exp_dlp(
node_mat_x.T,
node_mat_y.T,
node_mat_x,
node_mat_y,
-normal_mat_x.T,
-normal_mat_y.T,
normal_mat_x,
normal_mat_y,
radius_mat,
) * domain.curve_weights.reshape(-1)
D_qbx_ext = qbx_exp_dlp(
node_mat_x.T,
node_mat_y.T,
node_mat_x,
node_mat_y,
normal_mat_x.T,
normal_mat_y.T,
normal_mat_x,
normal_mat_y,
radius_mat,
) * domain.curve_weights.reshape(-1)
S_qbx = qbx_exp_slp(
node_mat_x.T,
node_mat_y.T,
node_mat_x,
node_mat_y,
normal_mat_x.T,
normal_mat_y.T,
normal_mat_x,
normal_mat_y,
radius_mat,
) * domain.curve_weights.reshape(-1)
rhs = rhs.reshape(-1)
# averaging interior exterior limits
A = (D_qbx_int + D_qbx_ext) * 0.5 + 0.5 * np.identity(total_points)
# or compute one side use jump relations: less work and faster
# has spectral convergence problems though
# A = D_qbx_int + np.identity(total_points)
A -= alpha * S_qbx * 1j
# adding images and sommerfeld contribution
if ("som_sp" in kwargs.keys()) and ("som_dp" in kwargs.keys()):
som_sp = kwargs["som_sp"]
som_dp = kwargs["som_dp"]
S_som = som_sp(
node_mat_x.T, node_mat_y.T, node_mat_x, node_mat_y
) * domain.curve_weights.reshape(-1)
D_som = som_dp(
node_mat_x.T,
node_mat_y.T,
node_mat_x,
node_mat_y,
normal_mat_x,
normal_mat_y,
) * domain.curve_weights.reshape(-1)
# A += S_som
A += D_som - alpha * 1j * S_som
if "imgs" in kwargs.keys():
imgs = kwargs["imgs"]
S_img, D_img = imgs.eval_integral(
(node_mat_x.T, node_mat_y.T),
(node_mat_x, node_mat_y),
normal_mat_x,
normal_mat_y,
) * domain.curve_weights.reshape(-1)
# A += S_img
A += D_img - alpha * 1j * S_img
soln_density, msg = gmres(A, rhs, tol=1e-11, callback=counter)
print("GMRES iter:", counter.niter)
return soln_density.reshape(n, -1)
def eval_target(
targets, sources, weights, source_normals, density, sp, dp, alpha, **kwargs
):
"""
evaluates the potential at target locations
"""
normals_x, normals_y = source_normals.reshape(2, -1)
nodes_x, nodes_y = sources.reshape(2, -1)
target_number = targets.shape[1]
total_points = nodes_x.shape[0]
test_normal_mat_x = np.broadcast_to(normals_x, (target_number, total_points))
test_normal_mat_y = np.broadcast_to(normals_y, (target_number, total_points))
sources_mat_x = np.broadcast_to(nodes_x, (target_number, total_points))
sources_mat_y = np.broadcast_to(nodes_y, (target_number, total_points))
targets_mat_x = np.broadcast_to(targets[0], (total_points, target_number)).T
targets_mat_y = np.broadcast_to(targets[1], (total_points, target_number)).T
D = dp(
targets_mat_x,
targets_mat_y,
sources_mat_x,
sources_mat_y,
test_normal_mat_x,
test_normal_mat_y,
) * weights.reshape(-1)
S = sp(
targets_mat_x, targets_mat_y, sources_mat_x, sources_mat_y
) * weights.reshape(-1)
DLP_eval = (D - alpha * S * 1j) @ density.reshape(-1)
# adding images and sommerfeld contribution
if ("som_sp" in kwargs.keys()) and ("som_dp" in kwargs.keys()):
som_sp = kwargs["som_sp"]
som_dp = kwargs["som_dp"]
S_som = som_sp(
targets_mat_x, targets_mat_y, sources_mat_x, sources_mat_y
) * weights.reshape(-1)
D_som = som_dp(
targets_mat_x,
targets_mat_y,
sources_mat_x,
sources_mat_y,
test_normal_mat_x,
test_normal_mat_y,
) * weights.reshape(-1)
# DLP_eval += S_som @ density.reshape(-1)
DLP_eval += (D_som - alpha * S_som * 1j) @ density.reshape(-1)
if "imgs" in kwargs.keys():
imgs = kwargs["imgs"]
S_img, D_img = imgs.eval_integral(
(targets_mat_x, targets_mat_y),
(sources_mat_x, sources_mat_y),
test_normal_mat_x,
test_normal_mat_y,
) * weights.reshape(-1)
# DLP_eval += S_img @ density.reshape(-1)
DLP_eval += (D_img - alpha * S_img * 1j) @ density.reshape(-1)
return DLP_eval
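# Hedged usage sketch: builds the lambdified Sommerfeld kernels defined above for
# illustrative parameter values (k, beta, interval chosen arbitrarily) and
# evaluates them at one hypothetical target/source pair.
if __name__ == "__main__":
    som_sp, som_dp = sommerfeld(k=2.0, beta=1.0, interval=3, exp_type="full")
    # target (x_1, x_2) = (0.5, 0.2), source (y_1, y_2) = (0.1, 0.3),
    # source normal (ny_1, ny_2) = (0.0, 1.0)
    print(som_sp(0.5, 0.2, 0.1, 0.3))
    print(som_dp(0.5, 0.2, 0.1, 0.3, 0.0, 1.0))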
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from manga_py import main
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
import json
import traceback
import logging
import copy
import inspect
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs # type: ignore
from requests.status_codes import _codes
from tavern.schemas.extensions import get_wrapped_response_function
from tavern.util.dict_util import recurse_access_key, deep_dict_merge, assign_value
from tavern.util.exceptions import TestFailError
from tavern.response.base import BaseResponse, indent_err_text
from tavern.testutils import comparators
logger = logging.getLogger(__name__)
comparators_functions = inspect.getmembers(comparators, inspect.isfunction)
validate_rule = {}
class RestResponse(BaseResponse):
def __init__(self, session, name, expected, test_block_config):
# pylint: disable=unused-argument
super(RestResponse, self).__init__()
defaults = {
'status_code': 200
}
self.name = name
body = expected.get("body") or {}
if "$ext" in body:
self.validate_function = get_wrapped_response_function(body["$ext"])
else:
self.validate_function = None
self.expected = deep_dict_merge(defaults, expected)
self.response = None
self.test_block_config = test_block_config
self.tavern_box = self.test_block_config["variables"]["tavern"]
self.status_code = None
def check_code(code):
if code not in _codes:
logger.warning("Unexpected status code '%s'", code)
if isinstance(self.expected["status_code"], int):
check_code(self.expected["status_code"])
else:
for code in self.expected["status_code"]:
check_code(code)
def __str__(self):
if self.response:
return self.response.text.strip()
else:
return "<Not run yet>"
def _verbose_log_response(self, response):
"""Verbosely log the response object, with query params etc."""
logger.info("Response: '%s'", response)
def log_dict_block(block, name):
if block:
to_log = name + ":"
if isinstance(block, list):
for v in block:
to_log += "\n - {}".format(v)
elif isinstance(block, dict):
for k, v in block.items():
to_log += "\n {}: {}".format(k, v)
else:
to_log += "\n {}".format(block)
logger.debug(to_log)
log_dict_block(response.headers, "Headers")
try:
log_dict_block(response.json(), "Body")
except ValueError:
pass
redirect_query_params = self._get_redirect_query_params(response)
if redirect_query_params:
parsed_url = urlparse(response.headers["location"])
to_path = "{0}://{1}{2}".format(*parsed_url)
logger.debug("Redirect location: %s", to_path)
log_dict_block(redirect_query_params, "Redirect URL query parameters")
def _get_redirect_query_params(self, response):
"""If there was a redirect header, get any query parameters from it
"""
try:
redirect_url = response.headers["location"]
except KeyError as e:
if "redirect_query_params" in self.expected.get("save", {}):
self._adderr("Wanted to save %s, but there was no redirect url in response",
self.expected["save"]["redirect_query_params"], e=e)
redirect_query_params = {}
else:
parsed = urlparse(redirect_url)
qp = parsed.query
redirect_query_params = {i:j[0] for i, j in parse_qs(qp).items()}
return redirect_query_params
def _check_status_code(self, status_code, body):
expected_code = self.expected["status_code"]
if (isinstance(expected_code, int) and status_code == expected_code) or \
(isinstance(expected_code, list) and (status_code in expected_code)):
logger.debug("Status code '%s' matched expected '%s'", status_code, expected_code)
return
else:
if 400 <= status_code < 500:
# special case if there was a bad request. This assumes that the
# response would contain some kind of information as to why this
# request was rejected.
self._adderr("Status code was %s, expected %s:\n%s",
status_code, expected_code,
indent_err_text(json.dumps(body)),
)
else:
self._adderr("Status code was %s, expected %s",
status_code, expected_code)
def generate_validate_rule(self):
for (name, func) in comparators_functions:
validate_rule[name] = func
def verify(self, response):
"""Verify response against expected values and returns any values that
we wanted to save for use in future requests
There are various ways to 'validate' a block - a specific function, just
matching values, validating a schema, etc...
Args:
response (requests.Response): response object
Returns:
dict: Any saved values
Raises:
TestFailError: Something went wrong with validating the response
"""
# pylint: disable=too-many-statements
self._verbose_log_response(response)
self.response = response
self.status_code = response.status_code
request = self.tavern_box.request_vars
try:
body = dict(response.json())
except ValueError:
body = None
self._check_status_code(response.status_code, body)
if self.validate_function:
try:
self.validate_function(response)
except Exception as e: #pylint: disable=broad-except
self._adderr("Error calling validate function '%s':\n%s",
self.validate_function.func,
indent_err_text(traceback.format_exc()),
e=e)
if not validate_rule:
self.generate_validate_rule()
# validate
if "validate" in self.expected:
validate = self.expected["validate"]
assign_value(expected = validate, request = request, response = body, context = self.test_block_config["variables"])
for i in validate:
for key, value in i.items():
try:
validate_rule[key](*value)
except Exception as e:
raise AssertionError("{} error, expected {} got {}".format(key, *value))
# Get any keys to save
saved = {}
redirect_query_params = self._get_redirect_query_params(response)
saved.update(self._save_value("request", request))
saved.update(self._save_value("body", body))
saved.update(self._save_value("headers", response.headers))
saved.update(self._save_value("redirect_query_params", redirect_query_params))
for cookie in self.expected.get("cookies", []):
if cookie not in response.cookies:
self._adderr("No cookie named '%s' in response", cookie)
try:
wrapped = get_wrapped_response_function(self.expected["save"]["$ext"])
except KeyError:
logger.debug("No save function for this stage")
else:
try:
to_save = wrapped(response)
except Exception as e: #pylint: disable=broad-except
self._adderr("Error calling save function '%s':\n%s",
wrapped.func,
indent_err_text(traceback.format_exc()),
e=e)
else:
if isinstance(to_save, dict):
saved.update(to_save)
elif to_save is not None:
self._adderr("Unexpected return value '%s' from $ext save function")
self._validate_block("body", body)
self._validate_block("headers", response.headers)
self._validate_block("redirect_query_params", redirect_query_params)
if self.errors:
raise TestFailError("Test '{:s}' failed:\n{:s}".format(self.name, self._str_errors()), failures=self.errors)
return saved
def _validate_block(self, blockname, block):
"""Validate a block of the response
Args:
blockname (str): which part of the response is being checked
block (dict): The actual part being checked
"""
try:
expected_block = self.expected[blockname] or {}
except KeyError:
expected_block = {}
if isinstance(expected_block, dict):
special = ["$ext"]
# This has to be a dict at the moment - might be possible at some
# point in future to allow a list of multiple ext functions as well
# but would require some changes in init. Probably need to abtract
# out the 'checking' a bit more.
for s in special:
try:
expected_block.pop(s)
except KeyError:
pass
if blockname == "headers":
# Special case for headers. These need to be checked in a case
# insensitive manner
block = {i.lower(): j for i, j in block.items()}
expected_block = {i.lower(): j for i, j in expected_block.items()}
logger.debug("Validating response %s against %s", blockname, expected_block)
# 'strict' could be a list, in which case we only want to enable strict
# key checking for that specific bit of the response
test_strictness = self.test_block_config["strict"]
if isinstance(test_strictness, list):
block_strictness = (blockname in test_strictness)
else:
block_strictness = test_strictness
self.recurse_check_key_match(expected_block, block, blockname, block_strictness)
def _save_value(self, key, to_check):
"""Save a value in the response for use in future tests
Args:
to_check (dict): An element of the response from which the given key
is extracted
key (str): Key to use
Returns:
dict: dictionary of save_name: value, where save_name is the key we
wanted to save this value as
"""
espec = self.expected
saved = {}
try:
expected = espec["save"][key]
except KeyError:
logger.debug("Nothing expected to save for %s", key)
return {}
if not to_check:
self._adderr("No %s in response (wanted to save %s)",
key, expected)
else:
for save_as, joined_key in expected.items():
split_key = joined_key.split(".")
try:
saved[save_as] = recurse_access_key(to_check, copy.copy(split_key))
except (IndexError, KeyError) as e:
self._adderr("Wanted to save '%s' from '%s', but it did not exist in the response",
joined_key, key, e=e)
if saved:
logger.debug("Saved %s for '%s' from response", saved, key)
return saved
|
from logging import getLogger
logger = getLogger(__name__)
|
import shlex
import diskspace
import argparse
import re
from . import argpar, utils, pathRender
def shell():
arguments = argpar.getarg()
parser = argpar.Arguments(description="Making it possible to use Linux df & du command on Windows", add_help=False)
parser.add_argument('--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')
parser.add_argument('-v', '--version', action='store_true', help='Show the version number and exit')
parser.add_argument('-bl', '--blacklist', nargs='+', help='Exclude name or file extensions matching arguments', default=None)
parser.add_argument('-wl', '--whitelist', nargs='+', help='Only include name or file extensions matching arguments', default=None)
parser.add_argument('-h', '--human', action='store_true', help='Convert bytes in to readable format')
    parser.add_argument('-nf', '--nofolders', action='store_true', help='Ignore folders')
parser.add_argument('-p', '--path', nargs='+', help="Choose a different path to check diskspace")
parser.add_argument('-ns', '--nostats', action='store_true', help="Don't display disk space at top")
try:
arguments = arguments.replace("\\", "/")
args = parser.parse_args(shlex.split(arguments))
except Exception as e:
utils.exitcode(e)
if args.version:
utils.exitcode(f"DiskSpace version: {diskspace.__version__}")
    include_folders = not args.nofolders
    include_stats = not args.nostats
if args.path:
custom_path = " ".join(args.path)
        if not re.compile(r"^[A-Za-z]:/|^/").search(custom_path):
# Custom path from current dir
custom_path = "./" + " ".join(args.path)
else:
custom_path = "."
if args.blacklist and args.whitelist:
        utils.exitcode("You can't use a blacklist and a whitelist at the same time.")
print(pathRender.ShowPath(
path=custom_path,
        include_folder=include_folders,
        include_stats=include_stats,
whitelist=args.whitelist,
blacklist=args.blacklist,
human=args.human
).pretty_print)
def main():
try:
shell()
except KeyboardInterrupt:
print('\nCancelling...')
if __name__ == '__main__':
main()
|
import logging
from struct import unpack
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# https://msdn.microsoft.com/en-us/library/dd926359(v=office.12).aspx
def _parse_encryptionheader(blob):
(flags,) = unpack("<I", blob.read(4))
# if mode == 'strict': compare values with spec.
(sizeExtra,) = unpack("<I", blob.read(4))
(algId,) = unpack("<I", blob.read(4))
(algIdHash,) = unpack("<I", blob.read(4))
(keySize,) = unpack("<I", blob.read(4))
(providerType,) = unpack("<I", blob.read(4))
(reserved1,) = unpack("<I", blob.read(4))
(reserved2,) = unpack("<I", blob.read(4))
cspName = blob.read().decode("utf-16le")
header = {
"flags": flags,
"sizeExtra": sizeExtra,
"algId": algId,
"algIdHash": algIdHash,
"keySize": keySize,
"providerType": providerType,
"reserved1": reserved1,
"reserved2": reserved2,
"cspName": cspName,
}
return header
# https://msdn.microsoft.com/en-us/library/dd910568(v=office.12).aspx
def _parse_encryptionverifier(blob, algorithm):
(saltSize,) = unpack("<I", blob.read(4))
salt = blob.read(16)
encryptedVerifier = blob.read(16)
(verifierHashSize,) = unpack("<I", blob.read(4))
    if algorithm == "RC4":
        encryptedVerifierHash = blob.read(20)
    elif algorithm == "AES":
        encryptedVerifierHash = blob.read(32)
    else:
        raise ValueError("Unsupported algorithm for EncryptionVerifier: {}".format(algorithm))
verifier = {
"saltSize": saltSize,
"salt": salt,
"encryptedVerifier": encryptedVerifier,
"verifierHashSize": verifierHashSize,
"encryptedVerifierHash": encryptedVerifierHash,
}
return verifier
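# A minimal usage sketch (added for illustration, not part of the original module):
# feed synthetic bytes to the two parsers above. The field values are placeholders
# assembled with struct.pack purely to exercise the parsing logic, not a real
# Office EncryptionInfo stream.
if __name__ == "__main__":
    import io
    from struct import pack

    header_blob = io.BytesIO(
        pack("<8I", 0x24, 0, 0x660E, 0x8004, 128, 0x18, 0, 0)
        + "Example CSP".encode("utf-16le")
    )
    print(_parse_encryptionheader(header_blob))

    verifier_blob = io.BytesIO(
        pack("<I", 16) + b"\x00" * 16 + b"\x00" * 16 + pack("<I", 20) + b"\x00" * 32
    )
    print(_parse_encryptionverifier(verifier_blob, "AES"))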
|
#!/usr/bin/env python
import pytest
PUZZLE_INPUT = "273025-767253"
def increasing(number, submatching=False):
num = number
last = 0
repeated_chars = {}
for dec in [100_000, 10_000, 1_000, 100, 10, 1]:
a = num // dec
repeated_chars[a] = 1 if a not in repeated_chars else repeated_chars[a] + 1
num -= a * dec
        if a < last:
            return False
        last = a
if submatching:
return 2 in repeated_chars.values()
else:
        return any(count >= 2 for count in repeated_chars.values())
def increasing_part1(number):
return increasing(number, submatching=False)
def increasing_part2(number):
return increasing(number, submatching=True)
def solve1(start, end):
return len(set(filter(increasing_part1, range(start, end + 1))))
def solve2(start, end):
return len(set(filter(increasing_part2, range(start, end + 1))))
@pytest.mark.parametrize(
"number, result", [(123455, True), (122345, True), (412345, False), (123444, True)]
)
def test_increasing1(number, result):
assert result == increasing_part1(number)
@pytest.mark.parametrize(
"number, result",
[(123455, True), (122345, True), (412345, False), (111122, True), (123444, False)],
)
def test_increasing2(number, result):
assert result == increasing_part2(number)
def main():
start, end = map(int, PUZZLE_INPUT.split("-"))
print("Part1=" + str(solve1(start, end)))
print("Part2=" + str(solve2(start, end)))
if __name__ == "__main__":
main()
|
# Copyright 2020 William Ro. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====-==============================================================-==========
"""City means node"""
from ..spqr import censor
from ..console.console import console
from . import hall
import socket
class City(object):
"""
"""
def __init__(self, ip='', port=0, name='Roma', backlog=5, libs=None):
""""""
# Public attributes
self.name = censor.check_type(name, str)
# : Private attributes
# Check libs and instantiate city hall
if libs is None: libs = []
elif isinstance(libs, str): libs = [libs]
self._hall = hall.Hall(self, *censor.check_type(libs, inner_type=str))
self._backlog = censor.check_type(backlog, int)
# initiate a socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind socket
self.socket.bind((ip, port))
self._ip, self._port = self.socket.getsockname()
console.show_status('City {} (located in {}) has been created.'.format(
self.name, console.fancify(self.location_str, 'blue', 'underline')))
# region: Properties
@property
def location(self):
return self._ip, self._port
@property
def location_str(self):
return '{}:{}'.format(self._ip, self._port)
# endregion: Properties
# region: Private Methods
def _host(self):
console.show_status('{} is hosting ...'.format(self.name))
# endregion: Private Methods
# region: Public Methods
def enter_hall(self):
"""Enter the hall of this city"""
console.show_status('Entered city hall.')
while True:
console.write('<= ', color='green')
cmd = input()
try:
if self._hall.handle(cmd): return
except Exception as e:
console.show_status(
'Fail to execute command \'{}\'. Error message:'.format(cmd),
prompt='!!', color='red')
console.supplement(e, color='red')
# endregion: Public Methods
"""
# Put this city on listen mode
self._socket.listen(backlog)
"""
|
config = {'train': False, # train or run the model
'show_game': True, # when training/evaluating it is much faster to not display the game graphics
'print_score': 10000, # print when a multiple of this score is reached
'max_score': 10000000, # end the episode and update q-table when reaching this score
'resume_score': 100000, # if dies above this score, resume training from this difficult segment
}
|
# Description:
#  - Extracts the license of a GitHub repository.
# Requirements:
#  - You will need to provide your GitHub token (read from the configuration file).
# Input:
#  - File LibraryData.json, which contains all the library repositories.
# Output:
#  - A pickle file called license.pkl containing a dictionary whose keys are library
#    repositories and whose values are strings with the license used in each repository:
#    [library repository] => [repository license]
# How to run:
#  - Just run the script.
import os
import pickle
from github import Github, Repository, GitTag
from github.GithubException import UnknownObjectException
import getpass
import json
from scripts.CommonUtilities import Common_Utilities
from scripts.SharedFiles.utility_tool import read_json_file
import django
import pygal
import traceback
from librarycomparison.models import Library
def loadLicenseData():
data = {}
filename = 'scripts/License/license.pkl'
if os.path.isfile(filename):
        with open(filename, 'rb') as infile:
            try:
                print("Loading data")
                data = pickle.load(infile)
except EOFError:
print("Failed to load pickle file")
return data
def saveData(data):
with open('scripts/License/license.pkl', 'wb') as output:
pickle.dump(data, output, pickle.DEFAULT_PROTOCOL)
def getLicenses():
config_dict = Common_Utilities.read_config_file() # read all ini data
token = config_dict["TOKEN"]
data = loadLicenseData()
github = Github(token)
libraries = Library.objects.all()
for library in libraries:
print ("Getting license for ", library.name)
try:
repo = github.get_repo(library.github_repo)
data[library.github_repo] = repo.get_license().license.name
saveData(data)
except UnknownObjectException:
print("ERROR: could not get license for lib", library.name)
traceback.print_exc()
data[library.github_repo] = 'None'
saveData(data)
continue
if __name__ == "__main__":
getLicenses()
|
from threedi_modelchecker.checks.base import CheckLevel
from threedi_modelchecker.checks.factories import generate_enum_checks
from threedi_modelchecker.checks.factories import generate_foreign_key_checks
from threedi_modelchecker.checks.factories import generate_geometry_checks
from threedi_modelchecker.checks.factories import generate_not_null_checks
from threedi_modelchecker.checks.factories import generate_unique_checks
from threedi_modelchecker.threedi_model import models
def test_gen_foreign_key_checks():
foreign_key_checks = generate_foreign_key_checks(models.Manhole.__table__)
assert len(foreign_key_checks) == 1
fk_check = foreign_key_checks[0]
assert models.Manhole.connection_node_id == fk_check.column
assert models.ConnectionNode.id == fk_check.reference_column
def test_gen_not_unique_checks():
not_unique_checks = generate_unique_checks(models.Manhole.__table__)
assert len(not_unique_checks) == 1
assert models.Manhole.id == not_unique_checks[0].column
def test_gen_not_null_checks():
not_null_checks = generate_not_null_checks(models.Manhole.__table__)
assert len(not_null_checks) == 3
not_null_check_columns = [check.column for check in not_null_checks]
assert models.Manhole.id in not_null_check_columns
def test_gen_geometry_check():
geometry_checks = generate_geometry_checks(models.ConnectionNode.__table__)
assert len(geometry_checks) == 2
geometry_check_columns = [check.column for check in geometry_checks]
assert models.ConnectionNode.the_geom in geometry_check_columns
def test_gen_enum_checks():
enum_checks = generate_enum_checks(models.BoundaryConditions2D.__table__)
assert len(enum_checks) == 1
assert enum_checks[0].column == models.BoundaryConditions2D.boundary_type
def test_gen_enum_checks_varcharenum():
enum_checks = generate_enum_checks(models.AggregationSettings.__table__)
assert len(enum_checks) == 2
enum_check_columns = [check.column for check in enum_checks]
assert models.AggregationSettings.aggregation_method in enum_check_columns
assert models.AggregationSettings.flow_variable in enum_check_columns
def test_gen_enum_checks_custom_mapping():
enum_checks = generate_enum_checks(
models.AggregationSettings.__table__,
custom_level_map={"aggregation_method": "WARNING"},
)
assert len(enum_checks) == 2
checks = {check.column.name: check for check in enum_checks}
assert checks["aggregation_method"].level == CheckLevel.WARNING
assert checks["flow_variable"].level == CheckLevel.ERROR
|
from usps_webtools.tracking import PackageTracking
from usps_webtools.ziptools import zipByAddress, zipByCityState, cityByZip
|
# The MIT License (MIT)
# Copyright (c) 2018 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import urllib.parse
import urllib.request
from typing import List, Tuple, Optional, Any, Union
from tornado import gen, ioloop, websocket
from cate.conf.defaults import WEBAPI_WORKSPACE_TIMEOUT, WEBAPI_RESOURCE_TIMEOUT, WEBAPI_PLOT_TIMEOUT
from cate.core.workspace import Workspace, OpKwArgs
from cate.core.wsmanag import WorkspaceManager
from cate.util.misc import encode_url_path
from cate.util.monitor import Monitor
from cate.util.safe import safe_eval
from cate.util.web.serviceinfo import join_address_and_port
__author__ = "Norman Fomferra (Brockmann Consult GmbH)"
class WebAPIWorkspaceManager(WorkspaceManager):
"""
Implementation of the WorkspaceManager interface against a WebSocket using a JSON RPC protocol.
"""
def __init__(self, service_info: dict, conn_timeout: float = 5, rpc_timeout: float = 120):
address = service_info.get('address', None) or 'localhost'
port = service_info.get('port', None)
if not port:
raise ValueError('missing "port" number in service_info argument')
self.base_url = f'http://{join_address_and_port(address, port)}'
self.ws_url = f'ws://{join_address_and_port(address, port)}/api'
self.ws_client = WebSocketClient(self.ws_url)
self.ws_client.connect(conn_timeout)
self.rpc_timeout = rpc_timeout
def _url(self, path_pattern: str, path_args: dict = None, query_args: dict = None) -> str:
return self.base_url + encode_url_path(path_pattern, path_args=path_args, query_args=query_args)
def _invoke_method(self, method, params, timeout: float = None,
monitor: Monitor = Monitor.NONE):
rpc_response = self.ws_client.invoke_method(method, params, timeout=timeout, monitor=monitor)
error_info = rpc_response.get('error')
if error_info:
WebAPIWorkspaceManager._raise_error(error_info)
return rpc_response.get('response')
def _fetch_json(self, url, data=None, timeout: float = None):
with urllib.request.urlopen(url, data=data, timeout=timeout or self.rpc_timeout) as response:
json_text = response.read()
json_response = json.loads(json_text.decode('utf-8'))
status = json_response.get('status')
if status == 'error':
WebAPIWorkspaceManager._raise_error(json_response.get('error'))
return json_response.get('content')
@staticmethod
def _raise_error(error_info):
exc_type = None
if error_info:
message = error_info.get('message') or ''
error_ex_info = error_info.get('data')
if error_ex_info:
exc_type_name = error_ex_info.get('exception')
if exc_type_name:
# noinspection PyBroadException
try:
exc_type = safe_eval(exc_type_name)
except Exception:
pass
# TODO (forman): find out how can we preserve traceback without adding it to the message string
# tb = error_ex_info.get('traceback')
else:
message = 'Unknown error from WebAPI service.'
exc = None
if exc_type:
# noinspection PyBroadException
try:
exc = exc_type(message)
except Exception:
pass
if exc is None:
exc = RuntimeError(message)
raise exc
# noinspection PyMethodMayBeStatic
def _query(self, **kwargs):
return {key: value for key, value in kwargs.items() if value is not None}
def _post_data(self, **kwargs):
data = urllib.parse.urlencode(self._query(**kwargs))
return data.encode() if data else None
@classmethod
def get_traceback_header(cls) -> str:
traceback_title = 'Cate WebAPI service traceback'
traceback_line = len(traceback_title) * '='
return '\n' + traceback_line + '\n' + traceback_title + '\n' + traceback_line + '\n'
@property
def root_path(self) -> Optional[str]:
return None
def resolve_path(self, path: str) -> str:
return path
def resolve_workspace_dir(self, path_or_name: str) -> str:
return path_or_name
def get_open_workspaces(self) -> List[Workspace]:
json_list = self._invoke_method("get_open_workspaces", dict(), timeout=WEBAPI_WORKSPACE_TIMEOUT)
return [Workspace.from_json_dict(ws_json_dict) for ws_json_dict in json_list]
def list_workspace_names(self) -> List[str]:
json_list = self._invoke_method("list_workspace_names", dict(), timeout=WEBAPI_WORKSPACE_TIMEOUT)
return json_list
def get_workspace(self, base_dir: str) -> Workspace:
json_dict = self._invoke_method("get_workspace", dict(base_dir=base_dir), timeout=WEBAPI_WORKSPACE_TIMEOUT)
return Workspace.from_json_dict(json_dict)
def new_workspace(self, base_dir: str, description: str = None) -> Workspace:
json_dict = self._invoke_method("new_workspace", dict(base_dir=base_dir, description=description),
timeout=WEBAPI_WORKSPACE_TIMEOUT)
return Workspace.from_json_dict(json_dict)
def open_workspace(self, base_dir: str, monitor: Monitor = Monitor.NONE) -> Workspace:
json_dict = self._invoke_method("open_workspace", dict(base_dir=base_dir),
timeout=WEBAPI_WORKSPACE_TIMEOUT,
monitor=monitor)
return Workspace.from_json_dict(json_dict)
def close_workspace(self, base_dir: str) -> None:
self._invoke_method("close_workspace", dict(base_dir=base_dir), timeout=WEBAPI_WORKSPACE_TIMEOUT)
def close_all_workspaces(self) -> None:
self._invoke_method("close_all_workspaces", dict(), timeout=WEBAPI_WORKSPACE_TIMEOUT)
def save_workspace_as(self, base_dir: str, to_dir: str, monitor: Monitor = Monitor.NONE) -> Workspace:
json_dict = self._invoke_method("save_workspace_as",
dict(base_dir=base_dir, to_dir=to_dir),
timeout=WEBAPI_WORKSPACE_TIMEOUT,
monitor=monitor)
return Workspace.from_json_dict(json_dict)
def save_workspace(self, base_dir: str, monitor: Monitor = Monitor.NONE) -> Workspace:
json_dict = self._invoke_method("save_workspace", dict(base_dir=base_dir),
timeout=WEBAPI_WORKSPACE_TIMEOUT,
monitor=monitor)
return Workspace.from_json_dict(json_dict)
def save_all_workspaces(self, monitor: Monitor = Monitor.NONE) -> None:
self._invoke_method("save_all_workspaces", dict(), timeout=WEBAPI_WORKSPACE_TIMEOUT, monitor=monitor)
def delete_workspace(self, base_dir: str, remove_completely: bool = False) -> None:
self._invoke_method("delete_workspace",
dict(base_dir=base_dir, remove_completely=remove_completely),
timeout=WEBAPI_WORKSPACE_TIMEOUT)
def clean_workspace(self, base_dir: str) -> Workspace:
json_dict = self._invoke_method("clean_workspace", dict(base_dir=base_dir), timeout=WEBAPI_WORKSPACE_TIMEOUT)
return Workspace.from_json_dict(json_dict)
def run_op_in_workspace(self, base_dir: str, op_name: str, op_args: OpKwArgs,
monitor: Monitor = Monitor.NONE) -> Union[Any, None]:
return self._invoke_method("run_op_in_workspace",
dict(base_dir=base_dir, op_name=op_name, op_args=op_args),
timeout=WEBAPI_WORKSPACE_TIMEOUT,
monitor=monitor)
def delete_workspace_resource(self, base_dir: str, res_name: str) -> Workspace:
json_dict = self._invoke_method("delete_workspace_resource",
dict(base_dir=base_dir, res_name=res_name),
timeout=WEBAPI_RESOURCE_TIMEOUT)
return Workspace.from_json_dict(json_dict)
def set_workspace_resource_persistence(self, base_dir: str, res_name: str, persistent: bool) -> Workspace:
json_dict = self._invoke_method("set_workspace_resource_persistence",
dict(base_dir=base_dir, res_name=res_name, persistent=persistent),
timeout=WEBAPI_RESOURCE_TIMEOUT)
return Workspace.from_json_dict(json_dict)
def set_workspace_resource(self,
base_dir: str,
op_name: str,
op_args: OpKwArgs,
res_name: Optional[str] = None,
overwrite: bool = False,
monitor: Monitor = Monitor.NONE) -> Tuple[Workspace, str]:
json_list = self._invoke_method("set_workspace_resource",
dict(base_dir=base_dir, res_name=res_name, op_name=op_name,
op_args=op_args, overwrite=overwrite),
timeout=WEBAPI_RESOURCE_TIMEOUT,
monitor=monitor)
return Workspace.from_json_dict(json_list[0]), json_list[1]
def rename_workspace_resource(self, base_dir: str,
res_name: str, new_res_name: str) -> Workspace:
json_dict = self._invoke_method("rename_workspace_resource",
dict(base_dir=base_dir, res_name=res_name, new_res_name=new_res_name),
timeout=WEBAPI_RESOURCE_TIMEOUT)
return Workspace.from_json_dict(json_dict)
def write_workspace_resource(self, base_dir: str, res_name: str,
file_path: str, format_name: str = None,
monitor: Monitor = Monitor.NONE) -> None:
self._invoke_method("write_workspace_resource",
dict(base_dir=base_dir, res_name=res_name,
file_path=file_path, format_name=format_name),
timeout=WEBAPI_RESOURCE_TIMEOUT)
def plot_workspace_resource(self, base_dir: str, res_name: str,
var_name: str = None, file_path: str = None,
monitor: Monitor = Monitor.NONE) -> None:
url = self._url('/ws/res/plot/{base_dir}/{res_name}',
path_args=dict(base_dir=base_dir, res_name=res_name),
query_args=self._query(var_name=var_name, file_path=file_path))
self._fetch_json(url, timeout=WEBAPI_RESOURCE_TIMEOUT + WEBAPI_PLOT_TIMEOUT)
def print_workspace_resource(self, base_dir: str, res_name_or_expr: str = None,
monitor: Monitor = Monitor.NONE) -> None:
self._invoke_method("print_workspace_resource",
dict(base_dir=base_dir, res_name_or_expr=res_name_or_expr),
timeout=WEBAPI_RESOURCE_TIMEOUT,
monitor=monitor)
def _create_scratch_dir(self, scratch_dir_name: str) -> str:
return ''
def _resolve_target_path(self, target_dir: str) -> str:
return ''
class WebSocketClient(object):
def __init__(self, url):
self.url = url
self.connection = None
self.current_method_id = 1
def connect(self, timeout: float):
ioloop.IOLoop.current().run_sync(self._connect, timeout=timeout)
def invoke_method(self, method, params, timeout, monitor: Monitor) -> dict:
json_rpc_request = self._new_rpc_request(method, params)
def do_json_rpc() -> dict:
return _do_json_rpc(self.connection, json_rpc_request, monitor)
return ioloop.IOLoop.current().run_sync(do_json_rpc, timeout=timeout)
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
@gen.coroutine
def _connect(self):
self.connection = yield websocket.websocket_connect(self.url)
def _new_rpc_request(self, method_name, method_params) -> dict:
return dict(jsonrpc='2.0',
id=self._new_method_id(),
method=method_name,
params=method_params)
def _new_method_id(self) -> int:
new_method_id = self.current_method_id
self.current_method_id += 1
return new_method_id
@gen.coroutine
def _do_json_rpc(web_socket, rpc_request: dict, monitor: Monitor) -> dict:
web_socket.write_message(json.dumps(rpc_request))
work_reported = None
started = False
    while monitor is None or not monitor.is_cancelled():
response_str = yield web_socket.read_message()
rpc_response = json.loads(response_str)
if 'progress' in rpc_response:
if monitor:
progress = rpc_response['progress']
total = progress.get('total')
label = progress.get('label')
worked = progress.get('worked')
msg = progress.get('message')
if not started:
monitor.start(label or "start", total_work=total)
started = True
if started:
if worked:
if work_reported is None:
work_reported = 0.0
work = worked - work_reported
work_reported = worked
else:
work = None
monitor.progress(work=work, msg=msg)
else:
if monitor and started:
monitor.done()
return rpc_response
return {}
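# A minimal usage sketch (added for illustration, not part of the original module):
# connect to an already-running Cate WebAPI service and list its workspaces. The
# address and port below are placeholders; WebSocketClient.connect() will fail if
# no service is listening there.
if __name__ == '__main__':
    manager = WebAPIWorkspaceManager({'address': 'localhost', 'port': 9090})
    try:
        print(manager.list_workspace_names())
    finally:
        manager.ws_client.close()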
|
import pygame, random, sys
from GameColor import *
from pygame.locals import *
WINDOWWIDTH = 800
WINDOWHEIGHT = 600
FPS = 60
REVEALSPEED = 8
BOXSIZE = 40
GAPSIZE = 10
BOARDWIDTH = 4
BOARDHEIGHT = 3
assert (BOARDHEIGHT * BOARDWIDTH) % 2 == 0, 'Board must contain an even number of boxes'
XMARGIN = int((WINDOWWIDTH - (BOARDWIDTH * (BOXSIZE + GAPSIZE))) / 2)
YMARGIN = int((WINDOWHEIGHT - (BOARDHEIGHT * (BOXSIZE + GAPSIZE))) / 2)
BGCOLOR = NAVYBLUE
LIGHTBGCOLOR = GRAY
BOXCOLOR = WHITE
HIGHLIGHTCOLOR = BLUE
DONUT = 'donut'
SQUARE = 'square'
DIAMOND = 'diamond'
LINES = 'lines'
OVAL = 'oval'
ALLCOLORS = (RED, GREEN, BLUE, YELLOW, ORANGE, PURPLE, CYAN)
ALLSHAPES = (DONUT, SQUARE, DIAMOND, LINES, OVAL)
assert len(ALLCOLORS) * len(ALLSHAPES) * 2 >= BOARDHEIGHT * BOARDWIDTH, "Board is too big for the number of shape/color combinations"
def main():
global DISPLAYER_SURF, FPSCLOCK
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYER_SURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
mouseX = 0
mouxeY = 0
pygame.display.set_caption('Memory Puzzle')
mainboard = getRandomizedBoard()
revealedBoxes = generateRevealedBoxedData(False)
firstSelection = None
DISPLAYER_SURF.fill(BGCOLOR)
startGameAnimation(mainboard)
while True:
mouseClicked = False
DISPLAYER_SURF.fill(BGCOLOR)
drawBoard(mainboard, revealedBoxes)
for e in pygame.event.get():
if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE):
pygame.quit()
sys.exit()
elif e.type == MOUSEMOTION:
mouseX, mouxeY = e.pos
elif e.type == MOUSEBUTTONUP:
mouseX, mouxeY = e.pos
mouseClicked = True
boxx, boxy = getBoxAtPixel(mouseX, mouxeY)
if boxx is not None and boxy is not None:
if not revealedBoxes[boxx][boxy]:
drawHighLightBox(boxx, boxy)
if not revealedBoxes[boxx][boxy] and mouseClicked:
revealBoxesAnimation(mainboard, [(boxx, boxy)])
revealedBoxes[boxx][boxy] = True
if firstSelection is None:
firstSelection = (boxx, boxy)
else:
icon1shape, icon1color = getShapeAndColor(mainboard, boxx, boxy)
icon2shape, icon2color = getShapeAndColor(mainboard, firstSelection[0], firstSelection[1])
if icon1shape != icon2shape or icon1color != icon2color:
pygame.time.wait(1000)
coverBoxesAnimation(mainboard, [(firstSelection[0], firstSelection[1]), (boxx, boxy)])
revealedBoxes[firstSelection[0]][firstSelection[1]] = False
revealedBoxes[boxx][boxy] = False
elif hasWon(revealedBoxes):
gameWonAnimation(mainboard)
pygame.time.wait(2000)
mainboard = getRandomizedBoard()
revealedBoxes = generateRevealedBoxedData(False)
drawBoard(mainboard, revealedBoxes)
pygame.display.update()
pygame.time.wait(1000)
startGameAnimation(mainboard)
firstSelection = None
pygame.display.update()
FPSCLOCK.tick(FPS)
def generateRevealedBoxedData(val):
revealedBoxes = []
for i in range(BOARDWIDTH):
revealedBoxes.append([val] * BOARDHEIGHT)
return revealedBoxes
def getRandomizedBoard():
icons = []
for color in ALLCOLORS:
for shape in ALLSHAPES:
icons.append((shape, color))
random.shuffle(icons)
numIconUsed = int(BOARDWIDTH * BOARDHEIGHT / 2)
icons = icons[:numIconUsed] * 2
random.shuffle(icons)
board = []
for x in range(BOARDWIDTH):
column = []
for y in range(BOARDHEIGHT):
column.append(icons[0])
del icons[0]
board.append(column)
return board
def splitIntoGroupsOf(groupSize, theList):
result = []
for i in range(0, len(theList), groupSize):
result.append(theList[i:i + groupSize])
return result
def leftTopCoordsOfBox(boxx, boxy):
left = boxx * (BOXSIZE + GAPSIZE) + XMARGIN
top = boxy * (BOXSIZE + GAPSIZE) + YMARGIN
return (left, top)
def getBoxAtPixel(x, y):
for boxx in range(BOARDWIDTH):
for boxy in range(BOARDHEIGHT):
left, top = leftTopCoordsOfBox(boxx, boxy)
boxRect = pygame.Rect(left, top, BOXSIZE, BOXSIZE)
if boxRect.collidepoint(x, y):
return (boxx, boxy)
return (None, None)
def drawIcon(shape, color, boxx, boxy):
quarter = int(BOXSIZE * 0.25)
half = int(BOXSIZE * 0.5)
left, top = leftTopCoordsOfBox(boxx, boxy)
if shape == DONUT:
pygame.draw.circle(DISPLAYER_SURF, color, (left + half, top + half), half - 5)
        pygame.draw.circle(DISPLAYER_SURF, BGCOLOR, (left + half, top + half), quarter - 5)
elif shape == SQUARE:
pygame.draw.rect(DISPLAYER_SURF, color, (left + quarter, top + quarter, BOXSIZE - half, BOXSIZE - half))
elif shape == DIAMOND:
pygame.draw.polygon(DISPLAYER_SURF, color,
((left + half, top), (left + BOXSIZE - 1, top + half), (left + half, top + BOXSIZE - 1),
(left, top + half)))
elif shape == LINES:
for i in range(0, BOXSIZE, 4):
pygame.draw.line(DISPLAYER_SURF, color, (left, top + i), (left + i, top))
pygame.draw.line(DISPLAYER_SURF, color, (left + i, top + BOXSIZE - i), (left + BOXSIZE - i, top + i))
elif shape == OVAL:
pygame.draw.ellipse(DISPLAYER_SURF, color, (left, top + quarter, BOXSIZE, half))
def getShapeAndColor(board, boxx, boxy):
return board[boxx][boxy][0], board[boxx][boxy][1]
def drawBoxCovers(board, boxes, coverage):
for box in boxes:
left, top = leftTopCoordsOfBox(box[0], box[1])
pygame.draw.rect(DISPLAYER_SURF, BGCOLOR, (left, top, BOXSIZE, BOXSIZE))
shaper, color = getShapeAndColor(board, box[0], box[1])
drawIcon(shaper, color, box[0], box[1])
if coverage > 0:
pygame.draw.rect(DISPLAYER_SURF, BOXCOLOR, (left, top, coverage, BOXSIZE))
pygame.display.update()
FPSCLOCK.tick(FPS)
def revealBoxesAnimation(board, boxesToReveal):
for coverage in range(BOXSIZE, -1, -REVEALSPEED):
drawBoxCovers(board, boxesToReveal, coverage)
def coverBoxesAnimation(board, boxesToCover):
for coverage in range(0, BOXSIZE + REVEALSPEED, REVEALSPEED):
drawBoxCovers(board, boxesToCover, coverage)
def drawBoard(board, revealed):
for boxx in range(BOARDWIDTH):
for boxy in range(BOARDHEIGHT):
left, top = leftTopCoordsOfBox(boxx, boxy)
if not revealed[boxx][boxy]:
pygame.draw.rect(DISPLAYER_SURF, BOXCOLOR, (left, top, BOXSIZE, BOXSIZE))
else:
shape, color = getShapeAndColor(board, boxx, boxy)
drawIcon(shape, color, boxx, boxy)
def drawHighLightBox(boxx, boxy):
left, top = leftTopCoordsOfBox(boxx, boxy)
    pygame.draw.rect(DISPLAYER_SURF, HIGHLIGHTCOLOR, (left - 5, top - 5, BOXSIZE + 10, BOXSIZE + 10), 4)
def startGameAnimation(board):
coveredBoxes = generateRevealedBoxedData(False)
boxes = []
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
boxes.append((x, y))
random.shuffle(boxes)
boxGroups = splitIntoGroupsOf(8, boxes)
drawBoard(board, coveredBoxes)
for boxGroup in boxGroups:
revealBoxesAnimation(board, boxGroup)
coverBoxesAnimation(board, boxGroup)
def gameWonAnimation(board):
coveredBoxes = generateRevealedBoxedData(True)
color1 = LIGHTBGCOLOR
color2 = BGCOLOR
for i in range(13):
color1, color2 = color2, color1
DISPLAYER_SURF.fill(color1)
drawBoard(board, coveredBoxes)
pygame.display.update()
pygame.time.wait(300)
def hasWon(revealedBoxes):
for i in revealedBoxes:
if False in i:
return False
return True
if __name__ == '__main__':
main()
|
# Copyright (c) 2021 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""This module defines various convenience functions for generating shapes.
The methods here provide routes for generating instances of
:class:`~coxeter.shapes.Shape` based on certain pre-specified mappings.
"""
from .shapes import (
Circle,
ConvexPolygon,
ConvexPolyhedron,
ConvexSpheropolygon,
ConvexSpheropolyhedron,
Ellipse,
Ellipsoid,
Polygon,
Polyhedron,
Sphere,
)
def from_gsd_type_shapes(params, dimensions=3): # noqa: C901
"""Create a :class:`~.Shape` from a dict conforming to the GSD schema.
See :ref:`here <gsd:shapes>` for the specification of the schema. Note that
the schema does not differentiate between 2D and 3D shapes for spheres (vs.
circles) and ellipsoids (vs. ellipses) because in context the
dimensionality of those shapes can be inferred from simulation boxes. To
address this ambiguity, this function accepts a ``dimensions`` parameter
that can be used to disambiguate explicitly between these two cases.
Args:
params (dict):
The parameters of the shape to construct.
dimensions (int):
The dimensionality of the shape (either 2 or 3). Ignored except
when the shape is a sphere or an ellipsoid, in which case a value
of 2 is used to indicate generating a
:class:`~.shapes.Circle` or :class:`~.shapes.Ellipse`
instead of a :class:`~.shapes.Sphere` or
:class:`~.shapes.Ellipsoid` (Default value: 3).
Returns:
:class:`~coxeter.shapes.Shape`:
The desired shape.
"""
if "type" not in params:
raise ValueError(
"The parameters are malformed, there must be a type "
"key indicating what type of shape this is."
)
if params["type"] == "Sphere":
if dimensions == 2:
return Circle(params["diameter"] / 2)
else:
return Sphere(params["diameter"] / 2)
elif params["type"] == "Ellipsoid":
if dimensions == 2:
return Ellipse(params["a"], params["b"])
else:
return Ellipsoid(params["a"], params["b"], params["c"])
elif params["type"] == "Polygon":
if "rounding_radius" in params:
return ConvexSpheropolygon(params["vertices"], params["rounding_radius"])
else:
try:
return ConvexPolygon(params["vertices"])
except ValueError:
# If it's not a convex polygon, return a simple polygon.
return Polygon(params["vertices"])
elif params["type"] == "ConvexPolyhedron":
if "rounding_radius" in params:
return ConvexSpheropolyhedron(params["vertices"], params["rounding_radius"])
else:
return ConvexPolyhedron(params["vertices"])
elif params["type"] == "Mesh":
return Polyhedron(params["vertices"], params["faces"])
else:
raise ValueError("Unsupported shape type.")
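# A minimal usage sketch (added for illustration, not part of the original module):
# building shapes from GSD-style parameter dicts. The parameter values are made-up
# examples that satisfy the schema handled by from_gsd_type_shapes above.
if __name__ == "__main__":
    sphere = from_gsd_type_shapes({"type": "Sphere", "diameter": 2.0})
    circle = from_gsd_type_shapes({"type": "Sphere", "diameter": 2.0}, dimensions=2)
    square = from_gsd_type_shapes(
        {
            "type": "Polygon",
            "vertices": [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]],
        }
    )
    print(type(sphere).__name__, type(circle).__name__, type(square).__name__)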
|
def number_format(n):
return "{:,}".format(n)
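# Usage example (added for illustration): group digits with thousands separators.
if __name__ == "__main__":
    assert number_format(1234567) == "1,234,567"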
|
import re
CAMEL_CASE_RE = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def camel_to_snake(s: str) -> str:
"""Convert string from camel case to snake case."""
return CAMEL_CASE_RE.sub(r'_\1', s).strip().lower()
def snake_to_camel(s: str) -> str:
"""Convert string from snake case to camel case."""
fragments = s.split('_')
return fragments[0] + ''.join(x.title() for x in fragments[1:])
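# Usage examples (added for illustration); the expected values follow from the regex
# above, which inserts an underscore before each upper-case letter that starts a new word.
if __name__ == '__main__':
    assert camel_to_snake('requestPayload') == 'request_payload'
    assert camel_to_snake('getHTTPResponse') == 'get_http_response'
    assert snake_to_camel('request_payload') == 'requestPayload'
    assert snake_to_camel('get_http_response') == 'getHttpResponse'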
|
# -*- coding: utf-8 -*-
'''
otsu.fun - SSWA Utils
@version: 0.1
@author: PurePeace
@time: 2020-01-07
@describe: a treasure house!!!
'''
import time, datetime
# way to return utInfo, decorator
def messager(func):
def wrapper(*args, **kwargs):
data, message, info, status = func(*args,**kwargs)
if message == '': message = info
        return messageMaker(data, message, info + statusInfo.get(status, 'unknown status'), status)
return wrapper
# make messager
def messageMaker(data=None, message=None, info=None, status=None):
return {'message':message, 'data':data, 'status':status, 'info': info, 'time':getTime(1)}
# get now timeString or timeStamp
def getTime(needFormat=0, formatMS=True):
if needFormat != 0:
return datetime.datetime.now().strftime(f'%Y-%m-%d %H:%M:%S{r".%f" if formatMS else ""}')
else:
return time.time()
# timeString to timeStamp
def toTimeStamp(timeString):
if '.' not in timeString: getMS = False
else: getMS=True
timeTuple = datetime.datetime.strptime(timeString, f'%Y-%m-%d %H:%M:%S{r".%f" if getMS else ""}')
    return float(f'{str(int(time.mktime(timeTuple.timetuple())))}' + (f'.{timeTuple.microsecond:06d}' if getMS else ''))
# timeStamp to timeString
def toTimeString(timeStamp):
if type(timeStamp) == int: getMS = False
else: getMS = True
timeTuple = datetime.datetime.utcfromtimestamp(timeStamp + 8 * 3600)
return timeTuple.strftime(f'%Y-%m-%d %H:%M:%S{r".%f" if getMS else ""}')
# generate method docs str
def docsParameter(sub):
def dec(obj):
obj.__doc__ = sub
return obj
return dec
# make text include time
def logtext(text):
logtext = f'[{getTime(1)}]: {text}'
print(logtext)
return logtext
# make request record info
def makeRequestInfo(request):
return {
'remote_addr': request.remote_addr,
'system': request.headers.get('system_info'),
'request': {
'environ': request.environ,
'url': request.url
}
}
# make authorize info
def makeAuthorizeInfo(request):
otsuToken, osuid = request.headers.get('X-Otsutoken'), request.headers.get('osuid')
    if otsuToken is None or osuid is None: status = -1
else: status = 1
return {'otsuToken': otsuToken, 'osuid': osuid, 'path': request.path.strip('/')}, status
statusInfo = {
    1: 'success',
    -1: 'failure'
}
# run? not.
if __name__ == '__main__':
    print('wow, you found a treasure house!!! but running it directly does nothing')
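    # Illustrative round-trip of the time helpers above (added example, not part of
    # the original module). The exact values depend on the local timezone, since
    # toTimeString assumes UTC+8.
    now_string = getTime(1)
    stamp = toTimeStamp(now_string)
    logtext(f'{now_string} -> {stamp} -> {toTimeString(stamp)}')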
|
"""
Models for our cerbere app.
"""
import uuid
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager
from django.db import models
from django.utils.translation import gettext_lazy as _
from phonenumber_field.modelfields import PhoneNumberField
class User(AbstractBaseUser, PermissionsMixin):
"""
Partaj users are expected to authenticate themselves through Cerbère, an identity
provider that uses CAS/SAML to interoperate with applications.
"""
# Generic fields to build up minimal data on any user
id = models.UUIDField(
verbose_name=_("id"),
help_text=_("Primary key for the user as UUID"),
primary_key=True,
default=uuid.uuid4,
editable=False,
)
date_joined = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
updated_at = models.DateTimeField(verbose_name=_("updated at"), auto_now=True)
is_active = models.BooleanField(
verbose_name=_("active"),
help_text=_(
"Designates whether this user should be treated as active. "
"Unselect this instead of deleting accounts."
),
default=True,
)
is_staff = models.BooleanField(
verbose_name=_("staff status"),
help_text=_("Designates whether the user can log into this admin site."),
default=False,
)
# Information we can pick up from our identity provider
username = models.CharField(
verbose_name=_("username"),
help_text=_("unique human readable username"), # ==email for Cerbère users
max_length=255,
unique=True,
)
first_name = models.CharField(
verbose_name=_("first name"), max_length=255, blank=True
)
last_name = models.CharField(
verbose_name=_("last name"), max_length=255, blank=True
)
email = models.EmailField(verbose_name=_("email"), max_length=255, unique=True)
phone_number = PhoneNumberField(
verbose_name=_("phone number"),
help_text=_("Phone number for this user"),
blank=True,
)
unit_name = models.CharField(
verbose_name=_("unit name"), max_length=255, blank=True
)
title = models.CharField(verbose_name=_("title"), max_length=255, blank=True)
objects = UserManager()
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["email"]
def __str__(self):
"""
String representation of a user, for internal purposes.
"""
return f"{self._meta.verbose_name.title()} <{self.username}>"
def get_full_name(self):
"""
Get a string showing a user's full name. Avoid doing random concatenation throughout the
app and get a consistent name for our user.
"""
return f"{self.first_name} {self.last_name}"
class Meta:
db_table = "partaj_user"
verbose_name = _("user")
|
import sys
sys.path.insert(0, '.')
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import pytorch_to_caffe
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, (5, 5))
self.conv2 = nn.Conv2d(6, 16, (5, 5))
self.fc1 = nn.Linear(256, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
print("in forward function, type of x is: ")
print(type(x))
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = x.view(x.size()[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return x
if __name__ == '__main__':
model_path = "./lenet.pth"
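    # Note (added): lenet.pth is assumed to hold the full serialized module; if it
    # only stores a state_dict, use model.load_state_dict(torch.load(model_path))
    # on the LeNet instance below instead of overwriting it with torch.load().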
model = LeNet()
model = torch.load(model_path)
model.eval()
name = "LeNet"
input = torch.ones([1, 1, 28, 28])
pytorch_to_caffe.trans_net(model, input, name)
pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
|
import numpy as np
from bokeh.models import ColumnDataSource
from powersimdata.design.compare.transmission import (
calculate_branch_difference,
calculate_dcline_difference,
)
from powersimdata.utility.distance import haversine
from postreise.plot import colors
from postreise.plot.canvas import create_map_canvas
from postreise.plot.check import _check_func_kwargs
from postreise.plot.plot_states import add_state_borders
from postreise.plot.projection_helpers import project_branch
def add_transmission_upgrades(
canvas,
branch_merge,
dc_merge,
b2b_indices=None,
diff_threshold=100,
all_branch_scale=1,
diff_branch_scale=1,
all_branch_min=0.1,
diff_branch_min=1.0,
b2b_scale=5,
dcline_upgrade_dist_threshold=0,
):
"""Make map of branches showing upgrades.
:param bokeh.plotting.figure.Figure canvas: canvas to add upgrades to.
:param pandas.DataFrame branch_merge: branch of scenarios 1 and 2
:param pandas.DataFrame dc_merge: dclines for scenarios 1 and 2
:param list/set/tuple b2b_indices: indices of HVDC lines which are back-to-backs.
:param int/float diff_threshold: difference threshold (in MW), above which branches
are highlighted.
:param int/float all_branch_scale: scale factor for plotting all branches
(pixels/GW).
:param int/float diff_branch_scale: scale factor for plotting branches with
differences above the threshold (pixels/GW).
:param int/float all_branch_min: minimum width to plot all branches.
:param int/float diff_branch_min: minimum width to plot branches with significant
differences.
:param int/float b2b_scale: scale factor for plotting b2b facilities (pixels/GW).
:param int/float dcline_upgrade_dist_threshold: minimum distance (miles) for
plotting DC line upgrades (if none are longer, no legend entry will be created).
:return: (*bokeh.plotting.figure.Figure*) -- Bokeh map plot of color-coded upgrades.
"""
# plotting constants
legend_alpha = 0.9
all_elements_alpha = 0.5
differences_alpha = 0.8
# convert scale factors from pixels/GW to pixels/MW (base units for our grid data)
all_branch_scale_MW = all_branch_scale / 1000 # noqa: N806
diff_branch_scale_MW = diff_branch_scale / 1000 # noqa: N806
b2b_scale_MW = b2b_scale / 1000 # noqa: N806
# data prep
branch_all = project_branch(branch_merge)
branch_dc = project_branch(dc_merge)
# For these, we will plot a triangle for the B2B location, plus 'pseudo' AC lines
# get_level_values allows us to index into MultiIndex as necessary
b2b_indices = {} if b2b_indices is None else b2b_indices
b2b_mask = branch_dc.index.get_level_values(0).isin(b2b_indices)
# .copy() avoids a pandas SettingWithCopyError later
b2b = branch_dc.iloc[b2b_mask].copy()
branch_dc_lines = branch_dc.loc[~b2b_mask].copy()
# Color branches based on upgraded capacity
branch_all["color"] = np.nan
branch_all.loc[branch_all["diff"] > diff_threshold, "color"] = colors.be_blue
branch_all.loc[branch_all["diff"] < -1 * diff_threshold, "color"] = colors.be_purple
# Color pseudo AC branches based on upgraded capacity
b2b["color"] = np.nan
b2b.loc[b2b["diff"] > diff_threshold, "color"] = colors.be_blue
b2b.loc[b2b["diff"] < -1 * diff_threshold, "color"] = colors.be_purple
b2b = b2b[~b2b.color.isnull()]
# Color DC lines based on upgraded capacity
branch_dc_lines["dist"] = branch_dc_lines.apply(
lambda x: haversine((x.from_lat, x.from_lon), (x.to_lat, x.to_lon)), axis=1
)
branch_dc_lines = branch_dc_lines.loc[
branch_dc_lines.dist >= dcline_upgrade_dist_threshold
]
branch_dc_lines.loc[:, "color"] = np.nan
branch_dc_lines.loc[branch_dc_lines["diff"] > 0, "color"] = colors.be_green
branch_dc_lines.loc[branch_dc_lines["diff"] < 0, "color"] = colors.be_lightblue
# Create ColumnDataSources for bokeh to plot with
source_all_ac = ColumnDataSource(
{
"xs": branch_all[["from_x", "to_x"]].values.tolist(),
"ys": branch_all[["from_y", "to_y"]].values.tolist(),
"cap": branch_all["rateA"] * all_branch_scale_MW + all_branch_min,
"color": branch_all["color"],
}
)
# AC branches with significant differences
ac_diff_branches = branch_all.loc[~branch_all.color.isnull()]
source_ac_difference = ColumnDataSource(
{
"xs": ac_diff_branches[["from_x", "to_x"]].values.tolist(),
"ys": ac_diff_branches[["from_y", "to_y"]].values.tolist(),
"diff": (
ac_diff_branches["diff"].abs() * diff_branch_scale_MW + diff_branch_min
),
"color": ac_diff_branches["color"],
}
)
source_all_dc = ColumnDataSource(
{
"xs": branch_dc_lines[["from_x", "to_x"]].values.tolist(),
"ys": branch_dc_lines[["from_y", "to_y"]].values.tolist(),
"cap": branch_dc_lines.Pmax * all_branch_scale_MW + all_branch_min,
"color": branch_dc_lines["color"],
}
)
dc_diff_lines = branch_dc_lines.loc[~branch_dc_lines.color.isnull()]
source_dc_differences = ColumnDataSource(
{
"xs": dc_diff_lines[["from_x", "to_x"]].values.tolist(),
"ys": dc_diff_lines[["from_y", "to_y"]].values.tolist(),
"diff": (
dc_diff_lines["diff"].abs() * diff_branch_scale_MW + diff_branch_min
),
"color": dc_diff_lines["color"],
}
)
source_pseudoac = ColumnDataSource( # pseudo ac scen 1
{
"xs": b2b[["from_x", "to_x"]].values.tolist(),
"ys": b2b[["from_y", "to_y"]].values.tolist(),
"cap": b2b.Pmax * all_branch_scale_MW + all_branch_min,
"diff": b2b["diff"].abs() * diff_branch_scale_MW + diff_branch_min,
"color": b2b["color"],
}
)
# Build the legend
leg_x = [-8.1e6] * 2
leg_y = [5.2e6] * 2
# These are 'dummy' series to populate the legend with
if len(branch_dc_lines[branch_dc_lines["diff"] > 0]) > 0:
canvas.multi_line(
leg_x,
leg_y,
color=colors.be_green,
alpha=legend_alpha,
line_width=10,
legend_label="Additional HVDC Capacity",
)
if len(branch_dc_lines[branch_dc_lines["diff"] < 0]) > 0:
canvas.multi_line(
leg_x,
leg_y,
color=colors.be_lightblue,
alpha=legend_alpha,
line_width=10,
legend_label="Reduced HVDC Capacity",
)
if len(branch_all[branch_all["diff"] < 0]) > 0:
canvas.multi_line(
leg_x,
leg_y,
color=colors.be_purple,
alpha=legend_alpha,
line_width=10,
legend_label="Reduced AC Transmission",
)
if len(branch_all[branch_all["diff"] > 0]) > 0:
canvas.multi_line(
leg_x,
leg_y,
color=colors.be_blue,
alpha=legend_alpha,
line_width=10,
legend_label="Upgraded AC transmission",
)
if len(b2b[b2b["diff"] > 0]) > 0:
canvas.scatter(
x=b2b.from_x[1],
y=b2b.from_y[1],
color=colors.be_magenta,
marker="triangle",
legend_label="Upgraded B2B capacity",
size=30,
alpha=legend_alpha,
)
# Everything below gets plotted into the 'main' figure
background_plot_dicts = [
{"source": source_all_ac, "color": "gray", "line_width": "cap"},
{"source": source_all_dc, "color": "gray", "line_width": "cap"},
{"source": source_pseudoac, "color": "gray", "line_width": "cap"},
]
for d in background_plot_dicts:
canvas.multi_line(
"xs",
"ys",
color=d["color"],
line_width=d["line_width"],
source=d["source"],
alpha=all_elements_alpha,
)
# all B2Bs
canvas.scatter(
x=b2b.from_x,
y=b2b.from_y,
color="gray",
marker="triangle",
size=b2b["Pmax"].abs() * b2b_scale_MW,
alpha=all_elements_alpha,
)
difference_plot_dicts = [
{"source": source_pseudoac, "color": "color", "line_width": "diff"},
{"source": source_ac_difference, "color": "color", "line_width": "diff"},
{"source": source_dc_differences, "color": "color", "line_width": "diff"},
]
for d in difference_plot_dicts:
canvas.multi_line(
"xs",
"ys",
color=d["color"],
line_width=d["line_width"],
source=d["source"],
alpha=differences_alpha,
)
# B2Bs with differences
canvas.scatter(
x=b2b.from_x,
y=b2b.from_y,
color=colors.be_magenta,
marker="triangle",
size=b2b["diff"].abs() * b2b_scale_MW,
)
return canvas
def map_transmission_upgrades(
scenario1,
scenario2,
b2b_indices=None,
figsize=(1400, 800),
x_range=None,
y_range=None,
state_borders_kwargs=None,
legend_font_size=20,
legend_location="bottom_left",
**plot_kwargs,
):
"""Plot capacity differences for branches & HVDC lines between two scenarios.
:param powersimdata.scenario.scenario.Scenario scenario1: first scenario.
:param powersimdata.scenario.scenario.Scenario scenario2: second scenario.
:param list/set/tuple b2b_indices: indices of HVDC lines which are back-to-backs.
:param tuple figsize: size of the bokeh figure (in pixels).
:param tuple x_range: x range to zoom plot to (EPSG:3857).
:param tuple y_range: y range to zoom plot to (EPSG:3857).
:param dict state_borders_kwargs: keyword arguments to be passed to
:func:`postreise.plot.plot_states.add_state_borders`.
:param int/float legend_font_size: font size for legend.
:param str legend_location: location for legend.
:param \\*\\*plot_kwargs: collected keyword arguments to be passed to
:func:`add_transmission_upgrades`.
:raises ValueError: if grid model and interconnect of scenarios differ.
:return: (*bokeh.plotting.figure.Figure*) -- map with color-coded upgrades.
"""
# Validate inputs
if not (
scenario1.info["grid_model"] == scenario2.info["grid_model"]
and scenario1.info["interconnect"] == scenario2.info["interconnect"]
):
raise ValueError("Scenarios to compare must be same grid model & interconnect")
# Pre-plot data processing
grid1 = scenario1.get_grid()
grid2 = scenario2.get_grid()
branch_merge = calculate_branch_difference(grid1.branch, grid2.branch)
dc_merge = calculate_dcline_difference(grid1, grid2)
# Set up figure
canvas = create_map_canvas(figsize=figsize, x_range=x_range, y_range=y_range)
# Add state outlines
default_state_borders_kwargs = {
"line_color": "slategrey",
"line_width": 1,
"fill_alpha": 1,
"background_map": False,
}
all_state_borders_kwargs = (
{**default_state_borders_kwargs, **state_borders_kwargs}
if state_borders_kwargs is not None
else default_state_borders_kwargs
)
_check_func_kwargs(
add_state_borders, set(all_state_borders_kwargs), "state_borders_kwargs"
)
canvas = add_state_borders(canvas, **all_state_borders_kwargs)
# add transmission map
canvas = add_transmission_upgrades(
canvas, branch_merge, dc_merge, b2b_indices, **plot_kwargs
)
canvas.legend.location = legend_location
canvas.legend.label_text_font_size = f"{legend_font_size}pt"
return canvas
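# A minimal usage sketch (added for illustration, not part of the original module):
# compare two scenarios and display the upgrade map. The scenario IDs below are
# placeholders and require access to the corresponding powersimdata grid data.
if __name__ == "__main__":
    from bokeh.io import show
    from powersimdata import Scenario

    scenario1 = Scenario(403)  # hypothetical scenario id
    scenario2 = Scenario(404)  # hypothetical scenario id
    show(map_transmission_upgrades(scenario1, scenario2))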
|
"""Implementation of sample attack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import numpy as np
import models
import csv
import pandas as pd
from PIL import Image
import StringIO
import tensorflow as tf
from timeit import default_timer as timer
slim = tf.contrib.slim
tf.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
'checkpoint_path', './model_ckpts', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'input_dir', '../../dataset/images', 'Input directory with images.')
tf.flags.DEFINE_string(
'output_dir', './output_dir', 'Output directory with images.')
tf.flags.DEFINE_float(
'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.')
tf.flags.DEFINE_integer(
'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
'batch_size', 10, 'How many images process at one time.')
tf.flags.DEFINE_integer(
'iternum', 8, 'How many iterations does the attacker runs.')
tf.flags.DEFINE_float(
'learning_rate', 0.2, 'The learning rate of attacker.')
tf.flags.DEFINE_float(
'margin', 0.01, 'margin parameter in the loss function.')
tf.flags.DEFINE_string(
'whitebox_train', '5,6,7,8,9', 'models for whitebox training.')
tf.flags.DEFINE_string(
'test', '0,1,2,3,4,5,6,7,8,9,10', 'models for testing.')
FLAGS = tf.flags.FLAGS
def string_to_list(s):
return [int(x) for x in filter(None, s.split(','))]
def load_target_class(input_dir):
"""Loads target classes."""
with tf.gfile.Open(os.path.join(input_dir, 'target_class.csv')) as f:
return {row[0]: int(row[1]) for row in csv.reader(f) if len(row) >= 2}
def load_images(input_dir, batch_shape):
images = np.zeros(batch_shape)
filenames = []
target_class_dict = load_target_class(input_dir)
target_class_batch = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
# original images
image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
# Images for inception classifier are normalized to be in [-1, 1] interval.
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
# Get target class.
fname = os.path.basename(filepath)
target_class = target_class_dict[fname]
target_class_batch.append(target_class)
idx += 1
if idx == batch_size:
yield filenames, images, target_class_batch
filenames = []
target_class_batch = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images, target_class_batch
def save_images(images, filenames, output_dir):
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
Image.fromarray(img).save(f, format='PNG')
class Evaluator(object):
def __init__(self, name, models, image, image_input, true_label, test):
errors = []
for i in test:
correct_prediction = tf.equal(tf.argmax(models[i].logits, axis=1), tf.cast(true_label, tf.int64))
error = 1 - tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
errors.append(error)
self.name = name
self.errors = errors
self.processed_batch_num = 0
self.overall_errors = np.zeros(len(test))
self.label = true_label
self.image_input = image_input
self.assign_image = tf.assign(image, image_input)
def run(self, sess, image_input, y):
sess.run(self.assign_image, feed_dict={self.image_input: image_input})
errors = sess.run(self.errors, feed_dict={self.label: y})
print('%s evaluation errors: %s' % (self.name, errors))
self.processed_batch_num += 1
self.overall_errors += errors
if self.processed_batch_num % 10 == 0:
print('%s overall evaluation errors: %s' % (self.name, self.overall_errors / self.processed_batch_num))
class Attacker(object):
def __init__(self, name, models, image_input, image, true_label, max_epsilon, k, train, test, margin, learning_rate):
self.name = name
self.models = models
self.max_epsilon = max_epsilon
self.k = k
self.processed_batch_num = 0
self.overall_train_errors = np.zeros(len(train))
self.overall_test_errors = np.zeros(len(test))
# placeholders
self.label = true_label
self.image_input = image_input
self.image = image
self.assign_image = tf.assign(image, image_input)
self.assign_add_image = tf.assign_add(image, image_input)
label_mask = tf.one_hot(true_label, 1001, on_value=1.0, off_value=0.0, dtype=tf.float32)
def define_errors(model_indices):
errors = []
for i in model_indices:
correct_prediction = tf.equal(tf.argmax(models[i].logits, axis=1), tf.cast(true_label, tf.int64))
error = 1 - tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
errors.append(error)
return errors
self.train_errors = define_errors(train)
self.test_errors = define_errors(test)
# define mixture loss
softmax_prob_sum = 0
for i in train:
softmax_prob_sum += tf.reduce_sum(tf.nn.softmax(models[i].logits) * label_mask, axis=1)
self.mixture_loss = (-1.0) * tf.reduce_mean(tf.log(margin + softmax_prob_sum))
# define gradient
grad = tf.gradients(self.mixture_loss, image)[0]
# define optimization step
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate * max_epsilon)
self.all_model_gradient_step = opt.apply_gradients([(tf.sign(grad), image)])
self.apply_null = opt.apply_gradients([(tf.zeros(image.get_shape().as_list(), dtype=tf.float32), image)])
# define clipping step
clipped_image = tf.clip_by_value(image, image_input - max_epsilon, image_input + max_epsilon)
clipped_image = tf.clip_by_value(clipped_image, -1, 1)
self.clipping_step = tf.assign(image, clipped_image)
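# The two clips project the perturbed image back into the L-infinity ball of radius max_epsilon around
# the original input and into the valid [-1, 1] pixel range.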
def run(self, sess, x_batch, y=None):
sess.run(self.assign_image, feed_dict={self.image_input: x_batch})
if y is None:
y = sess.run(self.models[0].preds)
start = timer()
for i in range(self.k):
sess.run(self.all_model_gradient_step, feed_dict={self.label: y, self.image_input: x_batch})
sess.run(self.clipping_step, feed_dict={self.image_input: x_batch})
end = timer()
x_adv, train_errors_after_attack, test_errors_after_attack = sess.run(
[self.image, self.train_errors, self.test_errors], feed_dict={self.label: y})
print('%s -- time: %g sec, train_errors: %s, test_errors: %s' % (
self.name, end - start, train_errors_after_attack, test_errors_after_attack))
sys.stdout.flush()
self.processed_batch_num += 1
self.overall_train_errors += train_errors_after_attack
self.overall_test_errors += test_errors_after_attack
return y, (x_adv - x_batch)
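# run() returns the labels actually used and the perturbation (x_adv - x_batch); main() adds this
# perturbation back onto the clean batch before evaluating and saving.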
def main(_):
full_start = timer()
# Images for inception classifier are normalized to be in [-1, 1] interval,
# eps is a difference between pixels so it should be in [0, 2] interval.
# Renormalizing epsilon from [0, 255] to [0, 2].
eps = 2.0 * FLAGS.max_epsilon / 255.0
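# e.g. with the common competition setting max_epsilon = 16 (an assumed value for the flag, not shown
# here), eps = 2 * 16 / 255 ≈ 0.125 on the [-1, 1] pixel scale.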
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
num_classes = 1001
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
# Prepare graph
image_input = tf.placeholder(tf.float32, shape=batch_shape)
image = tf.get_variable('adversarial_image', shape=batch_shape)
label = tf.placeholder(tf.int32, shape=FLAGS.batch_size)
sess = tf.Session()
initialized_vars = set()
savers = []
# list of models in our ensemble
# models 0-5
all_models = [models.InceptionResNetV2Model, models.InceptionV3Model, models.InceptionV4Model,
models.ResNetV1Model, models.ResNetV2Model, models.VGG16]
# models 6-10
all_models += [models.EnsAdvInceptionResNetV2Model, models.AdvInceptionV3Model, models.Ens3AdvInceptionV3Model,
models.Ens4AdvInceptionV3Model, models.KerasXceptionModel]
# model 11
all_models += [models.SmoothInceptionResNetV2Model]
whitebox_train = string_to_list(FLAGS.whitebox_train)
test = string_to_list(FLAGS.test)
indices_to_load = [index for index in range(len(all_models)) if
index in whitebox_train + test]
# build all the models and specify the saver
for i, model in enumerate(all_models):
all_models[i] = model(num_classes)
if hasattr(all_models[i], 'isKerasModel') and all_models[i].isKerasModel:
if i in indices_to_load:
all_models[i](sess, batch_size=FLAGS.batch_size, image=image, ckpt_path=FLAGS.checkpoint_path)
savers.append(None)
else:
all_models[i](image, FLAGS.batch_size)
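# For non-Keras models, collect only the variables this model just created and strip its name-scope
# prefix, so the Saver's keys match the unscoped variable names expected to be stored in the public
# checkpoints restored later.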
all_vars = slim.get_model_variables()
uninitialized_vars = set(all_vars) - initialized_vars
saver_dict = {v.op.name[len(all_models[i].name) + 1:]: v for v in uninitialized_vars}
savers.append(tf.train.Saver(saver_dict))
initialized_vars = set(all_vars)
whitebox_ratio = 1.0
with tf.variable_scope('whitebox-attacker'):
whitebox_attacker = Attacker(name='whitebox-attacker', models=all_models, image_input=image_input, image=image, true_label=label, max_epsilon=eps * whitebox_ratio, k=FLAGS.iternum, train=whitebox_train, test=[], margin=FLAGS.margin, learning_rate=FLAGS.learning_rate)
with tf.variable_scope('raw_evaluator'):
original_eval = Evaluator(name='original', models=all_models, image_input=image_input, image=image, true_label=label, test=test)
# Run computation
tot_time = 0.0
processed = 0.0
sess.run(tf.global_variables_initializer())
for i in indices_to_load:
if hasattr(all_models[i], 'isKerasModel') and all_models[i].isKerasModel:
pass
else:
savers[i].restore(sess, FLAGS.checkpoint_path + '/' + all_models[i].ckpt)
print("Initialization done after {} sec".format(timer() - full_start))
for filenames, images, target_classes in load_images(FLAGS.input_dir, batch_shape):
start = timer()
_, whitebox_perturb = whitebox_attacker.run(sess, images, target_classes)
images += whitebox_perturb
original_eval.run(sess, images, target_classes)
if FLAGS.output_dir != '':
save_images(images, filenames, FLAGS.output_dir)
end = timer()
tot_time += end - start
processed += FLAGS.batch_size
full_end = timer()
print("DONE: Processed {} images in {} sec".format(processed, full_end - full_start))
if __name__ == '__main__':
tf.app.run()
|
import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class PascalContextDataset(CustomDataset):
"""PascalContext dataset.
In segmentation map annotation for PascalContext, 0 stands for background,
which is included in 60 categories. ``reduce_zero_label`` is fixed to
False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
fixed to '.png'.
Args:
split (str): Split txt file for PascalContext.
"""
CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'table', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor', 'bag', 'bed', 'bench', 'book', 'building',
'cabinet', 'ceiling', 'cloth', 'computer', 'cup', 'door',
'fence', 'floor', 'flower', 'food', 'grass', 'ground',
'keyboard', 'light', 'mountain', 'mouse', 'curtain', 'platform',
'sign', 'plate', 'road', 'rock', 'shelves', 'sidewalk', 'sky',
'snow', 'bedclothes', 'track', 'tree', 'truck', 'wall', 'water',
'window', 'wood')
PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
[4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
[150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
[0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
[255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
[140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
[255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
[255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
def __init__(self, split, **kwargs):
super(PascalContextDataset, self).__init__(
img_suffix='.jpg',
seg_map_suffix='.png',
split=split,
reduce_zero_label=False,
**kwargs)
assert osp.exists(self.img_dir) and self.split is not None
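# A minimal usage sketch (hypothetical paths and pipeline, shown only for illustration; in practice the
# dataset is normally built from an mmsegmentation config through the DATASETS registry rather than
# instantiated directly):
# dataset = PascalContextDataset(
#     data_root='data/VOCdevkit/VOC2010',
#     img_dir='JPEGImages',
#     ann_dir='SegmentationClassContext',
#     split='ImageSets/SegmentationContext/train.txt',
#     pipeline=train_pipeline)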
|
import re
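# Three verbose (re.X) patterns that split a syllable into an onset, a rhyme and a tone letter.
# Judging by the names, HD, ML and RPA likely correspond to Hmong Daw, Mong Leng and the Romanized
# Popular Alphabet conventions; this is an inference, not documented in this file.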
HD_SYLLABLE = re.compile('''(?P<ons>f|v|xy|x|s|z|y|h|
n?(?:pl|tx|ts|p|t|r|c|k|q)h?|h?|d|dh|
h?(?:ny|n|ml|m|l)|)
(?P<rhy>ee|oo|ai|aw|au|ia|ua|i|e|a|o|u|w)
(?P<ton>b|s|j|v|m|g|d|)''', flags=re.X)
ML_SYLLABLE = re.compile('''(?P<ons>f|v|xy|x|s|z|y|h|
n?(?:dl|pl|tx|ts|p|t|r|c|k|q)h?|
(?:ny|n|ml|m|hl|l)|)
(?P<rhy>aa|ee|oo|ai|aw|au|ua|i|e|a|o|u|w)
(?P<ton>b|s|j|v|m|g|d|)''', flags=re.X)
RPA_SYLLABLE = re.compile('''(?P<ons>f|v|xy|x|s|z|y|h|
n?(?:dl|pl|tx|ts|p|t|r|c|k|q)h?|h?|d|dh|
h?(?:ny|n|ml|m|l)|)
(?P<rhy>aa|ee|oo|ai|aw|au|ia|ua|i|e|a|o|u|w)
(?P<ton>b|s|j|v|m|g|d|)''', flags=re.X)
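# A minimal sketch of how these patterns might be used; the example syllable is illustrative and the
# block only runs when the module is executed directly.
if __name__ == '__main__':
    m = RPA_SYLLABLE.fullmatch('ntxawg')
    if m:
        # expected groups: onset 'ntx', rhyme 'aw', tone letter 'g'
        print(m.group('ons'), m.group('rhy'), m.group('ton'))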
|
import demistomock as demisto
from Malwarebytes import scan_and_remediate, scan_and_report, isolate_endpoint, list_endpoint_info, \
isolate_process, isolate_network, isolate_desktop, deisolate_endpoint,\
scan_detections, scan_status, fetch_incidents, list_all_endpoints, open_sa_incident,\
remediate_sa_incident, close_sa_incident, \
get_sa_activities_command
auth_token = 'vATEZGHAxu5AoNzZZSR7URcnREqxaHcxGlZy0_3M8aU.j3KdAUwntnzvcjE4-UdVORUGvnR4eBVITn6KxoblFYc'
account_id = 'XXX-XXX-XXX-XXXX--XXXX'
client_id = 'NB-XXX-XXXX1234-1234'
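# The credential values above appear to be test fixtures; every request in these tests is intercepted
# by requests_mock, so no real Malwarebytes API is contacted.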
MOCK_ENDPOINTS = 'https://cloud.malwarebytes.com/api/v2/endpoints'
MOCK_ENDPOINT_MACHINE_ID = 'https://cloud.malwarebytes.com/api/v2/endpoints'
USE_SSL = False
MOCK_ENDPOINT_MACHINE_ID_RESP = {
"aggregations": {},
"machines": [
{
"link": "/api/v2/machines/8c9df179-a999-4ca2-9c41-9795ae0b08f5",
"agent": {
"started_at_offset": 0,
"last_user": "WIN-TEN8D4FCOUB\\Administrator",
"at": "2020-01-01T04:20:49.784113Z",
"account_id": "56db16b7-7bcd-404b-9443-f4ed95044c64",
"group_id": "57b6dbcd-8243-4f52-b80a-7c102c6b06d3",
"nics": [
{
"ips": [
"192.168.230.140"
],
"description": "Intel(R) 82574L Gigabit Network Connection #2",
"mac_address": "000C29D7A627"
},
{
"ips": [
"172.16.128.100"
],
"description": "Intel(R) 82574L Gigabit Network Connection",
"mac_address": "000C29D7A631"
}
],
"os_info": {
"os_type": "Server",
"os_version": "6.3.9600",
"os_platform": "Windows",
"os_architecture": "Amd64",
"os_release_name": "Microsoft Windows Server 2012 R2 Standard"
},
"host_name": "WIN-TEN8D4FCOUB",
"fully_qualified_host_name": "WIN-TEN8D4FCOUB",
"plugins": {
"asset_manager": {
"product_name": "Asset Manager",
"plugin_version": "1.2.0.329"
},
"endpoint_detection_and_response": {
"product_name": "Endpoint Detection and Response",
"plugin_version": "1.2.0.282"
}
},
"engine_version": "1.2.0.726",
"policy_etag": "ae2dccc2e3eaa8b33d10f577f76ddc29"
},
"machine": {
"id": "8c9df179-a999-4ca2-9c41-9795ae0b08f5",
"online": False,
"account_id": "56db16b7-7bcd-404b-9443-f4ed95044c64",
"group_id": "57b6dbcd-8243-4f52-b80a-7c102c6b06d3",
"root_group_id": "57b6dbcd-8243-4f52-b80a-7c102c6b06d3",
"group_name": "Asia-Group-ServerEPR",
"policy_id": "2336247b-a41a-4f9c-8bf8-2e45e6cd41ff",
"policy_name": "Asia-ServerEPR-Policy",
"last_day_seen": "2020-01-01T04:03:36.046972Z",
"isolated": False,
"scan_age_days": 2147483647,
"suspicious_activity_count": 55,
"infection_count": 0,
"reboot_required": 0,
"is_deleted": False
}
}
],
"total_count": 1,
"next_cursor": ""
}
MOCK_HOSTNAME = 'WIN-TEN8D4FCOUB'
MOCK_IP = '192.168.1.1'
MOCK_MACHINES_DATA = {
"machines": [
{
"created_at": "2020-02-05T10:12:55.187467Z",
"id": "017febb6-ae68-4c15-9918-d911c72d062a",
"last_seen_at": "2020-02-26T16:36:17.352342Z",
"name": "TA-AZ-CLT1",
"online": True,
"os_architecture": "AMD64",
"os_platform": "WINDOWS",
"os_release_name": "Microsoft Windows 10 Pro"
},
{
"created_at": "2020-02-05T09:50:02.194556Z",
"id": "211d8c3e-142c-4849-b1f0-1680b4bd239c",
"last_seen_at": "2020-03-11T11:00:24.746133Z",
"name": "WIN-TEN8D4FCOUB",
"online": False,
"os_architecture": "AMD64",
"os_platform": "WINDOWS",
"os_release_name": "Microsoft Windows 10 Enterprise"
},
{
"created_at": "2019-11-25T19:47:15.833008Z",
"id": "b5740188-00a2-434b-a180-5b0fa85cb10b",
"last_seen_at": "2020-02-27T15:36:33.68981Z",
"name": "DESKTOP-91UJNA1",
"online": False,
"os_architecture": "AMD64",
"os_platform": "WINDOWS",
"os_release_name": "Microsoft Windows 10 Pro"
},
{
"created_at": "2019-10-18T09:26:26.993555Z",
"id": "5074ade3-5716-44d8-83c7-5985379c0399",
"last_seen_at": "2020-03-05T13:17:15.459352Z",
"name": "DESKTOP-664HFM6",
"online": False,
"os_architecture": "AMD64",
"os_platform": "WINDOWS",
"os_release_name": "Microsoft Windows 10 Pro"
}
],
"total_count": 4,
"next_cursor": ""
}
MOCK_ENDPOINTS_JOBS = 'https://cloud.malwarebytes.com/api/v2'
MOCK_POST_JOBS_DATA = {
"jobs": [
{
"machine_id": "8c9df179-a999-4ca2-9c41-9795ae0b08f5",
"job_id": "aa104324-6d2f-4023-bfa6-78fc76d67200"
}
],
"total_count": 1
}
MOCK_JOBS_MACHINE_ID = '8c9df179-a999-4ca2-9c41-9795ae0b08f5'
MOCK_SCAN_ID = 'd6d46617-b99c-4758-aad2-0f8235c43d58'
MOCK_JOBS_JOB_ID = 'aa104324-6d2f-4023-bfa6-78fc76d67200'
MOCK_ENDPOINT_JOBS_OUTPUT = {
'Malwarebytes.Scan(val.Job_ID == obj.Job_ID)':
{
'Machine_ID': MOCK_JOBS_MACHINE_ID,
'Job_ID': MOCK_JOBS_JOB_ID
}
}
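# The key uses XSOAR's context-path syntax: entries written under Malwarebytes.Scan are matched and
# merged on Job_ID.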
MOCK_GET_JOBS_DATA = {
"id": "554e6e03-6a31-4007-aee8-954e88093ef0",
"machine_id": "8c9df179-a999-4ca2-9c41-9795ae0b08f5",
"machine_name": "DESKTOP-LI4MQ7B",
"command": "command.threat.scan",
"issued_at": "2020-03-17T14:02:26.562283Z",
"issued_by": "54a39a8b-e368-4359-bf74-8358f8d4fc11",
"expires_at": "2020-03-20T14:02:26.562285Z",
"updated_at": "2020-03-17T14:12:09.230305Z",
"state": "COMPLETED",
"relay_state": "3fd16624-9d54-4e15-9d07-c222327d19fe",
"scan_id": "d6d46617-b99c-4758-aad2-0f8235c43d58"
}
MOCK_DETECTIONS_PATH = MOCK_ENDPOINT_MACHINE_ID + '/' + MOCK_JOBS_MACHINE_ID + '/scans/' + MOCK_SCAN_ID + '/detections'
MOCK_SCAN_DETECTIONS = {
"detections": [
{
"id": "f21ae327-8987-4d64-a0ed-12ffa5fdd7ba",
"scan_id": "74f831d2-2871-4aa5-9030-60ce7247d23a",
"machine_id": "211d8c3e-142c-4849-b1f0-1680b4bd239c",
"machine_name": "WIN-TEN8D4FCOUB",
"group_id": "a8fbe175-6b71-443c-b906-f18d06c7faf6",
"detection_id_from_endpoint": "0bd77668-5106-11ea-8531-000c29541586",
"scanned_at": "2020-02-16T21:39:20Z",
"scanned_at_local": "2020-02-16T13:39:20-08:00",
"reported_at": "2020-02-16T21:50:02.358811Z",
"status": "quarantined",
"threat_name": "Trojan.Agent.SVR",
"type": [
"file"
],
"path": "C:\\USERS\\WIN-BOX2\\DESKTOP\\711.RAR",
"category": "Malware",
"ip_address": "",
"url": "",
"port": "",
"affected_application": "",
"md5": "",
"process_name": ""
},
{
"id": "f8707f3f-184c-4388-980f-5c2b4639c7ce",
"scan_id": "eeb99e04-ae61-47f7-bfbf-8e023fdfffe2",
"machine_id": "211d8c3e-142c-4849-b1f0-1680b4bd239c",
"machine_name": "WIN-TEN8D4FCOUB",
"group_id": "a8fbe175-6b71-443c-b906-f18d06c7faf6",
"detection_id_from_endpoint": "df7bdca4-503c-11ea-8339-000c29541586",
"scanned_at": "2020-02-15T21:39:19Z",
"scanned_at_local": "2020-02-15T13:39:19-08:00",
"reported_at": "2020-02-15T21:49:44.263558Z",
"status": "quarantined",
"threat_name": "Trojan.Agent.SVR",
"type": [
"file"
],
"path": "C:\\USERS\\WIN-BOX2\\DESKTOP\\711.RAR",
"category": "Malware",
"ip_address": "",
"url": "",
"port": "",
"affected_application": "",
"md5": "",
"process_name": ""
},
{
"id": "6d76ebad-dfa3-47a8-8cac-a717fc09ae6d",
"scan_id": "ab4623d7-9c07-46fc-95b1-d0d15c64e72b",
"machine_id": "211d8c3e-142c-4849-b1f0-1680b4bd239c",
"machine_name": "WIN-TEN8D4FCOUB",
"group_id": "a8fbe175-6b71-443c-b906-f18d06c7faf6",
"detection_id_from_endpoint": "c814b08c-4f73-11ea-9b36-000c29541586",
"scanned_at": "2020-02-14T21:39:21Z",
"scanned_at_local": "2020-02-14T13:39:21-08:00",
"reported_at": "2020-02-14T22:00:57.806491Z",
"status": "quarantined",
"threat_name": "Trojan.ServStart",
"type": [
"file"
],
"path": "C:\\USERS\\WIN-BOX2\\DESKTOP\\518_2.EXE",
"category": "Malware",
"ip_address": "",
"url": "",
"port": "",
"affected_application": "",
"md5": "",
"process_name": ""
}
],
"total_count": 3,
"next_cursor": ""
}
MOCK_SCAN_DETECTIONS_CLEAR = {
"detections": [
{
"machine_id": "211d8c3e-142c-4849-b1f0-1680b4bd239c",
"machine_name": "WIN-TEN8D4FCOUB",
"reported_at": "2020-02-16T21:50:02.358811Z",
"status": "quarantined",
"threat_name": "Trojan.Agent.SVR",
"type": [
"file"
],
"path": "C:\\USERS\\WIN-BOX2\\DESKTOP\\711.RAR",
"category": "Malware",
},
{
"machine_id": "211d8c3e-142c-4849-b1f0-1680b4bd239c",
"machine_name": "WIN-TEN8D4FCOUB",
"reported_at": "2020-02-15T21:49:44.263558Z",
"status": "quarantined",
"threat_name": "Trojan.Agent.SVR",
"type": [
"file"
],
"path": "C:\\USERS\\WIN-BOX2\\DESKTOP\\711.RAR",
"category": "Malware",
},
{
"machine_id": "211d8c3e-142c-4849-b1f0-1680b4bd239c",
"machine_name": "WIN-TEN8D4FCOUB",
"reported_at": "2020-02-14T22:00:57.806491Z",
"status": "quarantined",
"threat_name": "Trojan.ServStart",
"type": [
"file"
],
"path": "C:\\USERS\\WIN-BOX2\\DESKTOP\\518_2.EXE",
"category": "Malware",
}
],
"total_count": 3,
"next_cursor": ""
}
MOCK_SA_CHOICE = 'Suspicious Activity (EPR)'
MOCK_SA_ENDPOINT = 'https://cloud.malwarebytes.com/api/v2/sa'
MOCK_RTP_CHOICE = 'RTP Detections (EP)'
MOCK_RTP_ENDPOINT = 'https://cloud.malwarebytes.com/api/v2/detections/search'
MOCK_SA_DATA = {
"sa": [
{
"detection_id_list": [
34036085
],
"status": "detected",
"timestamp": "2020-03-03T12:27:20.000Z",
"path": "C:\\USERS\\ROHIN SAMBATH KUMAR\\DESKTOP\\MA2EZOX5\\EKATI5862.EXE",
"pc_hostname": "DESKTOP-664HFM6",
"machine_id": "5074ade3-5716-44d8-83c7-5985379c0399",
"account_id": "2020bd17-a809-4102-b744-94fe8ad1c591",
"level": 2,
"detected_by_count": 1
}
],
"total_count": 1,
"next_cursor": ""
}
MOCK_SA_MACHINE_ID = '211d8c3e-142c-4849-b1f0-1680b4bd239c'
MOCK_SA_DETECTION_ID = '23606836'
MOCK_RTP_DETECTIONS_DATA = {
"detections": [
{
"id": "1ef4503a-a1d3-4072-adc0-a3113c68662b",
"type": [
"OutboundConnection"
],
"status": "blocked",
"path": "iptest.malwarebytes.com(100.24.169.13:49792)",
"group_id": "e61dd210-1fd1-443c-ae6d-6bc9240a562f",
"is_root_detection": True,
"machine_id": "e7f1475a-7e9a-409b-b7a9-ccf7e6e68779",
"account_id": "56db16b7-7bcd-404b-9443-f4ed95044c64",
"detection_id": "d0a49ab4-62f6-11ea-8d88-000c29286b23",
"scanned_at": "2020-03-10T17:44:39Z",
"scanned_at_offset_seconds": 0,
"reported_at": "2020-03-10T17:44:40.42176888Z",
"threat_name": "Malicious Website",
"category": "MWAC",
"is_rtp_stream_event": True,
"process_name": "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe",
"cleaned_at": "0001-01-01T00:00:00Z",
"machine_name": "Wampa.rebelbase.org"
}
],
"aggregations": {},
"total_count": 1,
"next_cursor": ""
}
def test_scan_and_remediate_ip(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS, json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'ip': MOCK_IP})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
scan_and_remediate(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_scan_and_remediate_hostname(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS, json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': MOCK_HOSTNAME})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
scan_and_remediate(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_scan_and_report_ip(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS, json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'ip': MOCK_IP})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
scan_and_report(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_scan_and_report_hostname(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS, json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': MOCK_HOSTNAME})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
scan_and_report(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_isolate_endpoint_ip(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/isolate', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'ip': MOCK_IP})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
isolate_endpoint(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_isolate_endpoint_hostname(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/isolate', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': MOCK_HOSTNAME})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
isolate_endpoint(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_isolate_process_ip(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/isolate', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'ip': MOCK_IP})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
isolate_process(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_isolate_process_hostname(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/isolate', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': MOCK_HOSTNAME})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
isolate_process(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_isolate_desktop_ip(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/isolate', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'ip': MOCK_IP})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
isolate_desktop(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_isolate_desktop_hostname(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/isolate', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': MOCK_HOSTNAME})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
isolate_desktop(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_isolate_network_ip(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/isolate', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'ip': MOCK_IP})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
isolate_network(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_isolate_network_hostname(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/isolate', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': MOCK_HOSTNAME})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
isolate_network(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_deisolate_endpoint_ip(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/unlock', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'ip': MOCK_IP})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
deisolate_endpoint(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_deisolate_endpoint_hostname(requests_mock, mocker):
MOCK_ENDPOINT_POST_JOBS = 'https://cloud.malwarebytes.com/api/v2/jobs'
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINT_MACHINE_ID, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.post(MOCK_ENDPOINT_POST_JOBS + '/endpoints/unlock', json=MOCK_POST_JOBS_DATA, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': MOCK_HOSTNAME})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
deisolate_endpoint(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_JOBS_OUTPUT == outputs['EntryContext']
def test_list_all_endpoints_all(requests_mock, mocker):
# patch the API endpoint
requests_mock.get(MOCK_ENDPOINTS, json=MOCK_MACHINES_DATA)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'endpoints': 'all'})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
endpoint = demisto.args().get('endpoints')
list_all_endpoints(account_id, client_id, auth_token, endpoint, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert str(MOCK_MACHINES_DATA['machines']) == str(outputs['Contents'])
def test_list_all_endpoints_online(requests_mock, mocker):
MOCK_MACHINES_DATA_ONLINE = {
"machines": [
{
"created_at": "2020-02-05T10:12:55.187467Z",
"id": "017febb6-ae68-4c15-9918-d911c72d062a",
"last_seen_at": "2020-02-26T16:36:17.352342Z",
"name": "TA-AZ-CLT1",
"online": True,
"os_architecture": "AMD64",
"os_platform": "WINDOWS",
"os_release_name": "Microsoft Windows 10 Pro"
}
]
}
# patch the API endpoint
requests_mock.get(MOCK_ENDPOINTS, json=MOCK_MACHINES_DATA)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'endpoints': 'online'})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
endpoint = demisto.args().get('endpoints')
list_all_endpoints(account_id, client_id, auth_token, endpoint, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert str(MOCK_MACHINES_DATA_ONLINE['machines']) == str(outputs['Contents'])
def test_list_all_endpoints_offline(requests_mock, mocker):
MOCK_MACHINES_DATA_OFFLINE = {
"machines": [
{
"created_at": "2020-02-05T09:50:02.194556Z",
"id": "211d8c3e-142c-4849-b1f0-1680b4bd239c",
"last_seen_at": "2020-03-11T11:00:24.746133Z",
"name": "WIN-TEN8D4FCOUB",
"online": False,
"os_architecture": "AMD64",
"os_platform": "WINDOWS",
"os_release_name": "Microsoft Windows 10 Enterprise"
},
{
"created_at": "2019-11-25T19:47:15.833008Z",
"id": "b5740188-00a2-434b-a180-5b0fa85cb10b",
"last_seen_at": "2020-02-27T15:36:33.68981Z",
"name": "DESKTOP-91UJNA1",
"online": False,
"os_architecture": "AMD64",
"os_platform": "WINDOWS",
"os_release_name": "Microsoft Windows 10 Pro"
},
{
"created_at": "2019-10-18T09:26:26.993555Z",
"id": "5074ade3-5716-44d8-83c7-5985379c0399",
"last_seen_at": "2020-03-05T13:17:15.459352Z",
"name": "DESKTOP-664HFM6",
"online": False,
"os_architecture": "AMD64",
"os_platform": "WINDOWS",
"os_release_name": "Microsoft Windows 10 Pro"
}
]
}
# patch the API endpoint
requests_mock.get(MOCK_ENDPOINTS, json=MOCK_MACHINES_DATA)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'endpoints': 'offline'})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
endpoint = demisto.args().get('endpoints')
list_all_endpoints(account_id, client_id, auth_token, endpoint, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert str(MOCK_MACHINES_DATA_OFFLINE['machines']) == str(outputs['Contents'])
def test_list_endpoint_info_hostname(requests_mock, mocker):
MOCK_ASSET_ID = '8c9df179-a999-4ca2-9c41-9795ae0b08f5'
MOCK_ASSETS_RESPONSE = {
"startups": [
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon",
"name": "Shell",
"value": "explorer.exe"
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon",
"name": "System",
"value": ""
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon",
"name": "Taskman",
"value": ""
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon",
"name": "Userinit",
"value": "C:\\Windows\\system32\\userinit.exe,"
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run",
"name": "VMware User Process",
"value": "\"C:\\Program Files\\VMware\\VMware Tools\\vmtoolsd.exe\" -n vmusr"
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\ShellServiceObjectDelayLoad",
"name": "WebCheck",
"value": "{E6FB5E20-DE35-11CF-9C87-00AA005127ED}"
},
{
"key": "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Lsa",
"name": "Authentication Packages"
},
{
"key": "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Lsa",
"name": "Notification Packages"
},
{
"key": "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Lsa",
"name": "Security Packages"
},
{
"key": "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\SecurityProviders",
"name": "SecurityProviders",
"value": "credssp.dll, pwdssp.dll"
}
],
"os_info": {
"os_platform": "Windows",
"os_architecture": "Amd64",
"os_version": "6.3.9600",
"os_release_name": "Microsoft Windows Server 2012 R2 Standard",
"os_type": "Server"
},
"memory": {
"total_virtual": 2549530624,
"free_virtual": 765550592,
"total_physical": 2147483648,
"free_physical": 873046016
},
"computer_info": {
"manufacturer": "VMware, Inc.",
"model": "VMware Virtual Platform"
},
"software_installed": [
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Help Viewer 2.2",
"installed_date": "2019-01-07T16:00:00Z",
"version": "2.2.23107"
},
{
"vendor": "Malwarebytes",
"product": "Malwarebytes Management Console",
"installed_date": "2017-03-26T16:00:00Z",
"version": "1.8.0.3431"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.1 SDK",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.51641"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2008 Redistributable - x86 9.0.30729.4148",
"installed_date": "2016-12-04T16:00:00Z",
"version": "9.0.30729.4148"
},
{
"vendor": "Microsoft Corporation",
"product": "IIS 7.5 Express",
"installed_date": "2016-12-04T16:00:00Z",
"version": "7.5.1070"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2017 Policies ",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.1000.169"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2014 Management Objects ",
"installed_date": "2019-01-07T16:00:00Z",
"version": "12.0.2000.8"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.2 Multi-Targeting Pack (ENU)",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.51209"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5 Multi-Targeting Pack",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.50710"
},
{
"vendor": "Malwarebytes",
"product": "Malwarebytes Endpoint Agent",
"installed_date": "2019-11-15T16:00:00Z",
"version": "1.2.0.717"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.1 Multi-Targeting Pack",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.50932"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft System CLR Types for SQL Server 2014",
"installed_date": "2019-01-07T16:00:00Z",
"version": "12.0.2402.11"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2015 Redistributable (x86) - 14.0.23026",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23026.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2013 Redistributable (x64) - 12.0.21005",
"installed_date": "2019-01-07T16:00:00Z",
"version": "12.0.21005.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server Data-Tier Application Framework (x86)",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.4127.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server Management Studio - 17.9.1",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.17289.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual Studio Tools for Applications 2015",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23829"
},
{
"vendor": "Malwarebytes",
"product": "Malwarebytes Management Server",
"installed_date": "2017-03-26T16:00:00Z",
"version": "1.8.0.3431"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.2 Multi-Targeting Pack",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.51209"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual Studio Tools for Applications 2015 Language Support",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23107.20"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server Browser",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.50.1600.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2013 Redistributable (x86) - 12.0.21005",
"installed_date": "2019-01-07T16:00:00Z",
"version": "12.0.21005.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual Studio 2015 Shell (Isolated)",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23107.10"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.1 Multi-Targeting Pack (ENU)",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.50932"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2015 Redistributable (x64) - 14.0.23026",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23026.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2008 R2 (64-bit)",
"installed_date": "2016-12-05T16:00:00Z",
"version": ""
},
{
"vendor": "VMware, Inc.",
"product": "VMware Tools",
"installed_date": "2016-12-04T16:00:00Z",
"version": "10.0.10.4301679"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2008 R2 Native Client",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.50.1600.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server VSS Writer",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.50.1600.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2012 Native Client ",
"installed_date": "2019-01-07T16:00:00Z",
"version": "11.3.6540.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Active Directory Authentication Library for SQL Server",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.1000.169"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2008 Redistributable - x64 9.0.30729.6161",
"installed_date": "2016-12-04T16:00:00Z",
"version": "9.0.30729.6161"
},
{
"vendor": "Malwarebytes",
"product": "Malwarebytes version 3.8.4.2971",
"installed_date": "2019-11-15T16:00:00Z",
"version": "3.8.4.2971"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2008 R2 Setup (English)",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.50.1600.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft ODBC Driver 13 for SQL Server",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.1000.169"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft System CLR Types for SQL Server 2017",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.1000.169"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2017 T-SQL Language Service ",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.17289.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2008 Setup Support Files ",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.1.2731.0"
}
],
"nics": [
{
"mac_address": "000C29D7A627",
"description": "Intel(R) 82574L Gigabit Network Connection #2",
"ips": [
"192.168.230.140"
]
},
{
"mac_address": "000C29D7A631",
"description": "Intel(R) 82574L Gigabit Network Connection",
"ips": [
"172.16.128.100"
]
}
],
"drives": [
{
"freespace_available": 47086694400,
"volume_label": "",
"drive_format": "NTFS",
"freespace_total": 47086694400,
"name": "C:\\",
"total_size": 64422408192
}
],
"updates_installed": [
{
"installed_date": "2019-01-07T19:04:46Z",
"title": "Update for Windows (KB2999226)"
}
],
"domain_name": "",
"culture": "en-US",
"object_sid": "",
"dhcp_scope_name": "",
"time_zone": "Asia/Shanghai",
"host_name": "WIN-TEN8D4FCOUB",
"fully_qualified_host_name": "WIN-TEN8D4FCOUB",
"object_guid": "",
"plugin_version": "1.2.0.329"
}
MOCK_ENDPOINT_HOSTNAME_OUTPUT = {
'Malwarebytes.Endpoint(val.Hostname == obj.Hostname)': {
'Hostname': MOCK_ASSETS_RESPONSE['host_name'],
'IPAddress': MOCK_ASSETS_RESPONSE['nics'],
'Domain': MOCK_ASSETS_RESPONSE['domain_name'],
'MACAddress': MOCK_ASSETS_RESPONSE['nics'][0]['mac_address'],
'OS': MOCK_ASSETS_RESPONSE['os_info']['os_platform'],
'OSVersion': MOCK_ASSETS_RESPONSE['os_info']['os_version'],
'Model': MOCK_ASSETS_RESPONSE['computer_info']['model'],
'Memory': MOCK_ASSETS_RESPONSE['memory'],
'Assets': MOCK_ASSETS_RESPONSE
},
'Endpoint(val.Hostname == obj.Hostname)': {
'Hostname': MOCK_ASSETS_RESPONSE['host_name'],
'IPAddress': MOCK_ASSETS_RESPONSE['nics'][0]['ips'][0],
'Domain': MOCK_ASSETS_RESPONSE['domain_name'],
'MACAddress': MOCK_ASSETS_RESPONSE['nics'][0]['mac_address'],
'OS': MOCK_ASSETS_RESPONSE['os_info']['os_platform'],
'OSVersion': MOCK_ASSETS_RESPONSE['os_info']['os_version'],
'Model': MOCK_ASSETS_RESPONSE['computer_info']['model'],
'Memory': MOCK_ASSETS_RESPONSE['memory']['total_virtual']
}
}
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINTS, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.get(MOCK_ENDPOINTS + '/' + MOCK_ASSET_ID + '/assets', json=MOCK_ASSETS_RESPONSE)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': MOCK_HOSTNAME})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
list_endpoint_info(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert str(MOCK_ENDPOINT_HOSTNAME_OUTPUT) == str(outputs['EntryContext'])
def test_list_endpoint_info_ip(requests_mock, mocker):
MOCK_ASSET_ID = '8c9df179-a999-4ca2-9c41-9795ae0b08f5'
MOCK_ASSETS_RESPONSE = {
"startups": [
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon",
"name": "Shell",
"value": "explorer.exe"
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon",
"name": "System",
"value": ""
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon",
"name": "Taskman",
"value": ""
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon",
"name": "Userinit",
"value": "C:\\Windows\\system32\\userinit.exe,"
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run",
"name": "VMware User Process",
"value": "\"C:\\Program Files\\VMware\\VMware Tools\\vmtoolsd.exe\" -n vmusr"
},
{
"key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\ShellServiceObjectDelayLoad",
"name": "WebCheck",
"value": "{E6FB5E20-DE35-11CF-9C87-00AA005127ED}"
},
{
"key": "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Lsa",
"name": "Authentication Packages"
},
{
"key": "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Lsa",
"name": "Notification Packages"
},
{
"key": "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Lsa",
"name": "Security Packages"
},
{
"key": "HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\SecurityProviders",
"name": "SecurityProviders",
"value": "credssp.dll, pwdssp.dll"
}
],
"os_info": {
"os_platform": "Windows",
"os_architecture": "Amd64",
"os_version": "6.3.9600",
"os_release_name": "Microsoft Windows Server 2012 R2 Standard",
"os_type": "Server"
},
"memory": {
"total_virtual": 2549530624,
"free_virtual": 765550592,
"total_physical": 2147483648,
"free_physical": 873046016
},
"computer_info": {
"manufacturer": "VMware, Inc.",
"model": "VMware Virtual Platform"
},
"software_installed": [
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Help Viewer 2.2",
"installed_date": "2019-01-07T16:00:00Z",
"version": "2.2.23107"
},
{
"vendor": "Malwarebytes",
"product": "Malwarebytes Management Console",
"installed_date": "2017-03-26T16:00:00Z",
"version": "1.8.0.3431"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.1 SDK",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.51641"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2008 Redistributable - x86 9.0.30729.4148",
"installed_date": "2016-12-04T16:00:00Z",
"version": "9.0.30729.4148"
},
{
"vendor": "Microsoft Corporation",
"product": "IIS 7.5 Express",
"installed_date": "2016-12-04T16:00:00Z",
"version": "7.5.1070"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2017 Policies ",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.1000.169"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2014 Management Objects ",
"installed_date": "2019-01-07T16:00:00Z",
"version": "12.0.2000.8"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.2 Multi-Targeting Pack (ENU)",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.51209"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5 Multi-Targeting Pack",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.50710"
},
{
"vendor": "Malwarebytes",
"product": "Malwarebytes Endpoint Agent",
"installed_date": "2019-11-15T16:00:00Z",
"version": "1.2.0.717"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.1 Multi-Targeting Pack",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.50932"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft System CLR Types for SQL Server 2014",
"installed_date": "2019-01-07T16:00:00Z",
"version": "12.0.2402.11"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2015 Redistributable (x86) - 14.0.23026",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23026.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2013 Redistributable (x64) - 12.0.21005",
"installed_date": "2019-01-07T16:00:00Z",
"version": "12.0.21005.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server Data-Tier Application Framework (x86)",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.4127.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server Management Studio - 17.9.1",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.17289.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual Studio Tools for Applications 2015",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23829"
},
{
"vendor": "Malwarebytes",
"product": "Malwarebytes Management Server",
"installed_date": "2017-03-26T16:00:00Z",
"version": "1.8.0.3431"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.2 Multi-Targeting Pack",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.51209"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual Studio Tools for Applications 2015 Language Support",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23107.20"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server Browser",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.50.1600.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2013 Redistributable (x86) - 12.0.21005",
"installed_date": "2019-01-07T16:00:00Z",
"version": "12.0.21005.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual Studio 2015 Shell (Isolated)",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23107.10"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft .NET Framework 4.5.1 Multi-Targeting Pack (ENU)",
"installed_date": "2019-01-07T16:00:00Z",
"version": "4.5.50932"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2015 Redistributable (x64) - 14.0.23026",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.23026.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2008 R2 (64-bit)",
"installed_date": "2016-12-05T16:00:00Z",
"version": ""
},
{
"vendor": "VMware, Inc.",
"product": "VMware Tools",
"installed_date": "2016-12-04T16:00:00Z",
"version": "10.0.10.4301679"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2008 R2 Native Client",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.50.1600.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server VSS Writer",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.50.1600.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2012 Native Client ",
"installed_date": "2019-01-07T16:00:00Z",
"version": "11.3.6540.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Active Directory Authentication Library for SQL Server",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.1000.169"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft Visual C++ 2008 Redistributable - x64 9.0.30729.6161",
"installed_date": "2016-12-04T16:00:00Z",
"version": "9.0.30729.6161"
},
{
"vendor": "Malwarebytes",
"product": "Malwarebytes version 3.8.4.2971",
"installed_date": "2019-11-15T16:00:00Z",
"version": "3.8.4.2971"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2008 R2 Setup (English)",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.50.1600.1"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft ODBC Driver 13 for SQL Server",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.1000.169"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft System CLR Types for SQL Server 2017",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.1000.169"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2017 T-SQL Language Service ",
"installed_date": "2019-01-07T16:00:00Z",
"version": "14.0.17289.0"
},
{
"vendor": "Microsoft Corporation",
"product": "Microsoft SQL Server 2008 Setup Support Files ",
"installed_date": "2016-12-05T16:00:00Z",
"version": "10.1.2731.0"
}
],
"nics": [
{
"mac_address": "000C29D7A627",
"description": "Intel(R) 82574L Gigabit Network Connection #2",
"ips": [
"192.168.230.140"
]
},
{
"mac_address": "000C29D7A631",
"description": "Intel(R) 82574L Gigabit Network Connection",
"ips": [
"172.16.128.100"
]
}
],
"drives": [
{
"freespace_available": 47086694400,
"volume_label": "",
"drive_format": "NTFS",
"freespace_total": 47086694400,
"name": "C:\\",
"total_size": 64422408192
}
],
"updates_installed": [
{
"installed_date": "2019-01-07T19:04:46Z",
"title": "Update for Windows (KB2999226)"
}
],
"domain_name": "",
"culture": "en-US",
"object_sid": "",
"dhcp_scope_name": "",
"time_zone": "Asia/Shanghai",
"host_name": "WIN-TEN8D4FCOUB",
"fully_qualified_host_name": "WIN-TEN8D4FCOUB",
"object_guid": "",
"plugin_version": "1.2.0.329"
}
MOCK_ENDPOINT_HOSTNAME_OUTPUT = {
'Malwarebytes.Endpoint(val.Hostname == obj.Hostname)': {
'Hostname': MOCK_ASSETS_RESPONSE['host_name'],
'IPAddress': MOCK_ASSETS_RESPONSE['nics'],
'Domain': MOCK_ASSETS_RESPONSE['domain_name'],
'MACAddress': MOCK_ASSETS_RESPONSE['nics'][0]['mac_address'],
'OS': MOCK_ASSETS_RESPONSE['os_info']['os_platform'],
'OSVersion': MOCK_ASSETS_RESPONSE['os_info']['os_version'],
'Model': MOCK_ASSETS_RESPONSE['computer_info']['model'],
'Memory': MOCK_ASSETS_RESPONSE['memory'],
'Assets': MOCK_ASSETS_RESPONSE
},
'Endpoint(val.Hostname == obj.Hostname)': {
'Hostname': MOCK_ASSETS_RESPONSE['host_name'],
'IPAddress': MOCK_ASSETS_RESPONSE['nics'][0]['ips'][0],
'Domain': MOCK_ASSETS_RESPONSE['domain_name'],
'MACAddress': MOCK_ASSETS_RESPONSE['nics'][0]['mac_address'],
'OS': MOCK_ASSETS_RESPONSE['os_info']['os_platform'],
'OSVersion': MOCK_ASSETS_RESPONSE['os_info']['os_version'],
'Model': MOCK_ASSETS_RESPONSE['computer_info']['model'],
'Memory': MOCK_ASSETS_RESPONSE['memory']['total_virtual']
}
}
# patch the API endpoint
requests_mock.post(MOCK_ENDPOINTS, json=MOCK_ENDPOINT_MACHINE_ID_RESP)
requests_mock.get(MOCK_ENDPOINTS + '/' + MOCK_ASSET_ID + '/assets', json=MOCK_ASSETS_RESPONSE)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'ip': MOCK_IP})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
ip = demisto.args().get('ip')
hostname = demisto.args().get('hostname')
list_endpoint_info(account_id, client_id, auth_token, ip, hostname, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert str(MOCK_ENDPOINT_HOSTNAME_OUTPUT) == str(outputs['EntryContext'])
def test_scan_detections_job_id(requests_mock, mocker):
MOCK_ENDPOINT_DETECTION_OUTPUT = {
'Malwarebytes.Scan(val.Job_ID == obj.Job_ID)':
{
'Job_ID': MOCK_JOBS_JOB_ID,
'Status': 'COMPLETED',
'Detections': MOCK_SCAN_DETECTIONS_CLEAR["detections"]
}
}
# patch the API endpoint
requests_mock.get(MOCK_ENDPOINTS_JOBS + '/jobs/' + MOCK_JOBS_JOB_ID, json=MOCK_GET_JOBS_DATA)
requests_mock.get(MOCK_ENDPOINTS_JOBS + '/jobs/' + MOCK_JOBS_JOB_ID, json=MOCK_GET_JOBS_DATA)
requests_mock.get(MOCK_ENDPOINTS_JOBS + '/jobs/' + MOCK_JOBS_JOB_ID, json=MOCK_GET_JOBS_DATA)
requests_mock.get(MOCK_DETECTIONS_PATH, json=MOCK_SCAN_DETECTIONS)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'job_id': MOCK_JOBS_JOB_ID})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
job_id = demisto.args().get('job_id')
scan_detections(account_id, client_id, auth_token, job_id, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_DETECTION_OUTPUT == outputs['EntryContext']
def test_scan_status_job_id(requests_mock, mocker):
MOCK_ENDPOINT_SCANSTATUS_OUTPUT = {
'Malwarebytes.Scan(val.Job_ID == obj.Job_ID)':
{
'Job_ID': MOCK_JOBS_JOB_ID,
'Status': 'COMPLETED'
}
}
# patch the API endpoint
requests_mock.get(MOCK_ENDPOINTS_JOBS + '/jobs/' + MOCK_JOBS_JOB_ID, json=MOCK_GET_JOBS_DATA)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'job_id': MOCK_JOBS_JOB_ID})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
job_id = demisto.args().get('job_id')
scan_status(account_id, client_id, auth_token, job_id, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_ENDPOINT_SCANSTATUS_OUTPUT == outputs['EntryContext']
def test_fetch_incidents_epr(requests_mock, mocker):
# patch the API endpoint
requests_mock.get(MOCK_SA_ENDPOINT, json=MOCK_SA_DATA)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'Fetch_Event_List': MOCK_SA_CHOICE,
'suspicious_activity_severity': 'High'})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
event_list = demisto.params().get('Fetch_Event_List')
fetch_incidents(account_id, client_id, auth_token, event_list, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert outputs['Contents'] == '[]'
def test_fetch_incidents_ep(requests_mock, mocker):
# patch the API endpoint
requests_mock.post(MOCK_RTP_ENDPOINT, json=MOCK_RTP_DETECTIONS_DATA)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'Fetch_Event_List': MOCK_RTP_CHOICE, 'rtp_threat_category': 'Malware'})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
event_list = demisto.params().get('Fetch_Event_List')
fetch_incidents(account_id, client_id, auth_token, event_list, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert outputs['Contents'] == '[]'
def test_open_sa_incident(requests_mock, mocker):
MOCK_OPEN = MOCK_ENDPOINTS + '/' + MOCK_SA_MACHINE_ID + '/sa/' + MOCK_SA_DETECTION_ID + '/open'
MOCK_OPEN_OUTPUT = {'Malwarebytes.SA(val.Machine_ID == obj.Machine_ID)': {'Machine_ID': MOCK_SA_MACHINE_ID}}
# patch the API endpoint
requests_mock.put(MOCK_OPEN, json={}, status_code=201)
# patch the inputs
mocker.patch.object(demisto, 'args', return_value={'machine_id': MOCK_SA_MACHINE_ID, 'detection_id': MOCK_SA_DETECTION_ID})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
machine_id = demisto.args().get('machine_id')
detection_id = demisto.args().get('detection_id')
open_sa_incident(account_id, client_id, auth_token, machine_id, detection_id, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_OPEN_OUTPUT == outputs['EntryContext']
def test_remediate_sa_incident(requests_mock, mocker):
MOCK_REMEDIATE = MOCK_ENDPOINTS + '/' + MOCK_SA_MACHINE_ID + '/sa/' + MOCK_SA_DETECTION_ID + '/remediate'
MOCK_REMEDIATE_OUTPUT = {'Malwarebytes.SA(val.Machine_ID == obj.Machine_ID)': {'Machine_ID': MOCK_SA_MACHINE_ID}}
# patch the API endpoint
requests_mock.post(MOCK_REMEDIATE, json={}, status_code=201)
    # patch the inputs
mocker.patch.object(demisto, 'args', return_value={'machine_id': MOCK_SA_MACHINE_ID, 'detection_id': MOCK_SA_DETECTION_ID})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
machine_id = demisto.args().get('machine_id')
detection_id = demisto.args().get('detection_id')
remediate_sa_incident(account_id, client_id, auth_token, machine_id, detection_id, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_REMEDIATE_OUTPUT == outputs['EntryContext']
def test_close_sa_incident(requests_mock, mocker):
MOCK_CLOSE = MOCK_ENDPOINTS + '/' + MOCK_SA_MACHINE_ID + '/sa/' + MOCK_SA_DETECTION_ID + '/close'
MOCK_CLOSE_OUTPUT = {'Malwarebytes.SA(val.Machine_ID == obj.Machine_ID)': {'Machine_ID': MOCK_SA_MACHINE_ID}}
# patch the API endpoint
requests_mock.put(MOCK_CLOSE, json={}, status_code=201)
    # patch the inputs
mocker.patch.object(demisto, 'args', return_value={'machine_id': MOCK_SA_MACHINE_ID, 'detection_id': MOCK_SA_DETECTION_ID})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
machine_id = demisto.args().get('machine_id')
detection_id = demisto.args().get('detection_id')
close_sa_incident(account_id, client_id, auth_token, machine_id, detection_id, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_CLOSE_OUTPUT == outputs['EntryContext']
def test_get_sa_activities_command_hostname(requests_mock, mocker):
MOCK_SA_OUTPUT = {
'Malwarebytes.Endpoint(val.Suspicious_Activities == obj.Suspicious_Activities)':
{
'Suspicious_Activities': MOCK_SA_DATA['sa']
}
}
# patch the API endpoint
requests_mock.get(MOCK_SA_ENDPOINT, json=MOCK_SA_DATA)
    # patch the inputs
mocker.patch.object(demisto, 'args', return_value={'hostname': 'DESKTOP-664HFM6'})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
hostname = demisto.args().get('hostname')
path = demisto.args().get('path')
get_sa_activities_command(account_id, client_id, auth_token, hostname, path, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_SA_OUTPUT == outputs['EntryContext']
def test_get_sa_activities_command_path(requests_mock, mocker):
MOCK_SA_OUTPUT = {
'Malwarebytes.Endpoint(val.Suspicious_Activities == obj.Suspicious_Activities)':
{
'Suspicious_Activities': MOCK_SA_DATA['sa']
}
}
# patch the API endpoint
requests_mock.get(MOCK_SA_ENDPOINT, json=MOCK_SA_DATA)
    # patch the inputs
mocker.patch.object(demisto, 'args',
return_value={'path': "C:\\USERS\\ROHIN SAMBATH KUMAR\\DESKTOP\\MA2EZOX5\\EKATI5862.EXE"})
# patch the outputs
mocker.patch.object(demisto, 'results')
# run the code
hostname = demisto.args().get('hostname')
path = demisto.args().get('path')
get_sa_activities_command(account_id, client_id, auth_token, hostname, path, USE_SSL)
# assert the outputs
assert demisto.results.call_count == 1
outputs = demisto.results.call_args[0][0]
assert MOCK_SA_OUTPUT == outputs['EntryContext']
|
# A quick intro script based on Distributed Data Parallel (DDP) training in PyTorch,
# simplified here to run on a single GPU: instead of loading ResNet50 via
# torch.hub.load(), we define an Inception v3 model locally (below) and train it
# from scratch on the CIFAR10 dataset.
# Run this python script in a terminal like "python3 DDP_training.py -n 1 -nr 0 -e 1 -b 16"
import os
from datetime import datetime
import argparse
import torchvision
import torchvision.transforms as transforms
import torch
from collections import namedtuple
import warnings
from torch import nn, Tensor
import torch.nn.functional as F
from typing import Callable, Any, Optional, Tuple, List
import pickle as pkl
TIMES = []
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N',
                        help='number of nodes')
parser.add_argument('-nr', '--nr', default=0, type=int,
help='ranking within the nodes')
parser.add_argument('-e', '--epochs', default=1, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch', default=16, type=int,
help='batch size')
    parser.add_argument('-d', '--directory', default='.', type=str,
                        help='parent directory of pickle dump')
parser.add_argument('-f', '--frac', default=.35, type=float,
help='per process memory fraction')
args = parser.parse_args()
torch.cuda.set_per_process_memory_fraction(args.frac)
train(args, gpu=args.nr)
def train(args, gpu):
model = inception_v3(pretrained=False)
model.train()
torch.manual_seed(0)
torch.cuda.set_device(gpu)
model.cuda(gpu)
batch_size = args.batch
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(gpu)
optimizer = torch.optim.SGD(model.parameters(), 1e-4)
# Data loading code
transform = transforms.Compose([
transforms.Resize(325),
transforms.CenterCrop(299),
transforms.ToTensor(),
transforms.Normalize((.485,.456,.406), (0.229,.224,.225))])
trainset = torchvision.datasets.CIFAR10(root='~/data',
train=True,
download=True,
transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=batch_size,
shuffle=False,
num_workers=4,
pin_memory=True)
total_step = min(len(trainloader), 240)
train_start = datetime.now()
trainload_time = datetime.now()
for epoch in range(args.epochs):
start = datetime.now()
for i, (images, labels) in enumerate(trainloader):
if i > total_step:
break
images = images.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 10 == 0 and gpu == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, args.epochs, i + 1, total_step,
loss.item()))
        TIMES.append((datetime.now() - start).total_seconds())
        print("Epoch completed in: " + str(TIMES[-1]) + " seconds")
start = datetime.now()
print("Training done, total epoch {}, total time {}".format(args.epochs, datetime.now()-train_start))
print('===========================')
print(sum(TIMES) / len(TIMES))
print('===========================')
def inception_v3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "Inception3":
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
**Important**: In contrast to the other models the inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
    if pretrained:
        # The pretrained branch relies on torchvision's checkpoint registry;
        # these imports assume a torchvision release that still exposes
        # `model_urls` (roughly <= 0.12). This script calls
        # inception_v3(pretrained=False), so the branch is normally not taken.
        from torch.hub import load_state_dict_from_url
        from torchvision.models.inception import model_urls
        if 'transform_input' not in kwargs:
            kwargs['transform_input'] = True
if 'aux_logits' in kwargs:
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
else:
original_aux_logits = True
kwargs['init_weights'] = False # we are loading weights from a pretrained model
model = Inception3(**kwargs)
state_dict = load_state_dict_from_url(model_urls['inception_v3_google'],
progress=progress)
model.load_state_dict(state_dict)
if not original_aux_logits:
model.aux_logits = False
model.AuxLogits = None
return model
return Inception3(**kwargs)
class Inception3(nn.Module):
def __init__(
self,
num_classes: int = 1000,
aux_logits: bool = True,
transform_input: bool = False,
inception_blocks: Optional[List[Callable[..., nn.Module]]] = None,
init_weights: Optional[bool] = None
) -> None:
super(Inception3, self).__init__()
if inception_blocks is None:
inception_blocks = [
BasicConv2d, InceptionA, InceptionB, InceptionC,
InceptionD, InceptionE
]
if init_weights is None:
warnings.warn('The default weight initialization of inception_v3 will be changed in future releases of '
'torchvision. If you wish to keep the old behavior (which leads to long initialization times'
' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning)
init_weights = True
assert len(inception_blocks) == 6
conv_block = inception_blocks[0]
inception_a = inception_blocks[1]
inception_b = inception_blocks[2]
inception_c = inception_blocks[3]
inception_d = inception_blocks[4]
inception_e = inception_blocks[5]
self.transform_input = transform_input
self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Mixed_5b = inception_a(192, pool_features=32)
self.Mixed_5c = inception_a(256, pool_features=64)
self.Mixed_5d = inception_a(288, pool_features=64)
self.Mixed_6a = inception_b(288)
self.Mixed_6b = inception_c(768, channels_7x7=128)
self.Mixed_6c = inception_c(768, channels_7x7=160)
self.Mixed_6d = inception_c(768, channels_7x7=160)
self.Mixed_6e = inception_c(768, channels_7x7=192)
self.Mixed_7a = inception_d(768)
self.Mixed_7b = inception_e(1280)
self.Mixed_7c = inception_e(2048)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout()
self.fc = nn.Linear(2048, num_classes)
if init_weights:
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
X = stats.truncnorm(-2, 2, scale=stddev)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _transform_input(self, x: Tensor) -> Tensor:
if self.transform_input:
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
return x
def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor]]:
# N x 3 x 299 x 299
x = self.Conv2d_1a_3x3(x)
# N x 32 x 149 x 149
x = self.Conv2d_2a_3x3(x)
# N x 32 x 147 x 147
x = self.Conv2d_2b_3x3(x)
# N x 64 x 147 x 147
x = self.maxpool1(x)
# N x 64 x 73 x 73
x = self.Conv2d_3b_1x1(x)
# N x 80 x 73 x 73
x = self.Conv2d_4a_3x3(x)
# N x 192 x 71 x 71
x = self.maxpool2(x)
# N x 192 x 35 x 35
x = self.Mixed_5b(x)
# N x 256 x 35 x 35
x = self.Mixed_5c(x)
# N x 288 x 35 x 35
x = self.Mixed_5d(x)
# N x 288 x 35 x 35
x = self.Mixed_6a(x)
# N x 768 x 17 x 17
x = self.Mixed_6b(x)
# N x 768 x 17 x 17
x = self.Mixed_6c(x)
# N x 768 x 17 x 17
x = self.Mixed_6d(x)
# N x 768 x 17 x 17
x = self.Mixed_6e(x)
# N x 768 x 17 x 17
x = self.Mixed_7a(x)
# N x 1280 x 8 x 8
x = self.Mixed_7b(x)
# N x 2048 x 8 x 8
x = self.Mixed_7c(x)
# N x 2048 x 8 x 8
# Adaptive average pooling
x = self.avgpool(x)
# N x 2048 x 1 x 1
x = self.dropout(x)
# N x 2048 x 1 x 1
x = torch.flatten(x, 1)
# N x 2048
x = self.fc(x)
# N x 1000 (num_classes)
return x
def forward(self, x: Tensor):
x = self._transform_input(x)
return self._forward(x)
class InceptionA(nn.Module):
def __init__(
self,
in_channels: int,
pool_features: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionA, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)
self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1)
self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)
self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(
self,
in_channels: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionB, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2)
def _forward(self, x: Tensor) -> List[Tensor]:
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(
self,
in_channels: int,
channels_7x7: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionC, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(
self,
in_channels: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionD, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)
def _forward(self, x: Tensor) -> List[Tensor]:
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(
self,
in_channels: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionE, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class BasicConv2d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
**kwargs: Any
) -> None:
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
if __name__ == '__main__':
main()
|
def lag(series, periods):
return series.shift(periods).fillna(0)
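# A minimal sketch of how `lag` behaves, runnable standalone (assumes pandas is
# installed; the example values are arbitrary and not part of the snippet).
if __name__ == '__main__':
    import pandas as pd
    print(lag(pd.Series([1.0, 2.0, 3.0]), 1).tolist())   # [0.0, 1.0, 2.0]
    print(lag(pd.Series([1.0, 2.0, 3.0]), -1).tolist())  # [2.0, 3.0, 0.0]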
|
import unittest, json
from etk.knowledge_graph import KGSchema
from etk.etk import ETK
from etk.etk_exceptions import KgValueError
from datetime import date, datetime
from etk.ontology_api import Ontology
from etk.ontology_namespacemanager import DIG
class TestKnowledgeGraph(unittest.TestCase):
def setUp(self):
sample_doc = {
"projects": [
{
"name": "etk",
"description": "version 2 of etk, implemented by Runqi12 Shao, Dongyu Li, Sylvia lin, Amandeep and others.",
"members": [
"dongyu",
"amandeep",
"sylvia",
"Runqi12"
],
"date": "2007-12-05",
"place": "columbus:georgia:united states:-84.98771:32.46098",
"s": "segment_test_1"
},
{
"name": "rltk",
"description": "record linkage toolkit, implemented by Pedro, Mayank, Yixiang and several students.",
"members": [
"mayank",
"yixiang"
],
"date": ["2007-12-05T23:19:00"],
"cost": -3213.32,
"s": "segment_test_2"
}
]
}
kg_schema = KGSchema(json.load(open('etk/unit_tests/ground_truth/test_config.json')))
etk = ETK(kg_schema)
self.doc = etk.create_document(sample_doc)
def test_add_segment_kg(self) -> None:
sample_doc = self.doc
segments = sample_doc.select_segments("projects[*].s")
sample_doc.kg.add_value("segment", segments)
expected_segments = ["segment_test_1", "segment_test_2"]
self.assertTrue(sample_doc.kg.value["segment"][0]["key"] in expected_segments)
self.assertTrue(sample_doc.kg.value["segment"][1]["key"] in expected_segments)
self.assertTrue('provenances' in sample_doc.value)
provenances = sample_doc.value['provenances']
self.assertTrue(len(provenances) == 2)
self.assertTrue(provenances[0]['reference_type'] == 'location')
def test_KnowledgeGraph(self) -> None:
sample_doc = self.doc
try:
sample_doc.kg.add_value("developer", json_path="projects[*].members[*]")
except KgValueError:
pass
try:
sample_doc.kg.add_value("test_date", json_path="projects[*].date[*]")
except KgValueError:
pass
try:
sample_doc.kg.add_value("test_add_value_date",
value=[date(2018, 3, 28), {}, datetime(2018, 3, 28, 1, 1, 1)])
except KgValueError:
pass
try:
sample_doc.kg.add_value("test_location", json_path="projects[*].place")
except KgValueError:
pass
try:
sample_doc.kg.add_value("test_non_empty", value="")
sample_doc.kg.add_value("test_non_empty", value="non-empty")
sample_doc.kg.add_value("test_empty", value="", discard_empty=False)
sample_doc.kg.add_value("test_empty", value="empty", discard_empty=False)
except KgValueError:
pass
expected_developers = [
{
"value": "dongyu",
"key": "dongyu"
},
{
"value": "amandeep",
"key": "amandeep"
},
{
"value": "sylvia",
"key": "sylvia"
},
{
"value": "Runqi12",
"key": "runqi12"
},
{
"value": "mayank",
"key": "mayank"
},
{
"value": "yixiang",
"key": "yixiang"
}
]
expected_date = [
{
"value": "2007-12-05T00:00:00",
"key": "2007-12-05T00:00:00"
},
{
"value": "2007-12-05T23:19:00",
"key": "2007-12-05T23:19:00"
}
]
expected_add_value_date = [
{
"value": "2018-03-28",
"key": "2018-03-28"
},
{
"value": "2018-03-28T01:01:01",
"key": "2018-03-28T01:01:01"
}
]
expected_location = [
{
"value": "columbus:georgia:united states:-84.98771:32.46098",
"key": "columbus:georgia:united states:-84.98771:32.46098"
}
]
expected_non_empty = [{"key": "non-empty", "value": "non-empty"}]
expected_empty = [{"key": "", "value": ""}, {"key": "empty", "value": "empty"}]
self.assertEqual(expected_developers, sample_doc.kg.value["developer"])
self.assertEqual(expected_date, sample_doc.kg.value["test_date"])
self.assertEqual(expected_location, sample_doc.kg.value["test_location"])
self.assertEqual(expected_add_value_date, sample_doc.kg.value["test_add_value_date"])
self.assertEqual(expected_non_empty, sample_doc.kg.value["test_non_empty"])
self.assertEqual(expected_empty, sample_doc.kg.value["test_empty"])
class TestKnowledgeGraphWithOntology(unittest.TestCase):
def setUp(self):
ontology_content = '''
@prefix : <http://dig.isi.edu/ontologies/dig/> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix schema: <http://schema.org/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
:Person a owl:Class ;
rdfs:subClassOf :Actor, :Biological_Object ;
:common_properties :label, :title, :religion ; .
:has_name a owl:DatatypeProperty ;
schema:domainIncludes :Person ;
schema:rangeIncludes xsd:string ; .
:has_child a owl:ObjectProperty ;
schema:domainIncludes :Person ;
schema:rangeIncludes :Person ; .
'''
ontology = Ontology(ontology_content, validation=False, include_undefined_class=True, quiet=True)
kg_schema = KGSchema(ontology.merge_with_master_config(dict()))
etk = ETK(kg_schema=kg_schema, ontology=ontology, generate_json_ld=True)
etk2 = ETK(kg_schema=kg_schema, ontology=ontology, generate_json_ld=False)
self.doc = etk.create_document(dict(), doc_id='http://xxx/1', type_=[DIG.Person.toPython()])
self.doc2 = etk2.create_document(dict(), doc_id='http://xxx/2', type_=[DIG.Person.toPython()])
def test_valid_kg_jsonld(self):
kg = self.doc.kg
self.assertIn('@id', kg._kg)
self.assertEqual('http://xxx/1', kg._kg['@id'])
self.assertIn('@type', kg._kg)
self.assertIn(DIG.Person.toPython(), kg._kg['@type'])
def test_valid_kg(self):
kg = self.doc2.kg
self.assertNotIn('@id', kg._kg)
self.assertNotIn('@type', kg._kg)
def test_add_value_kg_jsonld(self):
kg = self.doc.kg
field_name = kg.context_resolve(DIG.has_name)
self.assertEqual('has_name', field_name)
kg.add_value(field_name, 'Jack')
self.assertIn({'@value': 'Jack'}, kg._kg[field_name])
field_child = kg.context_resolve(DIG.has_child)
self.assertEqual('has_child', field_child)
child1 = 'http://xxx/2'
child2 = {'@id': 'http://xxx/3', 'has_name': 'Daniels', '@type': [DIG.Person],
'@context': {'has_name': DIG.has_name.toPython()}}
kg.add_value(field_child, child1)
kg.add_value(field_child, child2)
self.assertIn({'@id': 'http://xxx/2'}, kg._kg[field_child])
def test_add_value_kg(self):
kg = self.doc2.kg
field_name = kg.context_resolve(DIG.has_name)
self.assertEqual('has_name', field_name)
kg.add_value(field_name, 'Jack')
self.assertIn({'value': 'Jack', "key": "jack"}, kg._kg[field_name])
|
# flake8: noqa
import base64
import collections
import datetime
import inspect
import os
import os.path as osp
import pickle
import re
import subprocess
import sys
import dateutil.tz
import numpy as np
from garage.core import Serializable
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def flatten(l):
return [item for sublist in l for item in sublist]
class BinaryOp(Serializable):
def __init__(self):
Serializable.quick_init(self, locals())
def rdiv(self, a, b):
return b / a
# def __init__(self, opname, a, b):
# self.opname = opname
# self.a = a
# self.b = b
class VariantDict(AttrDict):
def __init__(self, d, hidden_keys):
super(VariantDict, self).__init__(d)
self._hidden_keys = hidden_keys
def dump(self):
return {k: v for k, v in self.items() if k not in self._hidden_keys}
class VariantGenerator:
"""
Usage:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", ['x', 'y'])
vg.variants() => # all combinations of [1,2,3] x ['x','y']
Supports noncyclic dependency among parameters:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", lambda param1: [param1+1, param1+2])
vg.variants() => # ..
"""
def __init__(self):
self._variants = []
self._populate_variants()
self._hidden_keys = []
for k, vs, cfg in self._variants:
if cfg.get('hide', False):
self._hidden_keys.append(k)
def add(self, key, vals, **kwargs):
self._variants.append((key, vals, kwargs))
def _populate_variants(self):
methods = inspect.getmembers(
self.__class__,
predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
methods = [
x[1].__get__(self, self.__class__) for x in methods
if getattr(x[1], '__is_variant', False)
]
for m in methods:
self.add(m.__name__, m, **getattr(m, '__variant_config', dict()))
def variants(self, randomized=False):
ret = list(self.ivariants())
if randomized:
np.random.shuffle(ret)
return list(map(self.variant_dict, ret))
def variant_dict(self, variant):
return VariantDict(variant, self._hidden_keys)
def to_name_suffix(self, variant):
suffix = []
for k, vs, cfg in self._variants:
if not cfg.get('hide', False):
suffix.append(k + '_' + str(variant[k]))
return '_'.join(suffix)
def ivariants(self):
dependencies = list()
for key, vals, _ in self._variants:
if hasattr(vals, '__call__'):
args = inspect.getfullargspec(vals).args
if hasattr(vals, 'im_self') or hasattr(vals, '__self__'):
# remove the first 'self' parameter
args = args[1:]
dependencies.append((key, set(args)))
else:
dependencies.append((key, set()))
sorted_keys = []
# topo sort all nodes
while len(sorted_keys) < len(self._variants):
# get all nodes with zero in-degree
free_nodes = [k for k, v in dependencies if not v]
if not free_nodes:
error_msg = 'Invalid parameter dependency: \n'
for k, v in dependencies:
if v:
error_msg += k + ' depends on ' + ' & '.join(v) + '\n'
raise ValueError(error_msg)
dependencies = [(k, v) for k, v in dependencies
if k not in free_nodes]
# remove the free nodes from the remaining dependencies
for _, v in dependencies:
v.difference_update(free_nodes)
sorted_keys += free_nodes
return self._ivariants_sorted(sorted_keys)
def _ivariants_sorted(self, sorted_keys):
if not sorted_keys:
yield dict()
else:
first_keys = sorted_keys[:-1]
first_variants = self._ivariants_sorted(first_keys)
last_key = sorted_keys[-1]
last_vals = [v for k, v, _ in self._variants if k == last_key][0]
if hasattr(last_vals, '__call__'):
last_val_keys = inspect.getfullargspec(last_vals).args
if hasattr(last_vals, 'im_self') or hasattr(
last_vals, '__self__'):
last_val_keys = last_val_keys[1:]
else:
last_val_keys = None
for variant in first_variants:
if hasattr(last_vals, '__call__'):
last_variants = last_vals(
**{k: variant[k]
for k in last_val_keys})
for last_choice in last_variants:
yield AttrDict(variant, **{last_key: last_choice})
else:
for last_choice in last_vals:
yield AttrDict(variant, **{last_key: last_choice})
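# Illustrative sketch of VariantGenerator usage (hypothetical parameter names,
# mirroring the class docstring above; not part of garage itself):
#
#   vg = VariantGenerator()
#   vg.add('seed', [1, 2])
#   vg.add('lr', lambda seed: [1e-3, 1e-4])    # may depend on earlier keys
#   vg.add('debug', [True], hide=True)         # hidden from name suffixes
#   for v in vg.variants():
#       print(vg.to_name_suffix(v), v.dump())  # e.g. "seed_1_lr_0.001" {...}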
def variant(*args, **kwargs):
def _variant(fn):
fn.__is_variant = True
fn.__variant_config = kwargs
return fn
    if len(args) == 1 and callable(args[0]):
return _variant(args[0])
return _variant
def query_yes_no(question, default='yes'):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
exp_count = 0
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
def run_experiment(method_call=None,
batch_tasks=None,
exp_prefix='experiment',
exp_name=None,
log_dir=None,
script='garage.experiment.experiment_wrapper',
python_command='python',
dry=False,
env=None,
variant=None,
use_tf=False,
use_gpu=False,
use_cloudpickle=None,
pre_commands=None,
**kwargs):
"""Serialize the method call and run the experiment using the
specified mode.
Args:
method_call (callable): A method call.
batch_tasks (list[dict]): A batch of method calls.
exp_prefix (str): Name prefix for the experiment.
exp_name (str): Name of the experiment.
log_dir (str): Log directory for the experiment.
script (str): The name of the entrance point python script.
python_command (str): Python command to run the experiment.
dry (bool): Whether to do a dry-run, which only prints the
commands without executing them.
env (dict): Extra environment variables.
variant (dict): If provided, should be a dictionary of parameters.
        use_tf (bool): Whether the launched task uses TensorFlow; this
            controls the GPU-related environment configuration below.
use_gpu (bool): Whether the launched task is running on GPU.
This triggers a few configuration changes including certain
environment flags.
use_cloudpickle (bool): Whether to use cloudpickle or not.
pre_commands (str): Pre commands to run the experiment.
"""
assert method_call is not None or batch_tasks is not None, (
'Must provide at least either method_call or batch_tasks')
if use_cloudpickle is None:
for task in (batch_tasks or [method_call]):
assert hasattr(task, '__call__')
use_cloudpickle = True
# ensure variant exists
if variant is None:
variant = dict()
if batch_tasks is None:
batch_tasks = [
dict(
kwargs,
pre_commands=pre_commands,
method_call=method_call,
exp_name=exp_name,
log_dir=log_dir,
env=env,
variant=variant,
use_cloudpickle=use_cloudpickle)
]
global exp_count
if use_tf:
if not use_gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
else:
os.unsetenv('CUDA_VISIBLE_DEVICES')
for task in batch_tasks:
call = task.pop('method_call')
if use_cloudpickle:
import cloudpickle
data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
else:
data = base64.b64encode(pickle.dumps(call)).decode('utf-8')
task['args_data'] = data
exp_count += 1
if task.get('exp_name', None) is None:
task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp,
exp_count)
if task.get('log_dir', None) is None:
task['log_dir'] = (
'{log_dir}/local/{exp_prefix}/{exp_name}'.format(
log_dir=osp.join(os.getcwd(), 'data'),
exp_prefix=exp_prefix.replace('_', '-'),
exp_name=task['exp_name']))
if task.get('variant', None) is not None:
variant = task.pop('variant')
if 'exp_name' not in variant:
variant['exp_name'] = task['exp_name']
task['variant_data'] = base64.b64encode(
pickle.dumps(variant)).decode('utf-8')
elif 'variant' in task:
del task['variant']
task['env'] = task.get('env', dict()) or dict()
task['env']['GARAGE_USE_GPU'] = str(use_gpu)
task['env']['GARAGE_USE_TF'] = str(use_tf)
for task in batch_tasks:
env = task.pop('env', None)
command = to_local_command(
task, python_command=python_command, script=script)
print(command)
if dry:
return
try:
if env is None:
env = dict()
subprocess.call(command, shell=True, env=dict(os.environ, **env))
except Exception as e:
print(e)
if isinstance(e, KeyboardInterrupt):
raise
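# Illustrative call pattern (user-side sketch; `my_task`, the prefix and the
# variant values are hypothetical, not part of garage). With dry=True the
# serialized command is only printed, nothing is executed:
#
#   def my_task(variant_data=None, **kwargs):
#       pass  # training code goes here
#
#   run_experiment(my_task, exp_prefix='demo', variant={'lr': 1e-3}, dry=True)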
# characters *not* in this safe set force quoting (same idea as shlex.quote)
_find_unsafe = re.compile(r'[^a-zA-Z0-9_@%+=:,./-]').search
def _shellquote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
if v is None:
return ''
elif isinstance(v, list):
return ' '.join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
def to_local_command(params,
python_command='python',
script='garage.experiment.experiment_wrapper'):
command = python_command + ' -m ' + script
garage_env = eval(os.environ.get('GARAGE_ENV', '{}'))
for k, v in garage_env.items():
command = '{}={} '.format(k, v) + command
pre_commands = params.pop('pre_commands', None)
post_commands = params.pop('post_commands', None)
if pre_commands is not None or post_commands is not None:
print('Not executing the pre_commands: ', pre_commands,
', nor post_commands: ', post_commands)
for k, v in params.items():
if isinstance(v, dict):
for nk, nv in v.items():
if str(nk) == '_name':
command += ' --{} {}'.format(k, _to_param_val(nv))
else:
command += \
' --{}_{} {}'.format(k, nk, _to_param_val(nv))
else:
command += ' --{} {}'.format(k, _to_param_val(v))
return command
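# For example (hypothetical parameters), a task dict such as
#   {'n_epochs': 5, 'algo': {'_name': 'ppo', 'lr': 0.001}}
# is rendered roughly as
#   python -m garage.experiment.experiment_wrapper --n_epochs 5 --algo ppo --algo_lr 0.001
# with values passed through _shellquote, which quotes anything containing
# unsafe shell characters.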
def concretize(obj):
if isinstance(obj, dict):
# make sure that there's no hidden caveat
ret = dict()
for k, v in obj.items():
ret[concretize(k)] = concretize(v)
return ret
elif isinstance(obj, (list, tuple)):
return obj.__class__(list(map(concretize, obj)))
else:
return obj
|
#
# (c) Copyright 2015,2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017-2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# In case you are tempted to import from non-built-in libraries, think twice:
# this module will be imported by monasca-agent which must therefore be able
# to import any dependent modules.
from __future__ import print_function
from collections import defaultdict
import glob
import json
from monasca_agent.collector import checks
import os
import socket
import subprocess
import threading
import time
OK = 0
WARN = 1
FAIL = 2
UNKNOWN = 3
# name used for metrics reported directly by this module e.g. when a task
# fails or times out. (we need to hard code this name rather than use the
# module name because the module name reported by __name__ is dependent on how
# monasca-agent imports the module)
MODULE_METRIC_NAME = 'cinderlm.cinderlm_check'
SERVICE_NAME = 'block-storage'
def create_task_failed_metric(task_type, task_name, reason=""):
"""Generate metric to report that a task has raised an exception."""
return dict(
metric=MODULE_METRIC_NAME,
dimensions={'type': task_type,
'task': task_name,
'service': SERVICE_NAME,
'hostname': socket.gethostname()},
# value_meta is limited to size 2048, truncate the reason
# to 2047 in length if it could contain a large traceback
value_meta=dict(
msg=('%s task %s execution failed: "%s"'
% (task_type.title(), task_name, reason))[:2047]),
value=FAIL)
def create_timed_out_metric(task_type, task_name):
"""Generate metric to report that a task has timed out."""
return dict(
metric=MODULE_METRIC_NAME,
dimensions={'type': task_type,
'task': task_name,
'service': SERVICE_NAME,
'hostname': socket.gethostname()},
value_meta=dict(
msg='%s task execution timed out: "%s"'
% (task_type.title(), task_name)),
value=FAIL)
def create_success_metric(task_type, task_name):
    """Generate metric to report that a task succeeded."""
return dict(
metric=MODULE_METRIC_NAME,
dimensions={'type': task_type,
'task': task_name,
'service': SERVICE_NAME,
'hostname': socket.gethostname()},
value_meta=dict(
msg='%s task execution succeeded: "%s"'
% (task_type.title(), task_name)),
value=OK)
class CommandRunner(object):
def __init__(self, command):
self.command = command
self.stderr = self.stdout = self.returncode = self.exception = None
self.timed_out = False
self.process = None
def run_with_timeout(self, timeout):
thread = threading.Thread(target=self.run_subprocess)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.timed_out = True
if self.process:
self.process.terminate()
def run_subprocess(self):
try:
self.process = subprocess.Popen(
self.command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.stdout, self.stderr = self.process.communicate()
self.returncode = self.process.returncode
except Exception as e: # noqa
self.exception = e
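# Illustrative standalone use of CommandRunner (the command shown is hypothetical):
#   runner = CommandRunner(['/bin/echo', 'hello'])
#   runner.run_with_timeout(5.0)
#   if runner.timed_out:
#       ...                       # the subprocess was terminated
#   elif runner.exception or runner.returncode:
#       ...                       # it failed to start or exited non-zero
#   else:
#       print(runner.stdout)      # bytes captured from stdout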
class CinderLMScan(checks.AgentCheck):
# set of check tasks implemented, valid tasks are
# 'cinder-services'
# 'cinder-capacity'
# Tasks added here will be executed by the monasca check process
# sequentially in separate processes. We moved the capacity and services
    # tasks to a cron job to improve the performance of monasca check in
# response to CINDER-405
TASKS = (
)
# command args to be used for all calls to shell commands
COMMAND_ARGS = ['/usr/bin/cinder_diag', '--json']
COMMAND_TIMEOUT = 15.0
SUBCOMMAND_PREFIX = '--'
    # list of sub-commands, each of which is appended to a shell command
# with the prefix added
DEFAULT_SUBCOMMANDS = TASKS
def __init__(self, name, init_config, agent_config, instances=None,
logger=None):
super(CinderLMScan, self).__init__(
name, init_config, agent_config, instances)
self.log = logger or self.log
def log_summary(self, task_type, summary):
task_count = len(summary.get('tasks', []))
if task_count == 1:
msg = 'Ran 1 %s task.' % task_type
else:
msg = 'Ran %d %s tasks.' % (task_count, task_type)
# suppress log noise if no tasks were configured
logger = self.log.info if task_count else self.log.debug
logger(msg)
def _run_command_line_task(self, task_name):
# we have to call out to a command line
command = list(self.COMMAND_ARGS)
command.append(self.SUBCOMMAND_PREFIX + task_name)
cmd_str = ' '.join(command)
runner = CommandRunner(command)
try:
runner.run_with_timeout(self.COMMAND_TIMEOUT)
except Exception as e: # noqa
self.log.warn('Command:"%s" failed to run with error:"%s"'
% (cmd_str, e))
metrics = create_task_failed_metric('command',
task_name,
e)
else:
if runner.exception:
self.log.warn('Command:"%s" failed during run with error:"%s"'
% (cmd_str, runner.exception))
metrics = create_task_failed_metric('command',
task_name,
runner.exception)
elif runner.timed_out:
self.log.warn('Command:"%s" timed out after %ss'
% (cmd_str, self.COMMAND_TIMEOUT))
metrics = create_timed_out_metric('command', cmd_str)
elif runner.returncode:
self.log.warn('Command:"%s" failed with status:%s stderr:%s'
% (cmd_str, runner.returncode, runner.stderr))
metrics = create_task_failed_metric('command',
task_name,
runner.stderr)
else:
try:
metrics = json.loads(runner.stdout)
metrics.append(create_success_metric('command', task_name))
except (ValueError, TypeError) as e:
self.log.warn('Failed to parse json: %s' % e)
metrics = create_task_failed_metric('command',
task_name,
e)
return metrics
def _get_metrics(self, task_names, task_runner):
reported = []
summary = defaultdict(list)
for task_name in task_names:
summary['tasks'].append(task_name)
metrics = task_runner(task_name)
if not isinstance(metrics, list):
metrics = [metrics]
for metric in metrics:
reported.append(metric)
return reported, summary
def _load_json_file(self, json_file):
with open(json_file, 'rb') as f:
all_json = json.load(f)
return all_json
def _get_file_metrics(self, argsfile):
reported = []
errors = []
jfile = None
for jfile in glob.glob(argsfile):
if os.path.isfile(jfile):
try:
reported.extend(self._load_json_file(jfile))
except Exception as e:
errors.extend(['Error: error loading JSON file %s: %s' %
(jfile, e)])
continue
else:
errors.extend(['Error: specified input(%s) is not a file' %
jfile])
continue
if jfile is None:
errors.extend(['Warning: no specified input file(%s) exists' %
argsfile])
# emit errors but continue to print json
for msg in errors:
self.log.error(msg)
# Fake out the timestamp with the current timestamp - we are submitting
# as if its NOW
ts = time.time()
for result in reported:
result['timestamp'] = ts
return reported
def _csv_to_list(self, csv):
return [f.strip() for f in csv.split(',') if f]
def _load_instance_config(self, instance):
self.log.debug('instance config %s' % str(instance))
if instance.get('subcommands') is None:
self.subcommands = self.DEFAULT_SUBCOMMANDS
else:
self.subcommands = self._csv_to_list(instance.get('subcommands'))
self.log.debug('Using subcommands %s' % str(self.subcommands))
def check(self, instance):
self._load_instance_config(instance)
# run command line tasks
all_metrics, summary = self._get_metrics(
self.subcommands, self._run_command_line_task)
self.log_summary('command', summary)
# gather metrics logged to directory
all_metrics.extend(
self._get_file_metrics('/var/cache/cinderlm/*.json'))
for metric in all_metrics:
# apply any instance dimensions that may be configured,
# overriding any dimension with same key that check has set.
metric['dimensions'] = self._set_dimensions(metric['dimensions'],
instance)
self.log.debug(
'metric %s %s %s %s'
% (metric.get('metric'), metric.get('value'),
metric.get('value_meta'), metric.get('dimensions')))
try:
self.gauge(**metric)
except Exception as e: # noqa
self.log.exception('Exception while reporting metric: %s' % e)
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def max_chunks_to_sorted(self, arr: List[int]) -> int:
stack = []
for num in arr:
if stack and num < stack[-1]:
head = stack.pop()
while stack and num < stack[-1]:
stack.pop()
stack.append(head)
else:
stack.append(num)
return len(stack)
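# A minimal sketch exercising the stack-based approach above; the inputs are
# arbitrary examples and are not part of the original solution.
if __name__ == '__main__':
    solution = Solution()
    # a fully descending array must stay in a single chunk
    assert solution.max_chunks_to_sorted([5, 4, 3, 2, 1]) == 1
    # [2, 1] | [3] | [4] | [4] sorts correctly chunk by chunk
    assert solution.max_chunks_to_sorted([2, 1, 3, 4, 4]) == 4
    print('ok')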
|
"""
Exception classes used in the sample runner.
"""
class AccountStateError(Exception):
"For when an account doesn't have the right preconditions to support a sample."
pass
|
import os.path
import re
import time
import hashlib
import pickle
import io, gzip
import shutil
import json
import fnmatch
import glob
import sys
from .logger import flogger
from .bytesize import bytes_scaled
from .codex import phash
from .configure import load as load_config
elog = flogger(label='XDRIVE')
## init
_updates = False
config = load_config()
if 'root_dir' in config.xdrive:
ROOT = config.xdrive.root_dir
else:
ROOT = "X:"
class Folder():
def __init__(self, path=None):
self.path = os.path.join(ROOT, path)
_, self.folders, self.files = next(os.walk(self.path, topdown=True))
self.is_folder = True
def folder(self, f):
return Folder(os.path.join(self.path, f))
def file(self, f, **kwargs):
return File(os.path.join(self.path, f))
def create(self, *args):
os.makedirs(self.path, exist_ok=True)
class File():
def __init__(self, path=None):
self.path = os.path.join(ROOT, path)
self.is_folder = False
def upload(self, in_stream):
os.makedirs(os.path.dirname(self.path),exist_ok=True)
with open(self.path, mode='wb') as f:
shutil.copyfileobj(in_stream, f)
def download(self, out_stream):
with open(self.path, mode='rb') as f:
shutil.copyfileobj(f, out_stream)
@property
def size(self):
return os.path.getsize(self.path)
@property
def checksum(self):
return _sha512_checksum(self.path)
def FileOrFolder(path):
if os.path.isdir(path):
return Folder(path)
else:
return File(path)
def _folder_to_path(f):
if isinstance(f,Folder):
return f.path
if isinstance(f,File):
return f.path
return f
def pth(*arg):
return "/".join(_folder_to_path(f) for f in arg).replace('//','/').replace('\\','/')
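# e.g. (hypothetical paths) pth('Share/project', 'outputs', 'run1.csv')
#   -> 'Share/project/outputs/run1.csv'
# Folder and File arguments are reduced to their .path before joining; doubled
# slashes are collapsed and backslashes converted to forward slashes.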
def create_folder(folder_path, retries=10, interval=1):
"""
    Create a new folder on the X drive (filesystem analogue of the egnyte helper).
    :param folder_path: path of the folder to create
    :return: Folder
"""
if not os.path.exists(folder_path):
os.makedirs(folder_path, exist_ok=True)
return Folder(folder_path)
def create_subfolder(folder, subfoldername):
f = pth(folder,subfoldername)
os.makedirs(f, exist_ok=True)
return Folder(f)
def upload_file(local_file, xdrive_path, rename=None, add_suffix=None):
if rename is None:
basename = os.path.basename(local_file)
else:
basename = rename
if add_suffix:
basename = "{1}{0}{2}".format(add_suffix, *os.path.splitext(basename))
file_obj = File( pth(xdrive_path,basename) )
with open(local_file, "rb") as fp:
file_obj.upload(fp)
return
def upload_file_gz(local_file, egnyte_path, progress_callbacks=None):
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
basename = os.path.basename(local_file)+'.gz'
file_obj = File(pth(egnyte_path, basename))
buffer = io.BytesIO()
with open(local_file, 'rb') as f_in:
with gzip.open(buffer, 'wb') as buffer_out:
shutil.copyfileobj(f_in, buffer_out)
progress_callbacks.upload_start(local_file, file_obj, buffer.tell())
buffer.seek(0)
file_obj.upload(buffer)
progress_callbacks.upload_finish(file_obj)
def upload_dict_json(dictionary, filename, egnyte_path, progress_callbacks=None):
"""
Parameters
----------
dictionary : dict
The dictionary to convert to json and upload to egnyte
filename : str
A filename for the file that will be created in egnyte
egnyte_path : str
The (existing) folder in egnyte where the file will be created
progress_callbacks
"""
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
basename = os.path.basename(filename)
if basename[-5:] != '.json':
basename += '.json'
file_obj = File(pth(egnyte_path, basename))
buffer = io.BytesIO(json.dumps(dictionary).encode('UTF-8'))
    progress_callbacks.upload_start("dictionary", file_obj, len(buffer.getvalue()))
file_obj.upload(buffer)
progress_callbacks.upload_finish(file_obj)
def download_file(egnyte_file, local_path, overwrite=False, mkdir=True, progress_callbacks=None):
if not os.path.exists(local_path) and mkdir:
os.makedirs(local_path)
bulk_download([egnyte_file], local_path, overwrite=overwrite, log=(progress_callbacks is not None))
def download_file_gz(egnyte_file, local_path, overwrite=False, mkdir=True, progress_callbacks=None, retries=10, interval=1):
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
if not os.path.exists(local_path) and mkdir:
os.makedirs(local_path)
if isinstance(egnyte_file, str) and egnyte_file[-3:] != '.gz':
egnyte_file = egnyte_file+'.gz'
basename = os.path.basename(egnyte_file)[:-3]
if not overwrite and os.path.exists(os.path.join(local_path, basename)):
raise FileExistsError(os.path.join(local_path, basename))
file_obj = File(pth(egnyte_file))
buffer = io.BytesIO()
progress_callbacks.download_start(local_path, file_obj, file_obj.size)
file_obj.download(buffer)
buffer.seek(0)
with gzip.open(buffer, 'rb') as buffer_in:
with open(os.path.join(local_path, basename), 'wb') as f_out:
shutil.copyfileobj(buffer_in, f_out)
progress_callbacks.download_finish(file_obj)
from .zipdir import verify_hash_file
if os.path.exists(egnyte_file[:-3] + ".sha256.txt"):
verify_hash_file(os.path.join(local_path, basename), hash_dir=os.path.dirname(egnyte_file))
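# Typical round trip with the gz helpers (hypothetical paths, for illustration
# only):
#   upload_file_gz('results/output.csv', 'Share/project/outputs')
#   download_file_gz('Share/project/outputs/output.csv.gz', 'local_results/')
# download_file_gz appends '.gz' when it is missing from the remote name and
# strips it from the local filename it writes.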
def download_dict_json(egnyte_file, progress_callbacks=None, retries=10, interval=1):
"""
Parameters
----------
egnyte_file : str
The location in egnyte for the json file to be loaded.
progress_callbacks
Returns
-------
dict
"""
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
import json, io
if isinstance(egnyte_file, str) and egnyte_file[-5:] != '.json':
egnyte_file = egnyte_file+'.json'
file_obj = File(pth(egnyte_file))
buffer = io.BytesIO()
progress_callbacks.download_start('dictionary', file_obj, file_obj.size)
file_obj.download(buffer)
buffer.seek(0)
result = json.loads(buffer.getvalue().decode('UTF-8'))
progress_callbacks.download_finish(file_obj)
return result
class ProgressCallbacks():
"""
    This object is used for bulk transfers (uploads and downloads).
    Inherit from this and override any of the callbacks you'd like to handle.
"""
def getting_info(self, cloud_path):
"""Getting information about an object. Called for directories and unknown paths."""
elog("getting info on {}".format(cloud_path))
def got_info(self, cloud_obj):
"""Got information about an object."""
def creating_directory(self, cloud_folder):
"""Creating a directory."""
elog("creating directory {}".format(cloud_folder))
def download_start(self, local_path, cloud_file, size):
"""Starting to download a file."""
elog("downloading {1} ({2})".format(local_path, cloud_file.path, bytes_scaled(size)))
def download_progress(self, cloud_file, size, downloaded):
"""Some progress in file download."""
def download_finish(self, cloud_file):
"""Finished downloading a file."""
def upload_start(self, local_path, cloud_file, size):
"""Starting to upload a file."""
elog("uploading {1} ({2})".format(local_path, cloud_file.path, bytes_scaled(size)))
def upload_progress(self, cloud_file, size, uploaded):
"""Some progress in file upload."""
def upload_finish(self, cloud_file):
"""Finished uploading a file."""
def finished(self):
"""Called after all operations."""
elog("finished")
def skipped(self, cloud_obj, reason):
"""Object has been skipped because of 'reason'"""
elog("skipped {} ({})".format(cloud_obj, reason))
DEFAULT_EXCLUDES = fnmatch.translate(".*")
DEFAULT_EXCLUDES_RE = re.compile(DEFAULT_EXCLUDES).match
def make_excluded(excludes=None):
if excludes is None:
return DEFAULT_EXCLUDES_RE
patterns = [DEFAULT_EXCLUDES]
patterns.extend(fnmatch.translate(x) for x in excludes)
return re.compile("|".join(patterns)).match
def generate_paths(roots, excludes=None):
"""
Walk set of paths in local filesystem, and for each file and directory generate a tuple of
(is directory, absolute path, path relative root used to get to that file)
"""
excluded = make_excluded(excludes)
for root in roots:
base = os.path.basename(root)
if not excluded(base):
is_dir = os.path.isdir(root)
yield is_dir, root, base
if is_dir:
prefix_len = len(os.path.dirname(root))
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=True):
relpath = dirpath[prefix_len:].strip('/')
for is_dir, names in ((False, filenames), (True, dirnames)):
for name in names:
if not excluded(name):
yield is_dir, os.path.join(dirpath, name), "%s/%s" % (relpath, name)
def bulk_upload(local_dir, xdrive_path, exclude=None, progress_callbacks=None):
"""
    Upload many files or directories to the X drive.
    * local_dir - list of local file or directory paths to upload
    * xdrive_path - target folder on the X drive
    * exclude - optional list of glob patterns to skip
    * progress_callbacks - Callback object (see ProgressCallbacks)
"""
if not local_dir:
return
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks() # no-op callbacks
target_folder = Folder(xdrive_path)
progress_callbacks.creating_directory(target_folder)
target_folder.create(True)
for is_dir, local_path, cloud_path in generate_paths(local_dir, exclude):
if is_dir:
cloud_dir = target_folder.folder(cloud_path)
progress_callbacks.creating_directory(cloud_dir)
cloud_dir.create(True)
else:
size = os.path.getsize(local_path)
if size: # empty files cannot be uploaded
cloud_file = target_folder.file(cloud_path, size=size)
with open(local_path, "rb") as fp:
progress_callbacks.upload_start(local_path, cloud_file, size)
cloud_file.upload(fp)
progress_callbacks.upload_finish(cloud_file)
progress_callbacks.finished()
def _sha512_checksum(filename, block_size=65536):
sha512 = hashlib.sha512()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha512.update(block)
return sha512.hexdigest()
def _pines_bulk_download_worker(items, root_path, local_dir, overwrite, progress_callbacks):
import collections, shutil
any_updates = False
root_len = len(root_path.rstrip('/')) + 1
queue = collections.deque(items)
while True:
try:
obj = queue.popleft()
except IndexError:
break
relpath = obj.path[root_len:].strip('/')
local_path = os.path.join(local_dir, relpath.replace('/', os.sep))
dir_path = os.path.dirname(local_path)
if not os.path.isdir(dir_path):
if os.path.exists(dir_path):
if overwrite:
os.unlink(local_path)
else:
progress_callbacks.skipped(obj, "Existing file conflicts with cloud folder")
continue
os.makedirs(dir_path)
        if obj.is_folder:
            # schedule contents for later, files first
            # (Folder.files / Folder.folders hold plain names from os.walk,
            #  so wrap them back into File / Folder objects before queueing)
            queue.extend(obj.file(f) for f in obj.files)
            queue.extend(obj.folder(d) for d in obj.folders)
else:
if os.path.exists(local_path):
if overwrite:
# read local checksum
if _sha512_checksum(local_path) != obj.checksum:
if os.path.isdir(local_path) and not os.path.islink(local_path):
shutil.rmtree(local_path)
else:
os.unlink(local_path)
else:
continue
else:
progress_callbacks.skipped(obj, "Existing file conflicts with cloud file")
continue
progress_callbacks.download_start(local_path, obj, obj.size)
            # File.download() writes into a stream, so open the local target first
            with open(local_path, 'wb') as out_stream:
                obj.download(out_stream)
any_updates = True
progress_callbacks.download_finish(obj)
return any_updates
def _pines_bulk_download( paths, local_dir, overwrite=False, progress_callbacks=None):
"""
    Download many files or directories from the X drive.
    * paths - list of X-drive paths to download
    * local_dir - local directory to download into
* progress_callbacks - Callback object (see ProgressCallbacks)
"""
any_updates = False
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
for path in paths:
progress_callbacks.getting_info(path)
obj = FileOrFolder(path)
        root_path = path[:path.rstrip('/').rfind('/')]  # take all segments except the last one
if obj.is_folder:
            # wrap the plain names from os.walk back into File / Folder objects
            items = [obj.file(f) for f in obj.files] + [obj.folder(d) for d in obj.folders]
else:
items = (obj,)
any_updates = _pines_bulk_download_worker(items, root_path, local_dir, overwrite, progress_callbacks)
progress_callbacks.finished()
return any_updates
def bulk_download( egnyte_path, local_dir, log=True, overwrite=False, progress_callbacks=None ):
p_callbacks = progress_callbacks or (ProgressCallbacks() if log else None)
if isinstance(egnyte_path, str):
return _pines_bulk_download([egnyte_path], local_dir, overwrite=overwrite, progress_callbacks=p_callbacks)
else:
return _pines_bulk_download(egnyte_path, local_dir, overwrite=overwrite, progress_callbacks=p_callbacks)
def import_remote_python_package( egnyte_path, package_name=None, log=True ):
if package_name is None:
if egnyte_path[-1] in ('/','\\'):
package_name = os.path.basename(egnyte_path[:-1])
else:
package_name = os.path.basename(egnyte_path[:])
import sys, importlib
from .temporary import TemporaryDirectory
tempdir = TemporaryDirectory()
any_updates = bulk_download([egnyte_path], tempdir.name, overwrite=True, log=log)
if tempdir.name not in sys.path:
sys.path.insert(0, tempdir.name)
importlib.invalidate_caches()
if package_name in sys.modules:
if any_updates:
return importlib.reload(sys.modules[package_name])
else:
return sys.modules[package_name]
else:
return importlib.import_module(package_name)
# from pines.egnyte import import_remote_python_package
# import_remote_python_package('/Private/jnewman/PyAccess/werter', 'werter')
def glob_upload_gz(pattern, egnyte_path, log=True, dryrun=False):
"""
    Upload a gzipped version of all files matching pattern into egnyte.
Parameters
----------
pattern : str
A glob pattern
egnyte_path : str or egnyte.Folder
log : bool, default True
Log the results
dryrun : bool, default False
If true, just log what would be done, don't actually upload the files.
"""
for filename in glob.glob(pattern):
if log:
elog(f"found file for upload:{filename}")
if not dryrun:
upload_file_gz(filename, egnyte_path, progress_callbacks=ProgressCallbacks() if log else None)
def pip_install_1(xdrive_python_package_file):
import pip
pip.main(['install', xdrive_python_package_file])
def pip_install(package_names=None, xdrive_repo="X:/Share/CHI/Shared/JPN/PythonRepo/simple/"):
import pip
if package_names is None:
if len(sys.argv)>0 and (('pines_pip' in sys.argv[0]) or ('pines-pip' in sys.argv[0])):
if len(sys.argv)>1 and sys.argv[1]=='install': # ignore install command, it is implied here
package_names = " ".join(sys.argv[2:])
else:
package_names = " ".join(sys.argv[1:])
try:
pkgs = package_names.split()
except AttributeError:
print("NO PACKAGES GIVEN")
else:
for pkg in pkgs:
result = pip.main(["install", "--upgrade", f'--index-url=file:///{xdrive_repo}', pkg])
if result!=0:
# failure
raise ModuleNotFoundError(pkg)
def _pip_install_entry(args=None):
return pip_install()
def pip_rebuild(xdrive_repo="X:/Share/CHI/Shared/JPN/PythonRepo", private_repo=r"\\camtdm01\c$\Apache24\htdocs"):
import libpip2pi.commands
libpip2pi.commands.dir2pi(argv=["dir2pi",xdrive_repo, '-S'])
import shutil, os
shutil.copytree(os.path.join(xdrive_repo, 'simple'), private_repo)
|
# 42 BSQ — My Map Generator
from sys import argv
from random import choices
BALANCE = 0.035
MAP_BLOCKS = '.ox'
try:
length, width, density = int(argv[1]), int(argv[2]), (int(argv[3]) * BALANCE)
print(f'{length}{MAP_BLOCKS}')
for i in range(length):
print(''.join(choices(MAP_BLOCKS[:2], [1, density], k=width)))
except Exception:
print('Usage: length width density')
|
import logging
import sys
from .. import settings
logger = logging.getLogger(__name__)
def hosts(to_write=False):
"""if to_write is True, then only non-read-only hosts
will be returned
"""
return settings.server_manager.active_hosts(to_write=to_write)
def get_info_bulk(urls):
"""
Returns (active_hosts, results):
active_hosts - dict mapping active hostnames to host urls
results - dict mapping each url to a tuple (location_info, data_info), where
location_info lists the active hosts that have the data and data_info holds
metadata about the url (file size, md5s, etc.)
"""
results = {}
active_hosts = hosts()
for u in urls:
location_info, data_info = settings.catalog._get_info(u)
for host in list(location_info):
if host not in active_hosts:
location_info.remove(host)
results[u] = location_info, data_info
return active_hosts, results
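# Illustrative shape of the return value (host names and metadata keys are hypothetical):
# active_hosts -> {'host-a': 'http://host-a:6323', ...}
# results      -> {url: (location_info, data_info), ...}, where location_info only
#                 retains hosts that are currently active.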
def search_path(path_pattern):
return settings.catalog.search(path_pattern)
def chunked_copy(url, length, host):
#print >> sys.stderr ,"chunked copy of %s from %s to %s" % (url, host, settings.host_url)
logger.info("chunked copy of %s from %s to %s" % (url, host, settings.host_url))
iterator = settings.catalog.get_chunked_iterator(url, length, hostname=host)
settings.catalog.write_chunked(iterator, url, is_new=False)
def delete(url):
settings.catalog.delete(url)
def bootstrap(url, data_type='object', fmt='cloudpickle'):
settings.catalog.bootstrap(url, data_type=data_type, fmt=fmt)
|
"""
Utilities used within the Demographic and Economic Forecasting Model
Includes pandas DataFrame reshaping (pivot) and parsing yaml file
"""
import pandas as pd
import yaml
def apply_pivot(df):
"""
Pivot the migration rates DataFrame such that rates for each of the 4 mig
rate types are in separate columns instead of in a single rate column.
Parameters
----------
df : pandas DataFrame with migration rates for each of the four migration
rate types in a single column
Returns
-------
df : pandas DataFrame with rate values in 4 columns corresponding to 4
rate types (Domestic In, Domestic Out, Foreign In, Foreign Out)
"""
df = df.reset_index(drop=False)
df = pd.pivot_table(df, values='rate',
index=['age','race_ethn','sex','yr'],
columns=['migration'])
df = df.reset_index()
return df
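# Illustrative reshape (hypothetical values): a long frame with columns
# [age, race_ethn, sex, yr, migration, rate] becomes a wide frame with one
# rate column per migration type, e.g.
#   age  race_ethn  sex  yr    Domestic In  Domestic Out  Foreign In  Foreign Out
#    20          1    M  2020        0.012         0.015       0.003        0.001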
def yaml_to_dict(yaml_file, yaml_section):
"""
Load YAML from a file
Read specific section to dictionary
Parameters
----------
yaml_file : File name from which to load YAML
yaml_section : Section of YAML file to process
Returns
-------
dict
Conversion from YAML for a specific section.
"""
with open(yaml_file,'r') as f:
d = yaml.safe_load(f)[yaml_section]
return d
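# Usage sketch (hypothetical file and section names):
# rates_config = yaml_to_dict('model_config.yaml', 'migration')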
|
# coding: utf8
"""
Implementation of :py:class:`ConstrainedOptimizer` class, which has 2 main
methods:
- :py:meth:`~ConstrainedOptimizer.zero_grad`
- :py:meth:`~ConstrainedOptimizer.step`
"""
from typing import Callable, Optional
import torch
from .problem import CMPState, Formulation
class ConstrainedOptimizer:
"""
Optimizes a :py:class:`~cooper.problem.ConstrainedMinimizationProblem`
given its :py:class:`~cooper.problem.Formulation`.
A ``ConstrainedOptimizer`` includes one or two
:class:`torch.optim.Optimizer`\\s, for the primal and dual variables
associated with the ``Formulation``, respectively.
A ``ConstrainedOptimizer`` can be used on constrained or unconstrained
``ConstrainedMinimizationProblem``\\s. Please refer to the documentation
of the :py:class:`~cooper.problem.ConstrainedMinimizationProblem` and
:py:class:`~cooper.problem.Formulation` classes for further details on
handling unconstrained problems.
Args:
formulation: ``Formulation`` of the ``ConstrainedMinimizationProblem``
to be optimized.
primal_optimizer: Fully instantiated ``torch.optim.Optimizer`` used
to optimize the primal parameters (e.g. model parameters).
dual_optimizer: Partially instantiated ``torch.optim.Optimizer``
used to optimize the dual variables (e.g. Lagrange multipliers).
Defaults to None.
When dealing with an unconstrained problem, should be set to None.
dual_scheduler: Partially instantiated
``torch.optim.lr_scheduler._LRScheduler``
used to schedule the learning rate of the dual variables.
Defaults to None.
When dealing with an unconstrained problem, should be set to None.
alternating: Whether to alternate parameter updates between primal and
dual parameters. Otherwise, do simultaneous parameter updates.
Defaults to False.
dual_restarts: If True, perform "restarts" on the Lagrange
multipliers associated with inequality constraints: whenever the
constraint is satisfied, directly set the multiplier to zero.
Defaults to False.
"""
def __init__(
self,
formulation: Formulation,
primal_optimizer: torch.optim.Optimizer,
dual_optimizer: Optional[torch.optim.Optimizer] = None,
dual_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
alternating: bool = False,
dual_restarts: bool = False,
):
self.formulation = formulation
self.cmp = self.formulation.cmp
self.primal_optimizer = primal_optimizer
self.dual_optimizer = dual_optimizer
self.dual_scheduler = dual_scheduler
self.alternating = alternating
self.dual_restarts = dual_restarts
self.sanity_checks()
def sanity_checks(self):
"""
Perform sanity checks on the initialization of ``ConstrainedOptimizer``.
Raises:
NotImplementedError: The ``Formulation`` has an augmented Lagrangian
coefficient and ``primal_optimizer`` has an ``extrapolation``
function. This is not supported because of possible unexpected
behavior.
RuntimeError: The ``primal_optimizer`` has an ``extrapolation``
function and ``alternating`` was set to True. Mixing
extrapolation and alternating updates is not supported.
RuntimeError: a ``dual_optimizer`` was provided but the
``ConstrainedMinimizationProblem`` of formulation was
unconstrained. There are no dual variables to optimize.
RuntimeError: a ``dual_scheduler`` was provided but the
``ConstrainedMinimizationProblem`` of formulation was
unconstrained. There are no dual variables and no
``dual_optimizer`` for learning rate scheduling.
RuntimeError: a ``dual_scheduler`` was provided but no
``dual_optimizer`` was provided. Can not schedule the learning
rate of an unknown optimizer.
RuntimeError: the considered ``ConstrainedMinimizationProblem`` is
unconstrained, but the provided ``primal_optimizer`` has an
``extrapolation`` function. This is not supported because of
unexpected behavior when using extrapolation to update the
primal parameters without any dual parameters.
RuntimeError: One of ``primal_optimizer`` or ``dual_optimizer`` has
an extrapolation function while the other does not.
Extrapolation on only one player is not supported.
"""
is_alternating = self.alternating
is_aug_lag = hasattr(self.formulation, "aug_lag_coefficient") and (
self.formulation.aug_lag_coefficient > 0
)
# We assume that both optimizers agree on whether to use extrapolation
# or not, so we use the primal optimizer as reference for deciding
# whether to use extrapolation. See check below for matching
# extrapolation behavior.
self.is_extrapolation = hasattr(self.primal_optimizer, "extrapolation")
if is_aug_lag and self.is_extrapolation:
raise NotImplementedError(
"""It is currently not possible to use extrapolation and an
augmented Lagrangian formulation"""
)
if is_alternating and self.is_extrapolation:
raise RuntimeError(
"""Should not use extrapolation and alternating updates
simultaneously. Please disable one of these two modes."""
)
if not (self.cmp.is_constrained) and (self.dual_optimizer is not None):
raise RuntimeError(
"""Provided a dual optimizer, but the `Problem` class claims to
be unconstrained."""
)
if self.dual_scheduler is not None:
if not (self.cmp.is_constrained):
raise RuntimeError(
"""A dual scheduler was provided, but the `Problem` class
claims to be unconstrained."""
)
if self.dual_optimizer is None:
raise RuntimeError(
"""A dual scheduler was provided, but no dual optimizer
was provided."""
)
if not (self.cmp.is_constrained) and self.is_extrapolation:
raise RuntimeError(
"""Using an extrapolating optimizer an unconstrained problem
might result in unexpected behavior. Consider using a
non-extrapolating optimizer instead."""
)
if hasattr(self.primal_optimizer, "extrapolation") != hasattr(
self.dual_optimizer, "extrapolation"
):
raise RuntimeError(
"""Primal and dual optimizers do not agree on whether to use
extrapolation or not."""
)
def step(
self,
closure: Optional[Callable[..., CMPState]] = None,
*closure_args,
**closure_kwargs
):
"""
Performs a single optimization step on both the primal and dual
variables. If ``dual_scheduler`` is provided, a scheduler step is
performed on the learning rate of the ``dual_optimizer``.
Args:
closure: Closure ``Callable`` required for re-evaluating the
objective and constraints when performing alternating or
extrapolating updates.
Defaults to None.
*closure_args: Arguments to be passed to the closure function
when re-evaluating.
**closure_kwargs: Keyword arguments to be passed to the closure
function when re-evaluating.
"""
if self.cmp.is_constrained and not hasattr(self.dual_optimizer, "param_groups"):
assert self.dual_optimizer is not None and callable(self.dual_optimizer)
# Checks if needed and instantiates dual_optimizer
self.dual_optimizer = self.dual_optimizer(self.formulation.dual_parameters)
if self.dual_scheduler is not None:
assert callable(self.dual_scheduler), "dual_scheduler must be callable"
# Instantiates the dual_scheduler
self.dual_scheduler = self.dual_scheduler(self.dual_optimizer)
if self.is_extrapolation or self.alternating:
assert closure is not None
if self.is_extrapolation:
# Store parameter copy and compute t+1/2 iterates
self.primal_optimizer.extrapolation() # type: ignore
if self.cmp.is_constrained:
# Call to dual_step flips sign of gradients, then triggers call
# to dual_optimizer.extrapolation and projects dual variables
self.dual_step(call_extrapolation=True)
# Zero gradients and recompute loss at t+1/2
self.zero_grad()
# For extrapolation, we need closure args here as the parameter
# values will have changed in the update applied on the
# extrapolation step
lagrangian = self.formulation.composite_objective(
closure, *closure_args, **closure_kwargs
) # type: ignore
# Populate gradients at extrapolation point
self.formulation.custom_backward(lagrangian)
# After this, the calls to `step` will update the stored copies with
# the newly computed gradients
self.primal_optimizer.step()
if self.cmp.is_constrained:
self.dual_step()
if self.dual_scheduler is not None:
# Do a step on the dual scheduler after the actual step on
# the dual parameters. Intermediate updates that take
# place inside the extrapolation process do not perform a
# call to the scheduler's step method
self.dual_scheduler.step()
else:
self.primal_optimizer.step()
if self.cmp.is_constrained:
if self.alternating:
# TODO: add test for this
# Once having updated primal parameters, re-compute gradient
# Skip gradient wrt model parameters to avoid wasteful
# computation, as we only need gradient wrt multipliers.
with torch.no_grad():
assert closure is not None
self.cmp.state = closure(*closure_args, **closure_kwargs)
lagrangian = self.formulation.composite_objective(self.cmp) # type: ignore
# Zero-out gradients for dual variables since they were
# already populated earlier.
# We also zero-out primal gradients for safety although not
# really necessary.
self.zero_grad(ignore_primal=False, ignore_dual=False)
# Not passing lagrangian since we only want to update the
# gradients for the dual variables
self.formulation._populate_gradients(
lagrangian=None, ignore_primal=True
)
self.dual_step()
if self.dual_scheduler is not None:
self.dual_scheduler.step()
def dual_step(self, call_extrapolation=False):
# Flip gradients for multipliers to perform ascent.
# We only do the flipping *right before* applying the optimizer step to
# avoid accidental double sign flips.
for multiplier in self.formulation.state():
if multiplier is not None:
multiplier.grad.mul_(-1.0)
# Update multipliers based on current constraint violations (gradients)
if call_extrapolation:
self.dual_optimizer.extrapolation()
else:
self.dual_optimizer.step()
if self.formulation.ineq_multipliers is not None:
if self.dual_restarts:
# "Reset" value of inequality multipliers to zero as soon as
# solution becomes feasible
self.restart_dual_variables()
# Apply projection step to inequality multipliers
self.formulation.ineq_multipliers.project_()
def restart_dual_variables(self):
# Call to formulation._populate_gradients has already flipped sign
# A currently *positive* gradient means original defect is negative, so
# the constraint is being satisfied.
# The code below still works in the case of proxy constraints, since the
# multiplier updates are computed based on *non-proxy* constraints
feasible_filter = self.formulation.ineq_multipliers.weight.grad > 0
self.formulation.ineq_multipliers.weight.grad[feasible_filter] = 0.0
self.formulation.ineq_multipliers.weight.data[feasible_filter] = 0.0
def zero_grad(self, ignore_primal: bool = False, ignore_dual: bool = False):
"""
Sets the gradients of all optimized
:py:class:`~torch.nn.parameter.Parameter`\\s to zero. This includes both
the primal and dual variables.
Args:
ignore_primal: If True, the gradients of the primal variables will
not be zeroed. Defaults to False.
ignore_dual: If True, the gradients of the dual variables will not
be zeroed. Defaults to False.
"""
if not ignore_primal:
self.primal_optimizer.zero_grad()
if not ignore_dual:
if self.formulation.is_state_created:
if self.dual_optimizer is None:
raise RuntimeError(
"Requested zeroing gradients but dual_optimizer is None."
)
else:
self.dual_optimizer.zero_grad()
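# Usage sketch (hedged): `formulation`, `cmp`, `primal_opt`, `dual_opt` and the data
# loader are assumed to be constructed elsewhere, as described in the class docstring.
# coop = ConstrainedOptimizer(formulation, primal_opt, dual_opt)
# for inputs in loader:
#     coop.zero_grad()
#     lagrangian = formulation.composite_objective(cmp.closure, inputs)
#     formulation.custom_backward(lagrangian)
#     coop.step(cmp.closure, inputs)  # closure only required for extrapolating/alternating updates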
|
import random
import math
import networkx as nx
from qtpy.QtCore import Qt
from qtpy.QtCore import QPointF
from qtpy.QtGui import QColor
from qtpy.QtGui import QFont
import nezzle
from nezzle.graphics import NodeClassFactory
from nezzle.graphics import EdgeClassFactory
from nezzle.graphics import LabelClassFactory
from nezzle.graphics import ArrowClassFactory
from nezzle.graphics import Network
from nezzle.graphics import SelfloopEdge
from nezzle.constants import DEFAULT_SCENE_WIDTH, DEFAULT_SCENE_HEIGHT
from nezzle.utils.math import rotate, dist, internal_division
def to_graphics(dg, iden, no_edge_type=False):
if not isinstance(dg, nx.DiGraph):
raise TypeError("NetworkX.DiGraph should be given, not %s"%(type(dg)))
net = Network(iden)
#net.scene.setBackgroundBrush(QColor(0, 0, 0, 0))
net.scene.setBackgroundBrush(Qt.transparent)
NodeClass = NodeClassFactory.create("ELLIPSE_NODE")
nodes = {}
counter_node = 0
counter_edge = 0
# Set constants with default values
scene_width = DEFAULT_SCENE_WIDTH
scene_height = DEFAULT_SCENE_HEIGHT
# Node color
color_node = Qt.white
half_width = scene_width / 2
half_height = scene_height / 2
range_x = (-scene_width / 4, scene_width / 4)
range_y = (-scene_height / 4, scene_height / 4)
# Node size
width = 50
height = 35
# Edge types
str_act = '+'
str_inh = '-'
for str_src, str_trg, edge_data in dg.edges(data=True):
str_edge_type = None
head = None
if not no_edge_type:
if 'HEAD' in edge_data:
if isinstance(edge_data['HEAD'], dict):
attr_head = edge_data.pop('HEAD')
ArrowClass = ArrowClassFactory.create(attr_head['ITEM_TYPE'])
head = ArrowClass.from_dict(attr_head)
# if dict_head['ITEM_TYPE'] == "TRIANGLE":
# str_edge_type = '+'
# elif dict_head['ITEM_TYPE'] == "HAMMER":
# str_edge_type = '-'
else:
str_edge_type = edge_data.pop('HEAD')
elif 'SIGN' in edge_data:
sign_edge = edge_data['SIGN']
if sign_edge > 0:
str_edge_type = str_act
elif sign_edge < 0:
str_edge_type = str_inh
else:
raise ValueError("Undefined edge sign: %s"%(sign_edge))
if not head and not no_edge_type \
and (str_edge_type not in (str_act, str_inh)):
raise ValueError("Undefined edge type: %s"%(str_edge_type))
if 'POS_X' in dg.nodes[str_src]:
sx = dg.nodes[str_src]['POS_X']
else:
sx = half_width + random.uniform(*range_x)
if 'POS_Y' in dg.nodes[str_src]:
sy = dg.nodes[str_src]['POS_Y']
else:
sy = half_height + random.uniform(*range_y)
if 'POS_X' in dg.nodes[str_trg]:
tx = dg.nodes[str_trg]['POS_X']
else:
tx = half_width + random.uniform(*range_x)
if 'POS_Y' in dg.nodes[str_trg]:
ty = dg.nodes[str_trg]['POS_Y']
else:
ty = half_height + random.uniform(*range_y)
if str_src in nodes:
src = nodes[str_src]
else:
counter_node += 1
src = NodeClass(str_src, width=width, height=height,
pos=QPointF(sx, sy))
if "FILL_COLOR" not in dg.nodes[str_src]:
src["FILL_COLOR"] = color_node
if 'BORDER_COLOR' not in dg.nodes[str_src]:
src['BORDER_COLOR'] = Qt.darkGray
src.update(dg.nodes[str_src])
nodes[str_src] = src
# end of else
if str_trg in nodes:
trg = nodes[str_trg]
else:
counter_node += 1
trg = NodeClass(str_trg, width=width, height=height,
pos=QPointF(tx, ty))
if "FILL_COLOR" not in dg.nodes[str_trg]:
trg["FILL_COLOR"] = color_node
if 'BORDER_COLOR' not in dg.nodes[str_trg]:
trg['BORDER_COLOR'] = Qt.darkGray
trg.update(dg.nodes[str_trg])
nodes[str_trg] = trg
# end of else
counter_edge += 1
# Add head
if not head: # Head can be created from the head information.
if no_edge_type:
ArrowClass = None
elif str_edge_type == '+':
head_type = "TRIANGLE"
ArrowClass = ArrowClassFactory.create(head_type)
elif str_edge_type == '-':
head_type = "HAMMER"
ArrowClass = ArrowClassFactory.create(head_type)
else:
pass # This logic is processed just below.
if ArrowClass:
head = ArrowClass()
# Add edge with head
if str_src == str_trg: # Self-loop edge
EdgeClass = EdgeClassFactory.create('SELFLOOP_EDGE')
iden = "%s%s%s" % (str_src, str_edge_type, str_src)
edge = EdgeClass(iden=iden,
name=str_edge_type,
node=src,
head=head)
if "FILL_COLOR" not in edge_data:
edge["FILL_COLOR"] = QColor(100, 100, 100, 100)
# Update extra data in nezzle.graphics.Edge object.
edge.update(edge_data)
else:
EdgeClass = EdgeClassFactory.create('CURVED_EDGE')
iden = "%s%s%s" % (str_src, str_edge_type, str_tgt)
edge = EdgeClass(iden=iden,
name= str_edge_type,
source=src, target=trg,
head=head)
if "FILL_COLOR" not in edge_data:
edge["FILL_COLOR"] = Qt.black
# Update extra data in nezzle.graphics.Edge object.
edge.update(edge_data)
# end of else
src.add_edge(edge)
trg.add_edge(edge)
net.add_edge(edge)
# end of for : reading each line of SIF file
# Add nodes and labels in network
font = QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
LabelClass = LabelClassFactory.create("TEXT_LABEL")
for str_name, node in nodes.items():
net.add_node(node)
label = LabelClass(node, str_name)
label.font = font
rect = label.boundingRect()
label.setPos(-rect.width()/2, -rect.height()/2)
net.add_label(label)
nodes[str_name] = node
# Make the two edges of interconnected nodes curved.
for src, trg, attr in net.nxgraph.edges(data=True):
if net.nxgraph.has_edge(trg, src):
if src == trg: # Skip selfloops
continue
edge = attr['GRAPHICS']
mid = internal_division(edge.pos_src, edge.pos_tgt, 0.5, 0.5)
d = dist(edge.pos_src, mid)/math.cos(math.pi/4)
cp = rotate(edge.pos_src, mid, -30, d)
edge.ctrl_point.setPos(cp)
return net
def to_networkx(net):
if not isinstance(net, nezzle.graphics.Network):
raise TypeError("nezzle.graphics.Network should be given, not %s"%(type(net)))
dg = nx.DiGraph()
dg.name = net.iden
for iden, edge in net.edges.items():
if isinstance(edge, SelfloopEdge):
src = trg = edge.node
else:
src = edge.source
trg = edge.target
if src.iden not in dg.nodes:
dg.add_node(src.iden)
dg.nodes[src.iden].update(src.to_dict())
if trg.iden not in dg.nodes:
dg.add_node(trg.iden)
dg.nodes[trg.iden].update(trg.to_dict())
dg.add_edge(src.iden, trg.iden)
edge_data = dg.edges[src.iden, trg.iden]
edge_data.update(edge.to_dict())
# Set sign information if head exists.
if edge.head:
sign_edge = 0
if edge.head.ITEM_TYPE == "TRIANGLE":
sign_edge = +1
elif edge.head.ITEM_TYPE == "HAMMER":
sign_edge = -1
edge_data['SIGN'] = sign_edge
return dg
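# Usage sketch (hypothetical graph): a SIGN of +1/-1 maps to a TRIANGLE/HAMMER head.
# dg = nx.DiGraph()
# dg.add_edge('A', 'B', SIGN=+1)
# dg.add_edge('B', 'A', SIGN=-1)
# net = to_graphics(dg, iden='toy-network')
# dg2 = to_networkx(net)  # round-trips node/edge attributes, including SIGN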
|
# -*- coding: utf-8 -*-
from unittest import TestCase as UnitTestCase
from nose import tools, SkipTest
import django
from django import template
from django.template import TemplateSyntaxError
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator
from django.test import RequestFactory
from ella.core.templatetags.core import listing_parse, _parse_box, BoxNode, EmptyNode
from ella.core.templatetags.pagination import _do_paginator
from ella.core.models import Category
from ella.core.managers import ListingHandler
from ella.articles.models import Article
from ella.photos.models import Photo
from test_ella.test_core import create_basic_categories, create_and_place_a_publishable, \
create_and_place_more_publishables, list_all_publishables_in_category_by_hour
from test_ella import template_loader
from test_ella.cases import RedisTestCase as TestCase
class TestPaginate(UnitTestCase):
def setUp(self):
super(TestPaginate, self).setUp()
self.rf = RequestFactory()
def tearDown(self):
super(TestPaginate, self).tearDown()
template_loader.templates = {}
def test_all_querystring_is_included(self):
req = self.rf.get('/', {'using': 'custom_lh', 'other': 'param with spaces'})
page = Paginator(range(100), 10).page(2)
context = {
'request': req,
'page': page
}
tools.assert_equals((('inclusion_tags/paginator.html', 'inc/paginator.html'), {
'page': page,
'page_numbers': [1, 2, 3, 4, 5],
'query_params': '?using=custom_lh&other=param+with+spaces&p=',
'results_per_page': 10,
'show_first': False,
'show_last': True
}), _do_paginator(context, 2, None))
def test_always_include_given_number_of_pages(self):
page = Paginator(range(100), 9).page(1)
tools.assert_equals((('inclusion_tags/paginator_special.html', 'inc/paginator_special.html'), {
'page': page,
'page_numbers': [1, 2, 3, 4, 5, 6, 7],
'query_params': '?p=',
'results_per_page': 9,
'show_first': False,
'show_last': True
}), _do_paginator({'page': page}, 3, 'special'))
def test_dont_fail_on_missing_page(self):
tools.assert_equals((('inclusion_tags/paginator.html', 'inc/paginator.html'), {}), _do_paginator({}, 2, None))
def test_proper_template_gets_rendered(self):
template_loader.templates['inclusion_tags/paginator_special.html'] = 'special'
t = template.Template('{% load pagination %}{% paginator 1 "special" %}')
tools.assert_equals('special', t.render(template.Context()))
def test_proper_template_gets_rendered_via_kwargs(self):
if django.VERSION[:2] < (1, 4):
raise SkipTest()
template_loader.templates['inclusion_tags/paginator_special.html'] = 'special'
t = template.Template('{% load pagination %}{% paginator template_name="special" %}')
tools.assert_equals('special', t.render(template.Context()))
def test_adjacent_places_get_passed_from_template(self):
page = Paginator(range(100), 9).page(1)
template_loader.templates['inclusion_tags/paginator.html'] = '{{ page_numbers|join:", "}}'
t = template.Template('{% load pagination %}{% paginator 1 %}')
tools.assert_equals('1, 2, 3', t.render(template.Context({'page': page})))
class TestRenderTag(UnitTestCase):
def test_raises_error_on_no_args(self):
t = '{% render %}'
tools.assert_raises(template.TemplateSyntaxError, template.Template, t)
def test_raises_error_on_more_args(self):
t = '{% render 1 2 3 %}'
tools.assert_raises(template.TemplateSyntaxError, template.Template, t)
def test_fail_silently_on_empty_var(self):
t = template.Template('{% render var_name %}')
tools.assert_equals('', t.render(template.Context()))
def test_renders_var(self):
t = template.Template('{% render var %}')
tools.assert_equals('XXX', t.render(template.Context({'var': 'XXX'})))
def test_renders_nested_var(self):
t = template.Template('{% render var.subvar.subsubvar %}')
var = {'subvar': {'subsubvar': 'XXX'}}
tools.assert_equals('XXX', t.render(template.Context({'var': var})))
def test_renders_var_in_context(self):
t = template.Template('{% render var %}')
tools.assert_equals('YYY', t.render(template.Context({'var': '{{ other_var }}', 'other_var' : 'YYY'})))
def test_does_not_escape_output(self):
t = template.Template('{% render var %}')
tools.assert_equals('<html> ""', t.render(template.Context({'var': '<html> ""'})))
class TestListingTag(TestCase):
def setUp(self):
super(TestListingTag, self).setUp()
create_basic_categories(self)
create_and_place_a_publishable(self)
create_and_place_more_publishables(self)
list_all_publishables_in_category_by_hour(self)
def test_get_listing(self):
t = template.Template('{% listing 10 for category as var %}{{ var|join:":" }}')
expected = ':'.join([str(listing) for listing in self.listings if listing.category == self.category])
tools.assert_equals(expected, t.render(template.Context({'category': self.category})))
def test_get_listing_with_immediate_children(self):
t = template.Template('{% listing 10 for category with children as var %}{{ var|join:":" }}')
expected = ':'.join([str(listing) for listing in self.listings if listing.category in (self.category, self.category_nested)])
tools.assert_equals(expected, t.render(template.Context({'category': self.category})))
def test_get_listing_with_immediate_children_and_offset(self):
t = template.Template('{% listing 10 from 2 for category with children as var %}{{ var|join:":" }}')
expected = ':'.join([str(listing) for listing in self.listings if listing.category in (self.category, self.category_nested)][1:])
tools.assert_equals(expected, t.render(template.Context({'category': self.category})))
def test_get_listing_with_immediate_children_offset_and_count(self):
t = template.Template('{% listing 1 from 2 for category with children as var %}{{ var|join:":" }}')
expected = [str(listing) for listing in self.listings if listing.category in (self.category, self.category_nested)][1]
tools.assert_equals(expected, t.render(template.Context({'category': self.category})))
def test_get_listing_without_a_publishable(self):
t = template.Template('{% listing 10 for category without p as var %}{{ var|join:":" }}')
tools.assert_equals('', t.render(template.Context({'category': self.category, 'p': self.publishables[0]})))
class TestListingTagParser(TestCase):
'''
{% listing <limit>[ from <offset>][of <app.model>[, <app.model>[, ...]]][ for <category> ] [with children|descendents] as <result> %}
'''
def setUp(self):
self.act = ContentType.objects.get_for_model(Article)
self.pct = ContentType.objects.get_for_model(Photo)
super(TestListingTagParser, self).setUp()
create_basic_categories(self)
def test_minimal_args(self):
var_name, parameters = listing_parse(['listing', '1', 'as', 'var'])
tools.assert_equals('var', var_name)
tools.assert_equals(1, parameters['count'].literal)
def test_offset(self):
var_name, parameters = listing_parse(['listing', '1', 'from', '10', 'as', 'var'])
tools.assert_equals(10, parameters['offset'].literal)
def test_limit_by_model(self):
var_name, parameters = listing_parse(['listing', '1', 'of', 'articles.article', 'as', 'var'])
tools.assert_equals('var', var_name)
tools.assert_equals(1, parameters['count'].literal)
tools.assert_equals([self.act], parameters['content_types'])
def test_limit_by_more_models(self):
var_name, parameters = listing_parse(['listing', '1', 'of', 'articles.article,photos.photo', 'as', 'var'])
tools.assert_equals([self.act, self.pct], parameters['content_types'])
def test_limit_by_more_models_space(self):
var_name, parameters = listing_parse(['listing', '1', 'of', 'articles.article,', 'photos.photo', 'as', 'var'])
tools.assert_equals([self.act, self.pct], parameters['content_types'])
def test_limit_by_more_models_space_around_comma(self):
var_name, parameters = listing_parse(['listing', '1', 'of', 'articles.article', ',', 'photos.photo', 'as', 'var'])
tools.assert_equals([self.act, self.pct], parameters['content_types'])
def test_limit_by_category(self):
var_name, parameters = listing_parse(['listing', '1', 'for', 'category', 'as', 'var'])
tools.assert_equals('category', parameters['category'].var)
def test_limit_by_category_with_descendents(self):
var_name, parameters = listing_parse(['listing', '1', 'for', 'category', 'with', 'descendents', 'as', 'var'])
tools.assert_equals('category', parameters['category'].var)
tools.assert_equals(ListingHandler.ALL, parameters['children'])
def test_limit_by_category_with_children(self):
var_name, parameters = listing_parse(['listing', '1', 'for', 'category', 'with', 'children', 'as', 'var'])
tools.assert_equals('category', parameters['category'].var)
tools.assert_equals(ListingHandler.IMMEDIATE, parameters['children'])
def test_ct_with_desc_using(self):
var_name, parameters = listing_parse("listing 10 of articles.article with descendents using 'most-viewed' as most_viewed_listings".split())
tools.assert_equals(ListingHandler.ALL, parameters['children'])
tools.assert_equals(Category.objects.get_by_tree_path(''), parameters['category'])
class TestBoxTag(UnitTestCase):
def tearDown(self):
super(TestBoxTag, self).tearDown()
template_loader.templates = {}
def test_renders_correct_template(self):
template_loader.templates['box/box.html'] = '{{ object }}'
t = template.Template('{% box name for sites.site with pk 1 %}{% endbox %}')
tools.assert_equals('example.com', t.render(template.Context()))
def test_params_are_parsed(self):
template_loader.templates['box/box.html'] = '{% for k,v in box.params.items %}{{k}}:{{v}}|{% endfor %}'
t = template.Template('''{% box name for sites.site with pk 1 %}
level: 2
some_other_param: xxx
{% endbox %}''')
tools.assert_equals('some_other_param:xxx|level:2|', t.render(template.Context()))
def test_box_works_with_variable_instead_of_lookup(self):
site = Site.objects.get(pk=1)
template_loader.templates['box/box.html'] = '{{ object }}'
t = template.Template('{% box name for var %}{% endbox %}')
tools.assert_equals('example.com', t.render(template.Context({'var': site})))
def test_box_for_empty_object_renders_empty(self):
template_loader.templates['box/box.html'] = 'XXX'
t = template.Template('{% box name for var %}{% endbox %}')
tools.assert_equals('', t.render(template.Context({'var': None})))
class TestBoxTagParser(UnitTestCase):
def test_parse_box_with_pk(self):
node = _parse_box([], ['box', 'box_type', 'for', 'core.category', 'with', 'pk', '1'])
tools.assert_true(isinstance(node, BoxNode))
tools.assert_equals('box_type', node.box_type)
tools.assert_equals(Category, node.model)
tools.assert_equals(('pk', 1), node.lookup)
def test_parse_box_for_varname(self):
node = _parse_box([], ['box', 'other_box_type', 'for', 'var_name'])
tools.assert_true(isinstance(node, BoxNode))
tools.assert_equals('other_box_type', node.box_type)
tools.assert_equals('var_name', node.var.var)
def test_parse_box_with_slug(self):
node = _parse_box([], ['box', 'box_type', 'for', 'sites.site', 'with', 'slug', '"home"'])
tools.assert_true(isinstance(node, BoxNode))
tools.assert_equals('box_type', node.box_type)
tools.assert_equals(Site, node.model)
tools.assert_equals(('slug', 'home'), node.lookup)
def test_parse_raises_on_too_many_arguments(self):
tools.assert_raises(TemplateSyntaxError, _parse_box, [], ['box', 'box_type', 'for', 'core.category', 'with', 'pk', '1', '2', 'extra'])
def test_parse_raises_on_too_few_arguments(self):
tools.assert_raises(TemplateSyntaxError, _parse_box, [], ['box', 'box_type', 'for'])
def test_parse_raises_on_incorrect_arguments(self):
tools.assert_raises(TemplateSyntaxError, _parse_box, [], ['box', 'box_type', 'not a for', 'core.category', 'with', 'pk', '1'])
def test_parse_return_empty_node_on_incorrect_model(self):
node = _parse_box([], ['box', 'box_type', 'for', 'not_app.not_model', 'with', 'pk', '1'])
tools.assert_true(isinstance(node, EmptyNode))
|
"""
One epoch -> forward & backward pass of all training samples.
batch_size -> number of training samples in one forward & backward pass.
number of iterations -> number of passes, each pass using [batch_size] number of samples.
e.g. 100 samples, batch_size=20 -> 100/20 = 5 iterations for 1 epoch.
"""
import math
import torch
from sklearn.datasets import load_wine
from torch.utils.data import Dataset, DataLoader
class WineDataSet(Dataset):
def __init__(self, X, y):
self.X = torch.from_numpy(X)
y = torch.from_numpy(y)
self.y = y.view(y.shape[0], 1)
self.n_samples = self.X.shape[0]
def __getitem__(self, index):
return self.X[index], self.y[index]
def __len__(self):
return self.n_samples
loader = load_wine()
X, y = loader["data"], loader["target"]
dataset = WineDataSet(X, y)
dataloader = DataLoader(dataset=dataset, batch_size=4,
shuffle=True, num_workers=2)
dataiter = iter(dataloader)
data = next(dataiter)  # builtin next() works across PyTorch versions; the .next() method was removed
features, labels = data
print(features, labels)
# training loop
num_epochs = 2
total_samples = len(dataset)
n_iterations = math.ceil(total_samples / 4)
print(total_samples, n_iterations)
for epoch in range(num_epochs):
for i, (inputs, labels) in enumerate(dataloader):
# forward backward, update
if (i + 1) % 5 == 0:
print(f"epoch {epoch+1}/{num_epochs}, step {i+1}/{n_iteratios}, inputs {inputs.shape}")
# some famous dataset
# torchvision.datasets.MNIST()
# torchvision.datasets.FashionMNIST()
|
from __future__ import unicode_literals, absolute_import
import click, os, ConfigParser, getpass
@click.command('setup-custom-app')
def setup_custom_app():
"""Generate config for supervisor, nginx and Procfile"""
try:
if click.confirm('This will add custom node to existing supervisor config file. Continue?'):
setup_supervisor()
if click.confirm('This will add custom node to existing Procfile. Continue?'):
setup_procfile()
except Exception as e:
print(e)
def get_bench_dir():
if os.getcwd().split('/')[-1] == 'sites':
return os.path.split(os.getcwd())[0]
else:
return os.path.abspath('.')
def setup_supervisor():
conf_file = get_bench_dir() + '/config/supervisor.conf'
if not os.path.isfile(conf_file):
raise IOError(conf_file + " does not exist.")
bench_dir = get_bench_dir().split('/')[-1]
new_section = "program:" + bench_dir + "-" + get_app_name(slug=False)
node_command = get_node_command()
node_stdout_logfile = get_bench_dir() + "/logs/" + get_app_name(slug=True) + ".log"
node_stderr_logfile = get_bench_dir() + "/logs/" + get_app_name(slug=True) + ".error.log"
config_parser = ConfigParser.ConfigParser()
config_parser.read(conf_file)
config_parser.add_section(new_section)
config_parser.set(new_section, 'command', node_command)
config_parser.set(new_section, 'priority', 4)
config_parser.set(new_section, 'autostart', "true")
config_parser.set(new_section, 'autorestart', "true")
config_parser.set(new_section, 'stdout_logfile', node_stdout_logfile)
config_parser.set(new_section, 'stderr_logfile', node_stderr_logfile)
config_parser.set(new_section, 'user', getpass.getuser())
config_parser.set(new_section, 'directory', get_bench_dir())
# add group
config_parser.add_section("group:" + bench_dir + "-" + get_app_name(slug=False))
config_parser.set("group:" + bench_dir + "-" + get_app_name(slug=False),
"programs", bench_dir + "-" + get_app_name(slug=False))
with open(conf_file, 'ab') as f:
config_parser.write(f)
def setup_procfile():
found = False
procfile = get_bench_dir() + "/Procfile"
node_command = get_node_command()
if not os.path.isfile(procfile):
raise IOError(procfile + " does not exist.")
with open(procfile) as f:
l = list(f)
with open(procfile, 'w') as output:
for line in l:
if line.startswith(get_app_name(slug=True)):
found = True
output.write(get_app_name(slug=True) + ': ' + node_command + "\n")
elif not found:
output.write(line)
if not found:
with open(procfile, 'ab') as f:
f.write(get_app_name(slug=True) + ': ' + node_command + "\n")
def get_app_name(slug=False):
if slug:
return "custom_node"
else:
return "custom-node"
def get_node_command():
# node_command = "/usr/bin/node /home/user/bench/apps/custom_app/custom_app.js"
node_command = "/usr/bin/node " + get_bench_dir()
node_command += "/apps/" + get_app_name(slug=True) + "/" + get_app_name(slug=True) + ".js"
return node_command
commands = [setup_custom_app]
|
import logging
from zipfile import ZipFile, ZIP_DEFLATED
from apache_beam.io.filesystems import FileSystems
from sciencebeam_utils.beam_utils.io import (
dirname,
mkdirs_if_not_exists
)
def get_logger():
return logging.getLogger(__name__)
def load_pages(filename, page_range=None):
with FileSystems.open(filename) as f:
with ZipFile(f) as zf:
filenames = zf.namelist()
if page_range:
filenames = filenames[
max(0, page_range[0] - 1):
page_range[1]
]
for member_filename in filenames:
with zf.open(member_filename) as f:
yield f
def save_pages(output_filename, ext, bytes_by_page):
mkdirs_if_not_exists(dirname(output_filename))
with FileSystems.create(output_filename) as f:
with ZipFile(f, 'w', compression=ZIP_DEFLATED) as zf:
for i, data in enumerate(bytes_by_page):
page_filename = 'page-%s%s' % (1 + i, ext)
get_logger().debug('page_filename: %s', page_filename)
zf.writestr(page_filename, data)
return output_filename
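# Usage sketch (hypothetical filenames): re-pack selected pages of one archive into
# a new archive, one member per page.
# pages = load_pages('input/pages.zip', page_range=(1, 3))
# save_pages('output/pages.zip', '.png', (page.read() for page in pages))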
|
import mellon
from sparc.testing.testlayer import SparcZCMLFileLayer
import os
import shutil
import tempfile
from zope import component
DEFAULT_snippet_lines_increment = 2
DEFAULT_snippet_lines_coverage = 5
DEFAULT_read_size = 512000
DEFAULT_snippet_bytes_increment = 7
DEFAULT_snippet_bytes_coverage = 8
class MellonFactoriesFilesystemLayer(SparcZCMLFileLayer):
def create_file(self, rel_path_list, type_, length):
if type_ == 'binary':
with open(os.path.join(self.working_dir, *rel_path_list), 'w+b') as file:
file.write(os.urandom(length))
else:
with open(os.path.join(self.working_dir, *rel_path_list), 'w+t') as file:
file.writelines(['{}{}'.format(i+1,os.linesep) for i in range(length)])
def setUp(self):
SparcZCMLFileLayer.setUp(self)
"""
1
/a
exact.txt
exact.bin
/b
[empty]
largest.txt
largest.bin
2
/c
[empty]
/d
larger.txt
larger.bin
small.txt
small.bin
*-small => smaller than coverage
*-exact => exact size as coverage
*-larger => a little larger than coverage
*-largest => much larger than coverage
"""
self.working_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.working_dir, '1','a'))
os.makedirs(os.path.join(self.working_dir, '1','b'))
os.makedirs(os.path.join(self.working_dir, '2','c'))
os.makedirs(os.path.join(self.working_dir, '2','d'))
self.create_file(['small.txt'], 'text', 4)
self.create_file(['small.bin'], 'binary', 128)
self.create_file(['1','largest.txt'], 'text', 1024)
self.create_file(['1','largest.bin'], 'binary', 2*DEFAULT_read_size*DEFAULT_snippet_bytes_coverage)
self.create_file(['1','a','exact.txt'], 'text', 5)
self.create_file(['1','a','exact.bin'], 'binary', DEFAULT_read_size*DEFAULT_snippet_bytes_coverage)
self.create_file(['2','d','larger.txt'], 'text', 8)
self.create_file(['2','d','larger.bin'], 'binary', DEFAULT_read_size*DEFAULT_snippet_bytes_coverage+DEFAULT_read_size)
config = {'MellonSnippet':
{
'lines_increment': DEFAULT_snippet_lines_increment,
'lines_coverage': DEFAULT_snippet_lines_coverage,
'bytes_read_size': DEFAULT_read_size,
'bytes_increment': DEFAULT_snippet_bytes_increment,
'bytes_coverage': DEFAULT_snippet_bytes_coverage
},
'FileSystemDir':
{
'directory': self.working_dir
}
}
self.config = component.createObject(u'sparc.configuration.container', config)
def tearDown(self):
if len(self.working_dir) < 3:
print('ERROR: working directory less than 3 chars long, unable to clean up: %s' % str(self.working_dir))
return
shutil.rmtree(self.working_dir)
SparcZCMLFileLayer.tearDown(self)
MELLON_FACTORIES_FILESYSTEM_LAYER = MellonFactoriesFilesystemLayer(mellon)
|
"""
gen_keys.py
Generates keys from a secret using the Fuzzy Key Recovery scheme
"""
import json
import click
from fuzzyvault import gen_keys, FuzzyError
def work(words, key_count, secret_path) -> None:
"""
1. read the secret (FuzzyState) from the JSON file at secret_path
2. call gen_keys with the secret and the recovery words (words)
3. print the generated keys
A FuzzyError exception is thrown upon failure
"""
recovery_words = json.dumps([int(word) for word in words.split()], indent=2)
with open(secret_path, 'r') as fobj:
secret = fobj.read()
print(gen_keys(secret, recovery_words, key_count))
@click.command()
@click.option('--words',
type=str,
prompt="words",
required=True,
help='recovery words as integers eg. "8 6 0 3"')
@click.option('--key-count',
type=int,
default=1,
help='number of keys to be generated [default=1]')
@click.option('--secret',
type=str,
default='secret.json',
help='path to JSON file holding the secret (FuzzyState)')
def main(words, key_count, secret) -> None:
"""
generate keys from a secret
example:
python3 gen_keys.py --words "1 2 3" [--secret secret.json] [--key-count 1]
"""
work(words, key_count, secret)
if __name__ == '__main__':
try:
# pylint: disable=no-value-for-parameter
main()
# pylint: enable=no-value-for-parameter
except FuzzyError as error:
print("\nKey Recovery Failed:")
print(" ", error.message)
# work("1 2 3", 1, "output.json")
|
from .base import BaseModel, RelatedResourceMixin
from ..utils import enforce_string_type
class AddOn(BaseModel, RelatedResourceMixin):
_as_is_fields = ['max_days', 'min_days', 'product']
_date_fields = ['start_date', 'finish_date', 'halt_booking_date', 'request_space_date']
def __init__(self, *args, **kwargs):
self._resource_fields = []
super(AddOn, self).__init__(*args, **kwargs)
@enforce_string_type
def __repr__(self):
return '<{0} ({1})>'.format(self.__class__.__name__, self.product.name)
# product is marked as as-is, so we can turn it into a dynamically-typed resource, based on product['type']
# Is there a better way?
def _fill_fields(self, data):
super(AddOn, self)._fill_fields(data)
r = {
'activities': 'Activity',
'accommodations': 'Accommodation',
'transports': 'Transport',
'single_supplements': 'SingleSupplement',
}.get(self.product['type'])
if r:
self._resource_fields += [('product', r)]
self._set_resource_field('product', self.product)
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from vistrails.db.versions.v1_0_2.domain import DBVistrail, DBAnnotation, \
DBWorkflow, DBLog, DBRegistry, \
DBPortSpec, DBAdd, DBChange, DBDelete
from vistrails.core import debug
from vistrails.core.system import get_elementtree_library
ElementTree = get_elementtree_library()
import unittest
id_scope = None
def update_portSpec(old_obj, translate_dict):
global id_scope
sigs = []
defaults = []
labels = []
for psi in sorted(old_obj.db_portSpecItems, key=lambda x: x.db_pos):
sigs.append((psi.db_package, psi.db_module, psi.db_namespace))
defaults.append(psi.db_default)
labels.append(psi.db_label)
new_obj = DBPortSpec.update_version(old_obj, translate_dict)
sigstring = '(' + ','.join('%s:%s%s' %
(s[0], s[1], ":%s" % s[2] if s[2] else "")
for s in sigs) + ')'
new_obj.db_sigstring = sigstring
if all(not d for d in defaults):
new_obj.db_defaults = None
else:
new_obj.db_defaults = unicode(defaults)
if all(not label for label in labels):
new_obj.db_labels = None
else:
new_obj.db_labels = unicode(labels)
return new_obj
def update_portSpecs(old_obj, translate_dict):
new_port_specs = []
for port_spec in old_obj.db_portSpecs:
new_port_specs.append(update_portSpec(port_spec, translate_dict))
return new_port_specs
def update_portSpec_op(old_obj, translate_dict):
return update_portSpec(old_obj.db_data, translate_dict)
def translateVistrail(_vistrail):
""" Translate new DBVistrailVariable based vistrail variables to old
annotation based type """
global id_scope
def update_workflow(old_obj, trans_dict):
return DBWorkflow.update_version(old_obj.db_workflow,
trans_dict, DBWorkflow())
def update_operations(old_obj, trans_dict):
new_ops = []
for obj in old_obj.db_operations:
if obj.vtType == 'delete':
new_ops.append(DBDelete.update_version(obj, trans_dict))
elif obj.vtType == 'add':
if obj.db_what == 'portSpec':
trans_dict['DBAdd'] = {'data': update_portSpec_op}
new_op = DBAdd.update_version(obj, trans_dict)
new_ops.append(new_op)
del trans_dict['DBAdd']
else:
new_op = DBAdd.update_version(obj, trans_dict)
new_ops.append(new_op)
elif obj.vtType == 'change':
if obj.db_what == 'portSpec':
trans_dict['DBChange'] = {'data': update_portSpec_op}
new_op = DBChange.update_version(obj, trans_dict)
new_ops.append(new_op)
del trans_dict['DBChange']
else:
new_op = DBChange.update_version(obj, trans_dict)
new_ops.append(new_op)
return new_ops
vistrail = DBVistrail()
id_scope = vistrail.idScope
def update_annotations(old_obj, trans_dict):
new_annotations = []
for a in old_obj.db_annotations:
new_annotations.append(DBAnnotation.update_version(a,
translate_dict))
id_scope.updateBeginId(DBAnnotation.vtType, a.db_id)
vars = {}
for var in old_obj.db_vistrailVariables:
descriptor = (var.db_package, var.db_module, var.db_namespace)
vars[var.db_name] = (var.db_uuid, descriptor, var.db_value)
if vars:
new_id = id_scope.getNewId(DBAnnotation.vtType)
annotation = DBAnnotation(id=new_id, key='__vistrail_vars__',
value=str(vars))
new_annotations.append(annotation)
return new_annotations
translate_dict = {'DBModule': {'portSpecs': update_portSpecs},
'DBModuleDescriptor': {'portSpecs': update_portSpecs},
'DBAction': {'operations': update_operations},
'DBGroup': {'workflow': update_workflow},
'DBVistrail': {'annotations': update_annotations},
}
vistrail = DBVistrail.update_version(_vistrail, translate_dict, vistrail)
if _vistrail.db_parameter_explorations:
debug.warning(("Vistrail contains %s parameter explorations that "
"cannot be converted") % len(_vistrail.db_parameter_explorations))
vistrail.db_version = '1.0.2'
return vistrail
def translateWorkflow(_workflow):
def update_workflow(old_obj, translate_dict):
return DBWorkflow.update_version(old_obj.db_workflow, translate_dict)
translate_dict = {'DBModule': {'portSpecs': update_portSpecs},
'DBGroup': {'workflow': update_workflow}}
workflow = DBWorkflow.update_version(_workflow, translate_dict)
workflow.db_version = '1.0.2'
return workflow
def translateLog(_log):
translate_dict = {}
log = DBLog.update_version(_log, translate_dict)
log.db_version = '1.0.2'
return log
def translateRegistry(_registry):
global id_scope
translate_dict = {'DBModuleDescriptor': {'portSpecs': update_portSpecs}}
registry = DBRegistry()
id_scope = registry.idScope
vistrail = DBRegistry.update_version(_registry, translate_dict, registry)
registry.db_version = '1.0.2'
return registry
class TestTranslate(unittest.TestCase):
def testParamexp(self):
"""test translating parameter explorations from 1.0.3 to 1.0.2"""
from vistrails.db.services.io import open_bundle_from_zip_xml
from vistrails.core.system import vistrails_root_directory
import os
(save_bundle, vt_save_dir) = open_bundle_from_zip_xml(DBVistrail.vtType, \
os.path.join(vistrails_root_directory(),
'tests/resources/paramexp-1.0.3.vt'))
vistrail = translateVistrail(save_bundle.vistrail)
# paramexps cannot be downgraded but should produce a warning
def testVistrailvars(self):
"""test translating vistrail variables from 1.0.3 to 1.0.2"""
from vistrails.db.services.io import open_bundle_from_zip_xml
from vistrails.core.system import vistrails_root_directory
import os
(save_bundle, vt_save_dir) = open_bundle_from_zip_xml(DBVistrail.vtType, \
os.path.join(vistrails_root_directory(),
'tests/resources/visvar-1.0.3.vt'))
vistrail = translateVistrail(save_bundle.vistrail)
visvars = vistrail.db_annotations_key_index['__vistrail_vars__']
self.assertTrue(visvars.db_value)
if __name__ == '__main__':
from vistrails.gui.application import start_application
v = start_application({'interactiveMode': False,
'nologger': True,
'singleInstance': False,
'fixedSpreadsheetCells': True})
unittest.main()
|
from flask import Blueprint
bp = Blueprint('shopping', __name__)
from src.shopping import routes, forms
|
#!/usr/bin/env python
'''
@author Luke Campbell <My email is around here somewhere>
@file ion/processes/data/transforms/test/test_qc_post_processing.py
@date Tue May 7 15:34:54 EDT 2013
'''
from ion.services.dm.test.dm_test_case import DMTestCase
from interface.objects import ProcessDefinition
from pyon.core.exception import BadRequest
from nose.plugins.attrib import attr
from ion.services.dm.utility.granule import RecordDictionaryTool
from ion.services.dm.test.test_dm_end_2_end import DatasetMonitor
from ion.services.dm.utility.test.parameter_helper import ParameterHelper
from ion.util.stored_values import StoredValueManager
from pyon.util.containers import DotDict
from gevent.event import Event
from pyon.ion.event import EventSubscriber, EventPublisher
from uuid import uuid4
from interface.objects import ProcessStateEnum
from ion.services.cei.process_dispatcher_service import ProcessStateGate
from interface.services.cei.ischeduler_service import SchedulerServiceClient
from pyon.public import OT
import time
import numpy as np
from gevent.queue import Queue, Empty
import unittest
@unittest.skip("QC Processing Dependent on M088")
@attr('INT',group='dm')
class TestQCPostProcessing(DMTestCase):
'''
ion/processes/data/transforms/test/test_qc_post_processing.py:TestQCPostProcessing
'''
def setUp(self):
DMTestCase.setUp(self)
process_definition = ProcessDefinition(name='qc_post_processor',
executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
self.process_definition_id = self.process_dispatcher.create_process_definition(process_definition)
self.addCleanup(self.process_dispatcher.delete_process_definition,self.process_definition_id)
self.process_id = self.process_dispatcher.create_process(self.process_definition_id)
self.scheduler_service = SchedulerServiceClient()
def populate_qc_tables(self):
svm = StoredValueManager(self.container)
svm.stored_value_cas('grt_QCTEST_TEMPWAT', {'grt_min_value':-2., 'grt_max_value':40.})
svm.stored_value_cas('svt_QCTEST_TEMPWAT', {'svt_resolution':0.001, 'svt_n': 4})
svm.stored_value_cas('spike_QCTEST_TEMPWAT', {'acc': 0.1, 'spike_n':5, 'spike_l':5})
def sync_launch(self, config):
self.process_dispatcher.schedule_process(self.process_definition_id, process_id=self.process_id, configuration=config)
gate = ProcessStateGate(self.process_dispatcher.read_process,
self.process_id,
ProcessStateEnum.RUNNING)
self.assertTrue(gate.await(30))
self.addCleanup(self.process_dispatcher.cancel_process, self.process_id)
def make_data_product(self):
ph = ParameterHelper(self.dataset_management, self.addCleanup)
pdict_id = ph.create_simple_qc_pdict()
stream_def_id = self.create_stream_definition('global range', parameter_dictionary_id=pdict_id, stream_configuration={'reference_designator':'QCTEST'})
self.populate_qc_tables()
dp_id = self.create_data_product('qc data product', stream_def_id=stream_def_id)
self.activate_data_product(dp_id)
dataset_id = self.RR2.find_dataset_id_of_data_product_using_has_dataset(dp_id)
return dp_id, dataset_id, stream_def_id
def make_large_dataset(self, temp_vector):
monitor_queue = Queue()
# Make 3 hours of data (one RDT per hour is published below)
ph = ParameterHelper(self.dataset_management, self.addCleanup)
data_product_id, dataset_id, stream_def_id = self.make_data_product()
es = EventSubscriber(event_type=OT.DatasetModified, origin=dataset_id, auto_delete=True, callback = lambda *args, **kwargs : monitor_queue.put(1))
es.start()
self.addCleanup(es.stop)
for rdt in self.populate_vectors(stream_def_id, 3, temp_vector):
ph.publish_rdt_to_data_product(data_product_id, rdt)
try:
for i in xrange(3):
monitor_queue.get(timeout=10)
except Empty:
raise AssertionError('Failed to populate dataset in time')
return data_product_id
def populate_vectors(self, stream_def_id, hours, temp_vector):
now = time.time()
ntp_now = now + 2208988800
for i in xrange(hours):
rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
st = ntp_now - (3600 * (hours-i))
et = ntp_now - (3600 * (hours - (i+1)))
rdt['time'] = np.arange(st, et)
rdt['temp'] = temp_vector(3600)
yield rdt
def process_execution(self, temp_vector, qc_params, bad_times):
interval_key = uuid4().hex
data_product_id = self.make_large_dataset(temp_vector)
async_queue = Queue()
def cb(event, *args, **kwargs):
if '_'.join(event.qc_parameter.split('_')[1:]) not in qc_params:
# Not one of the QC parameters under test; ignore this event
return
times = event.temporal_values
self.assertEquals(len(times), bad_times)
async_queue.put(1)
es = EventSubscriber(event_type=OT.ParameterQCEvent, origin=data_product_id, callback=cb, auto_delete=True)
es.start()
self.addCleanup(es.stop)
config = DotDict()
config.process.interval_key = interval_key
config.process.qc_params = qc_params
self.sync_launch(config)
# So now the process is started, time to throw an event at it
ep = EventPublisher(event_type='TimerEvent')
ep.publish_event(origin=interval_key)
try:
async_queue.get(timeout=120)
except Empty:
raise AssertionError('QC was not flagged in time')
def test_glblrng_qc_processing(self):
def temp_vector(size):
return [41] + [39]*(size-1)
self.process_execution(temp_vector, ['glblrng_qc'], 1)
def test_stuckvl_qc_processing(self):
def temp_vector(size):
assert size > 7
return [20] * 6 + range(size-6)
self.process_execution(temp_vector, ['stuckvl_qc'], 6)
def test_spketst_qc_processing(self):
def temp_vector(size):
assert size > 8
return [-1, 3, 40, -1, 1, -6, -6, 1] + [5] * (size-8)
self.process_execution(temp_vector, ['spketst_qc'], 1)
def test_qc_interval_integration(self):
# 1 need to make a dataset that only has one discrete qc violation
# 2 Launch the process
# 3 Setup the scheduler to run it say three times
# 4 Get the Events and verify the data
#--------------------------------------------------------------------------------
# Make a dataset that has only one discrete qc violation
#--------------------------------------------------------------------------------
dp_id, dataset_id, stream_def_id = self.make_data_product()
ph = ParameterHelper(self.dataset_management, self.addCleanup)
monitor = DatasetMonitor(dataset_id)
self.addCleanup(monitor.stop)
for rdt in self.populate_vectors(stream_def_id, 1, lambda x : [41] + [39] * (x-1)):
ph.publish_rdt_to_data_product(dp_id, rdt)
self.assertTrue(monitor.event.wait(10)) # Give it 10 seconds to populate
#--------------------------------------------------------------------------------
# Launch the process
#--------------------------------------------------------------------------------
interval_key = uuid4().hex
config = DotDict()
config.process.interval_key = interval_key
config.process.qc_params = ['glblrng_qc'] # The others are tested in other tests for completeness
self.sync_launch(config)
async_queue = Queue()
def callback(event, *args, **kwargs):
times = event.temporal_values
self.assertEquals(len(times), 1)
async_queue.put(1)
es = EventSubscriber(event_type=OT.ParameterQCEvent, origin=dp_id, callback=callback, auto_delete=True)
es.start()
self.addCleanup(es.stop)
#--------------------------------------------------------------------------------
# Setup the scheduler
#--------------------------------------------------------------------------------
timer_id = self.scheduler_service.create_interval_timer(start_time=time.time(),
end_time=time.time()+13,
interval=5,
event_origin=interval_key)
#--------------------------------------------------------------------------------
# Get the events and verify them
#--------------------------------------------------------------------------------
try:
for i in xrange(2):
async_queue.get(timeout=10)
except Empty:
raise AssertionError('QC Events not raised')
|
import logging
import re
import sys
import pytest
from pyscaffold.exceptions import ErrorLoadingExtension, exceptions2exit
from pyscaffold.log import logger
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import EntryPoint # pragma: no cover
else:
from importlib_metadata import EntryPoint # pragma: no cover
def test_exceptions2exit():
@exceptions2exit([RuntimeError])
def func(_):
raise RuntimeError("Exception raised")
with pytest.raises(SystemExit):
func(1)
def test_exceptions2exit_verbose(capsys):
@exceptions2exit([RuntimeError])
def func(_):
logger.level = logging.DEBUG
raise RuntimeError("Exception raised")
with pytest.raises(SystemExit):
func(1)
error = capsys.readouterr().err
match = re.search(r"raise RuntimeError", error)
assert match
def test_error_loading_external_extension():
# Assert the error message displays some meaningful text
extension = "pyscaffoldext.fake.extension"
# Extension name is given directly
ex = str(ErrorLoadingExtension(extension))
assert "an error loading" in ex
assert "fake" in ex
# Entrypoint is given
fake = EntryPoint("fake", f"{extension}:Fake", "pyscaffold.cli")
ex = str(ErrorLoadingExtension(entry_point=fake))
assert "an error loading" in ex
assert "fake" in ex
|
# tested
from boa.builtins import sha1, sha256, hash160, hash256
def Main(operation, a, b):
if operation == 'omin':
return min(a, b)
elif operation == 'omax':
return max(a, b)
elif operation == 'oabs':
return abs(a)
elif operation == 'sha1':
return sha1(a)
elif operation == 'sha256':
return sha256(a)
elif operation == 'hash160':
return hash160(a)
elif operation == 'hash256':
return hash256(a)
return 'unknown'
|
import setuptools
setuptools.setup(
name="repo2shellscript",
# https://github.com/jupyter/repo2docker/pull/848 was merged!
install_requires=[
"dockerfile-parse",
"jupyter-repo2docker@git+https://github.com/jupyterhub/repo2docker.git@master", # noqa: E501
"importlib_resources;python_version<'3.7'",
],
python_requires=">=3.5",
author="Simon Li",
url="https://github.com/manics/repo2shellscript",
description="Repo2docker shell-script extension",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
use_scm_version={"write_to": "repo2shellscript/_version.py"},
setup_requires=["setuptools_scm"],
license="BSD",
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
],
packages=setuptools.find_packages(),
include_package_data=True,
entry_points={
"repo2docker.engines": ["shellscript = repo2shellscript:ShellScriptEngine"]
},
)
|
import threading
class OrderTracker:
def __init__(self):
"""
Simple container for data structures that map unique ids to the
Order objects associated with that id and the corresponding locks
to those data structures to prevent race conditions.
There are maps by both client order id as well as order id because
of FIX-specific terminology.
As an example, self.orders_by_cl_oid maps a unique client order id
to the Order object associated with that client order id.
"""
self.orders_by_cl_oid = {}
self.orders_by_cl_oid_lock = threading.RLock()
self.orders_by_oid = {}
self.order_by_oid_lock = threading.RLock()
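
# Illustrative sketch (not part of the original module): one way the maps and
# locks above might be used together. The client order id and the object stored
# here are placeholders standing in for a real Order.
if __name__ == "__main__":
    tracker = OrderTracker()
    with tracker.orders_by_cl_oid_lock:
        tracker.orders_by_cl_oid["cl-123"] = object()  # stand-in for an Order
    with tracker.orders_by_cl_oid_lock:
        order = tracker.orders_by_cl_oid.get("cl-123")
    assert order is not None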
|
from PIL import Image, ImageDraw
from geometry import Point
import math
import animation
#maps an x in the range(0,1) to a y in the range (0,1).
#when x is 0, y is 0; when x is 1, y is 1.
#for all intermediate values, y may differ from x.
def ease(x, kind="linear"):
f = {
"linear": lambda x: x,
"trig": lambda x: 1-((math.cos(x*math.pi)/2)+0.5),
"cubic": lambda x: 3*x**2 - 2*x**3
}[kind]
return f(x)
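#illustrative check (added): every easing kind maps 0 -> 0 and 1 -> 1;
#only the intermediate values differ between kinds.
assert ease(0, "cubic") == 0 and ease(1, "cubic") == 1
assert abs(ease(0.5, "trig") - 0.5) < 1e-9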
def iter_lattice(ul, br):
for i in range(ul.x, br.x):
for j in range(ul.y, br.y):
yield Point(i,j)
def exp(angle, radius):
x = math.cos(angle) * radius
y = math.sin(angle) * radius
return Point(x,y)
def render_squares(**kargs):
square_size = kargs.get("square_size", 128)
cols = kargs.get("cols", 8)
fg = kargs.get("fg", "black")
bg = kargs.get("bg", "white")
theta = kargs.get("theta", math.radians(45))
parity = kargs.get("parity", 0)
size = cols * square_size
square_radius = math.sqrt(2) * square_size / 2
img = Image.new("RGB", (size, size), bg)
draw = ImageDraw.Draw(img)
for p in iter_lattice(Point(-1,-1), Point(1+cols, 1+cols)):
if (p.x+p.y) % 2 == parity: continue
square_center = (p + Point(0.5, 0.5)) * square_size
corners = [square_center + exp(theta + math.radians(angle), square_radius) for angle in (45, 135, 225, 315)]
draw.polygon([p.map(round).tuple() for p in corners], outline=fg, fill=fg)
return img.resize((size/2, size/2), Image.ANTIALIAS)
frames = []
total_frames = 32
for i in range(total_frames):
print i
f = float(i) / total_frames
f = ease(f, "cubic")
theta = math.radians(f * 90)
frames.append(render_squares(theta=theta))
for i in range(8): frames.append(frames[-1])
for i in range(total_frames):
print i
f = float(i) / total_frames
f = ease(f, "cubic")
theta = math.radians(f * -90)
frames.append(render_squares(fg="white", bg="black", parity=1, theta=theta))
for i in range(8): frames.append(frames[-1])
animation.make_gif(frames)
|
"""
MIT License
Copyright (c) 2021 Hyeonki Hong <hhk7734@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Any
from .base_layer import BaseLayer
class RouteLayer(BaseLayer):
def __init__(self, index: int, type_index: int):
super().__init__(index=index, type_index=type_index, type_name="route")
self._groups = 1
self._group_id = 0
self._layers: tuple
@property
def bflops(self) -> float:
return 0
@property
def groups(self) -> int:
return self._groups
@property
def group_id(self) -> int:
return self._group_id
@property
def layers(self) -> tuple:
return self._layers
def __repr__(self) -> str:
rep = f"{self.index:4} "
rep += f"{self.type[:5]}_"
rep += f"{self.type_index:<3} "
for layer in self.layers:
rep += f"{layer:3},"
rep += " " * 4 * (6 - len(self.layers))
rep += " -> "
rep += f"{self.output_shape[0]:4} "
rep += f"x{self.output_shape[1]:4} "
rep += f"x{self.output_shape[2]:4}"
return rep
def __setitem__(self, key: str, value: Any):
if key in ("groups", "group_id"):
self.__setattr__(f"_{key}", int(value))
elif key in ("layers",):
self.__setattr__(
f"_{key}",
tuple(
int(i) if int(i) >= 0 else self._index_ + int(i)
for i in value.split(",")
),
)
elif key == "input_shape":
self.__setattr__(f"_{key}", value)
if self._groups != 1:
# split
self._output_shape = (
self._input_shape[0],
self._input_shape[1],
self._input_shape[2] // self._groups,
)
else:
if len(self._layers) == 1:
# route
self._output_shape = self._input_shape
else:
# concatenate
output_shape = [*self._input_shape[0]]
output_shape[-1] = 0
for input_shape in self._input_shape:
output_shape[-1] += input_shape[-1]
self._output_shape = tuple(output_shape)
else:
raise KeyError(f"'{key}' is not supported")
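
# Illustrative sketch (not part of the original module): the output-shape rules
# that __setitem__ implements above, restated as a standalone helper so they
# can be exercised without parsing a darknet config. Shapes are (H, W, C).
def _route_output_shape(input_shape, groups: int = 1, layers=(0,)):
    if groups != 1:
        # split: keep H and W, give each group an equal slice of the channels
        return (input_shape[0], input_shape[1], input_shape[2] // groups)
    if len(layers) == 1:
        # plain route: the single input passes through unchanged
        return input_shape
    # concatenate: H and W come from the first input, channels add up
    output_shape = [*input_shape[0]]
    output_shape[-1] = sum(shape[-1] for shape in input_shape)
    return tuple(output_shape)


if __name__ == "__main__":
    assert _route_output_shape((76, 76, 256), groups=2) == (76, 76, 128)
    assert _route_output_shape((76, 76, 256)) == (76, 76, 256)
    assert _route_output_shape(
        [(76, 76, 256), (76, 76, 512)], layers=(-1, 61)
    ) == (76, 76, 768)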
|
"""Set of models for the user blueprint"""
from typing import TYPE_CHECKING, List, Union
from flask_login import UserMixin
from werkzeug.security import check_password_hash, generate_password_hash
from ..database import (
db,
Column,
Integer,
Model,
String,
Text,
relationship,
)
if TYPE_CHECKING:
from ..task.models import Task
class User(UserMixin, Model):
"""User model"""
user_id = Column(Integer, primary_key=True)
email = Column(String(320), nullable=False, unique=True)
# supposedly 320 is the maximum length of an e-mail address
password_hash = Column(Text, nullable=False)
tasks: List["Task"] = relationship(
"Task", order_by="Task.task_id", back_populates="user", lazy=False
)
def __repr__(self):
return f"User: id={self.user_id}, e-mail={self.email}"
def authenticate(self, password: str) -> bool:
"""Authentication helper."""
return check_password_hash(self.password_hash, password)
def get_id(self) -> int:
"""
Getter for UserMixin
I don't like to use id because it is
a builtin function, so instead I choose
property names like user_id or task_id.
As a consequence, UserMixin which expects
an "id" property must be told how to get it
"""
return self.user_id
def get_num_completed(self) -> int:
"""Helper to get the number of tasks the user has completed."""
return sum([1 for t in self.tasks if t.completed])
@staticmethod
def get_by_email(email: str) -> Union["User", None]:
"""Simple helper to return the"""
return User.query.filter_by(email=email).first()
@staticmethod
def create_user(email: str, password: str) -> "User":
"""Create user factory method."""
# bcrypt is an alternative that is supposedly more
# secure, but not necessary for this project
password_hash = generate_password_hash(password)
existing_user = db.session.query(User).filter_by(email=email).first()
if existing_user:
raise ValueError("User with that e-mail already exists!")
else:
user = User(email=email, password_hash=password_hash) # type: ignore
db.session.add(user)
db.session.commit()
return user
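
# Illustrative sketch (not tied to the Flask app): the werkzeug round trip that
# create_user() and authenticate() rely on.
if __name__ == "__main__":
    hashed = generate_password_hash("s3cret")
    assert check_password_hash(hashed, "s3cret")
    assert not check_password_hash(hashed, "wrong-password")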
|
#!/usr/bin/python
"""
@author: Michael Rapp (mrapp@ke.tu-darmstadt.de)
"""
import numpy as np
DTYPE_INTP = np.intp
DTYPE_UINT8 = np.uint8
DTYPE_UINT32 = np.uint32
DTYPE_FLOAT32 = np.float32
DTYPE_FLOAT64 = np.float64
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import typing
import opentelemetry.trace as trace
from opentelemetry.context.context import Context
from opentelemetry.trace.propagation import textmap
# Keys and values are strings of up to 256 printable US-ASCII characters.
# Implementations should conform to the `W3C Trace Context - Tracestate`_
# spec, which describes additional restrictions on valid field values.
#
# .. _W3C Trace Context - Tracestate:
# https://www.w3.org/TR/trace-context/#tracestate-field
_KEY_WITHOUT_VENDOR_FORMAT = r"[a-z][_0-9a-z\-\*\/]{0,255}"
_KEY_WITH_VENDOR_FORMAT = (
r"[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}"
)
_KEY_FORMAT = _KEY_WITHOUT_VENDOR_FORMAT + "|" + _KEY_WITH_VENDOR_FORMAT
_VALUE_FORMAT = (
r"[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]"
)
_DELIMITER_FORMAT = "[ \t]*,[ \t]*"
_MEMBER_FORMAT = "({})(=)({})[ \t]*".format(_KEY_FORMAT, _VALUE_FORMAT)
_DELIMITER_FORMAT_RE = re.compile(_DELIMITER_FORMAT)
_MEMBER_FORMAT_RE = re.compile(_MEMBER_FORMAT)
_TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS = 32
class TraceContextTextMapPropagator(textmap.TextMapPropagator):
"""Extracts and injects using w3c TraceContext's headers.
"""
_TRACEPARENT_HEADER_NAME = "traceparent"
_TRACESTATE_HEADER_NAME = "tracestate"
_TRACEPARENT_HEADER_FORMAT = (
"^[ \t]*([0-9a-f]{2})-([0-9a-f]{32})-([0-9a-f]{16})-([0-9a-f]{2})"
+ "(-.*)?[ \t]*$"
)
_TRACEPARENT_HEADER_FORMAT_RE = re.compile(_TRACEPARENT_HEADER_FORMAT)
def extract(
self,
get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],
carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> Context:
"""Extracts SpanContext from the carrier.
See `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract`
"""
header = get_from_carrier(carrier, self._TRACEPARENT_HEADER_NAME)
if not header:
return trace.set_span_in_context(trace.INVALID_SPAN, context)
match = re.search(self._TRACEPARENT_HEADER_FORMAT_RE, header[0])
if not match:
return trace.set_span_in_context(trace.INVALID_SPAN, context)
version = match.group(1)
trace_id = match.group(2)
span_id = match.group(3)
trace_flags = match.group(4)
if trace_id == "0" * 32 or span_id == "0" * 16:
return trace.set_span_in_context(trace.INVALID_SPAN, context)
if version == "00":
if match.group(5):
return trace.set_span_in_context(trace.INVALID_SPAN, context)
if version == "ff":
return trace.set_span_in_context(trace.INVALID_SPAN, context)
tracestate_headers = get_from_carrier(
carrier, self._TRACESTATE_HEADER_NAME
)
tracestate = _parse_tracestate(tracestate_headers)
span_context = trace.SpanContext(
trace_id=int(trace_id, 16),
span_id=int(span_id, 16),
is_remote=True,
trace_flags=trace.TraceFlags(trace_flags),
trace_state=tracestate,
)
return trace.set_span_in_context(
trace.DefaultSpan(span_context), context
)
def inject(
self,
set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
carrier: textmap.TextMapPropagatorT,
context: typing.Optional[Context] = None,
) -> None:
"""Injects SpanContext into the carrier.
See `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject`
"""
span = trace.get_current_span(context)
span_context = span.get_context()
if span_context == trace.INVALID_SPAN_CONTEXT:
return
traceparent_string = "00-{:032x}-{:016x}-{:02x}".format(
span_context.trace_id,
span_context.span_id,
span_context.trace_flags,
)
set_in_carrier(
carrier, self._TRACEPARENT_HEADER_NAME, traceparent_string
)
if span_context.trace_state:
tracestate_string = _format_tracestate(span_context.trace_state)
set_in_carrier(
carrier, self._TRACESTATE_HEADER_NAME, tracestate_string
)
def _parse_tracestate(header_list: typing.List[str]) -> trace.TraceState:
"""Parse one or more w3c tracestate header into a TraceState.
Args:
string: the value of the tracestate header.
Returns:
A valid TraceState that contains values extracted from
the tracestate header.
If the format of one headers is illegal, all values will
be discarded and an empty tracestate will be returned.
If the number of keys is beyond the maximum, all values
will be discarded and an empty tracestate will be returned.
"""
tracestate = trace.TraceState()
value_count = 0
for header in header_list:
for member in re.split(_DELIMITER_FORMAT_RE, header):
# empty members are valid, but no need to process further.
if not member:
continue
match = _MEMBER_FORMAT_RE.fullmatch(member)
if not match:
# TODO: log this?
return trace.TraceState()
key, _eq, value = match.groups()
if key in tracestate: # pylint:disable=E1135
                # duplicate keys are not legal in
                # the header, so we discard the whole tracestate
return trace.TraceState()
# typing.Dict's update is not recognized by pylint:
# https://github.com/PyCQA/pylint/issues/2420
tracestate[key] = value # pylint:disable=E1137
value_count += 1
if value_count > _TRACECONTEXT_MAXIMUM_TRACESTATE_KEYS:
return trace.TraceState()
return tracestate
def _format_tracestate(tracestate: trace.TraceState) -> str:
"""Parse a w3c tracestate header into a TraceState.
Args:
tracestate: the tracestate header to write
Returns:
A string that adheres to the w3c tracestate
header format.
"""
return ",".join(key + "=" + value for key, value in tracestate.items())
|
from abc import ABC, abstractmethod
from copy import copy
from shutil import rmtree
from typing import Any, Union, Optional
from pathlib import Path
import os
import pickle
import pandas as pd
def _is_empty(data: Optional[pd.DataFrame]) -> bool:
return data is None or (isinstance(data, pd.DataFrame) and not len(data))
class Cache(ABC):
"""
Abstract class which defines the caching interface.
Empty values (`None` or an empty :class:`pandas.DataFrame`) will not be saved in the cache.
"""
@abstractmethod
def __getitem__(self, key: str) -> Optional[Any]:
pass
@abstractmethod
def __setitem__(self, key: str, value: Any) -> None:
pass
@abstractmethod
def __len__(self) -> int:
pass
@abstractmethod
def clear(self) -> None: # noqa: D102
pass
@property
@abstractmethod
def path(self) -> Optional[Union[str, Path]]: # noqa: D102
pass
@abstractmethod
def __str__(self) -> str:
pass
def __repr__(self) -> str:
return str(self)
class FileCache(Cache):
"""
Cache which persists the data into :mod:`pickle` files.
Parameters
----------
path
Path to a directory where the files will be stored.
"""
_suffix = ".pickle"
def __init__(self, path: Union[str, Path]):
if not isinstance(path, (str, Path)):
raise TypeError(
f"Expected `path` to be either `str` or `pathlib.Path`, "
f"found `{type(path).__name__}`."
)
if not str(path):
raise ValueError("Empty cache path.")
self._cache_dir = Path(path)
def __contains__(self, key: str) -> bool:
if not key.endswith(self._suffix):
key += self._suffix
return (self._cache_dir / key).is_file()
def __setitem__(self, key: str, value: Any) -> None:
if _is_empty(value):
return
self._cache_dir.mkdir(parents=True, exist_ok=True)
fname = str(key)
if not fname.endswith(self._suffix):
fname += self._suffix
with open(self._cache_dir / fname, "wb") as fout:
pickle.dump(value, fout)
def __getitem__(self, key: str) -> Any:
if not key.endswith(self._suffix):
key += self._suffix
if not (self._cache_dir / key).is_file():
raise KeyError(self._cache_dir / key)
with open(self._cache_dir / key, "rb") as fin:
return pickle.load(fin)
def __len__(self) -> int:
return (
len([f for f in os.listdir(self.path) if str(f).endswith(self._suffix)])
if self.path.is_dir()
else 0
)
@property
def path(self) -> Path:
"""Return the directory where the cache files are stored."""
return self._cache_dir
def clear(self) -> None:
"""Remove all files and the directory under :attr:`path`."""
if self._cache_dir.is_dir():
rmtree(self._cache_dir)
def __str__(self) -> str:
return f"<{self.__class__.__name__}[size={len(self)}, path={str(self.path)!r}]>"
class MemoryCache(dict, Cache):
"""
Cache which persists the data into the memory.
    Objects stored in the cache are copied using :func:`copy.copy`.
"""
@property
def path(self) -> Optional[str]:
"""Return `'memory'`."""
return "memory"
def __setitem__(self, key: str, value: Any) -> None:
if _is_empty(value):
return
# the value is usually a dataframe (copy for safety)
return super().__setitem__(key, copy(value))
def __getitem__(self, key: str) -> Any:
return copy(super().__getitem__(key))
def __str__(self) -> str:
return f"<{self.__class__.__name__}[size={len(self)}]>"
def __repr__(self) -> str:
return str(self)
def __copy__(self) -> "MemoryCache":
return self
def copy(self) -> "MemoryCache":
"""Return self."""
return self
class NoopCache(MemoryCache):
"""Cache which doesn't save anything."""
@property
def path(self) -> Optional[str]:
"""Return `None`."""
return None
def __setitem__(self, key: str, value: Any) -> None:
pass
def __str__(self):
return f"<{self.__class__.__name__}>"
def clear_cache() -> None:
"""Remove all cached data from :attr:`omnipath.options.cache`."""
from omnipath import options
options.cache.clear()
__all__ = ["clear_cache"]
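
# Illustrative sketch (not part of the original module): empty values are
# silently skipped, non-empty frames are stored and copied on read.
if __name__ == "__main__":
    cache = MemoryCache()
    cache["empty"] = pd.DataFrame()               # skipped: empty frame
    cache["genes"] = pd.DataFrame({"id": [1, 2]})
    assert "empty" not in cache
    assert len(cache) == 1
    assert cache["genes"] is not cache["genes"]   # each read returns a copy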
|
import sys
from pathlib import Path
from numpy import loadtxt
import argparse
from copernicus_search import copernicus_search
userDataPath = Path(sys.path[0]).resolve() / '..' / 'userdata'
staticDataPath = Path(str(loadtxt(userDataPath / 'staticDataPath.txt', dtype='str')))
sys.path.insert(0, str(staticDataPath))
import values
numberOfResults = 20
filterTagName = 'sentinel3:cloudypixels'
filterAttribute = 'percentage'
filterMin = 50
filterMax = 100
filterDateRange = '2018-06-01T00:00:00.000Z TO NOW'
def search_for_cloudy(resultCount=numberOfResults):
    urls = copernicus_search(resultCount, filterTagName, filterAttribute, filterMin, filterMax, filterDateRange)
    return urls
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--productCount', help='Number of SLSTR Products to Search For', type=int)
arguments = parser.parse_args()
    if arguments.productCount:
search_for_cloudy(arguments.productCount)
else:
search_for_cloudy()
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cugraph
from cugraph.tests import utils
import random
import numpy as np
# Temporarily suppress warnings till networkX fixes deprecation warnings
# (Using or importing the ABCs from 'collections' instead of from
# 'collections.abc' is deprecated, and in 3.8 it will stop working) for
# python 3.7. Also, this import networkx needs to be relocated in the
# third-party group once this gets fixed.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import networkx as nx
# NOTE: The endpoints parameter is not currently being tested; there could be a
# test to verify that Python raises an error if it is used
# =============================================================================
# Parameters
# =============================================================================
DIRECTED_GRAPH_OPTIONS = [False, True]
DEFAULT_EPSILON = 0.0001
TINY_DATASETS = ['../datasets/karate.csv']
UNRENUMBERED_DATASETS = ['../datasets/karate.csv']
SMALL_DATASETS = ['../datasets/netscience.csv']
SUBSET_SIZE_OPTIONS = [4]
SUBSET_SEED_OPTIONS = [42]
# NOTE: The following is not really being exploited in the tests as the
# datasets that are used are too small to compare, but it ensures that both
# paths are actually sane
RESULT_DTYPE_OPTIONS = [np.float32, np.float64]
# =============================================================================
# Comparison functions
# =============================================================================
def build_graphs(graph_file, directed=True):
# cugraph
cu_M = utils.read_csv_file(graph_file)
G = cugraph.DiGraph() if directed else cugraph.Graph()
G.from_cudf_edgelist(cu_M, source='0', destination='1')
G.view_adj_list() # Enforce generation before computation
# networkx
M = utils.read_csv_for_nx(graph_file)
Gnx = nx.from_pandas_edgelist(M, create_using=(nx.DiGraph() if directed
else nx.Graph()),
source='0', target='1')
return G, Gnx
def calc_betweenness_centrality(graph_file, directed=True, normalized=False,
weight=None, endpoints=False,
k=None, seed=None,
result_dtype=np.float32):
""" Generate both cugraph and networkx betweenness centrality
Parameters
----------
graph_file : string
Path to COO Graph representation in .csv format
directed : bool, optional, default=True
normalized : bool
True: Normalize Betweenness Centrality scores
False: Scores are left unnormalized
k : int or None, optional, default=None
int: Number of sources to sample from
None: All sources are used to compute
seed : int or None, optional, default=None
Seed for random sampling of the starting point
Returns
-------
cu_bc : dict
Each key is the vertex identifier, each value is the betweenness
centrality score obtained from cugraph betweenness_centrality
nx_bc : dict
Each key is the vertex identifier, each value is the betweenness
centrality score obtained from networkx betweenness_centrality
"""
G, Gnx = build_graphs(graph_file, directed=directed)
calc_func = None
if k is not None and seed is not None:
calc_func = _calc_bc_subset
elif k is not None:
calc_func = _calc_bc_subset_fixed
    else:  # We proceed to a comparison using every source
calc_func = _calc_bc_full
cu_bc, nx_bc = calc_func(G, Gnx, normalized=normalized, weight=weight,
endpoints=endpoints, k=k, seed=seed,
result_dtype=result_dtype)
return cu_bc, nx_bc
def _calc_bc_subset(G, Gnx, normalized, weight, endpoints, k, seed,
result_dtype):
# NOTE: Networkx API does not allow passing a list of vertices
# And the sampling is operated on Gnx.nodes() directly
# We first mimic acquisition of the nodes to compare with same sources
random.seed(seed) # It will be called again in nx's call
sources = random.sample(Gnx.nodes(), k)
df = cugraph.betweenness_centrality(G, normalized=normalized,
weight=weight,
endpoints=endpoints,
k=sources,
result_dtype=result_dtype)
nx_bc = nx.betweenness_centrality(Gnx, normalized=normalized, k=k,
seed=seed)
cu_bc = {key: score for key, score in
zip(df['vertex'].to_array(),
df['betweenness_centrality'].to_array())}
return cu_bc, nx_bc
def _calc_bc_subset_fixed(G, Gnx, normalized, weight, endpoints, k, seed,
result_dtype):
assert isinstance(k, int), "This test is meant for verifying coherence " \
"when k is given as an int"
# In the fixed set we compare cu_bc against itself as we random.seed(seed)
# on the same seed and then sample on the number of vertices themselves
if seed is None:
seed = 123 # random.seed(None) uses time, but we want same sources
random.seed(seed) # It will be called again in cugraph's call
sources = random.sample(range(G.number_of_vertices()), k)
# The first call is going to proceed to the random sampling in the same
# fashion as the lines above
df = cugraph.betweenness_centrality(G, k=k, normalized=normalized,
weight=weight,
endpoints=endpoints,
seed=seed,
result_dtype=result_dtype)
# The second call is going to process source that were already sampled
    # We set seed to None because passing k as an int together with a non-None
    # seed is not the intended usage
df2 = cugraph.betweenness_centrality(G, k=sources, normalized=normalized,
weight=weight,
endpoints=endpoints,
seed=None,
result_dtype=result_dtype)
cu_bc = {key: score for key, score in
zip(df['vertex'].to_array(),
df['betweenness_centrality'].to_array())}
cu_bc2 = {key: score for key, score in
zip(df2['vertex'].to_array(),
df2['betweenness_centrality'].to_array())}
return cu_bc, cu_bc2
def _calc_bc_full(G, Gnx, normalized, weight, endpoints,
k, seed,
result_dtype):
df = cugraph.betweenness_centrality(G, normalized=normalized,
weight=weight,
endpoints=endpoints,
result_dtype=result_dtype)
assert df['betweenness_centrality'].dtype == result_dtype, \
"'betweenness_centrality' column has not the expected type"
nx_bc = nx.betweenness_centrality(Gnx, normalized=normalized,
weight=weight,
endpoints=endpoints)
cu_bc = {key: score for key, score in
zip(df['vertex'].to_array(),
df['betweenness_centrality'].to_array())}
return cu_bc, nx_bc
# =============================================================================
# Utils
# =============================================================================
def compare_single_score(result, expected, epsilon):
"""
    Compare a result score to its expected value with relative error
    Parameters
    ----------
    result : floating point
        score obtained from cugraph betweenness_centrality
    expected : floating point
        reference score (here obtained from networkx)
    epsilon : floating point
        indicates relative error tolerated
    Returns
    -------
    close : bool
        True: Result and expected are close to each other
        False: Otherwise
"""
close = np.isclose(result, expected, rtol=epsilon)
return close
# NOTE: We assume that both cugraph and networkx are generating dicts with
# all the sources, thus we can compare all of them
def compare_scores(cu_bc, ref_bc, epsilon=DEFAULT_EPSILON):
missing_key_error = 0
score_mismatch_error = 0
for vertex in ref_bc:
if vertex in cu_bc:
result = cu_bc[vertex]
expected = ref_bc[vertex]
if not compare_single_score(result, expected, epsilon=epsilon):
score_mismatch_error += 1
print("ERROR: vid = {}, cu = {}, "
"nx = {}".format(vertex, result, expected))
else:
missing_key_error += 1
print("[ERROR] Missing vertex {vertex}".format(vertex=vertex))
assert missing_key_error == 0, "Some vertices were missing"
assert score_mismatch_error == 0, "Some scores were not close enough"
def prepare_test():
gc.collect()
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.parametrize('graph_file', TINY_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_normalized_tiny(graph_file,
directed,
result_dtype):
"""Test Normalized Betweenness Centrality"""
prepare_test()
cu_bc, nx_bc = calc_betweenness_centrality(graph_file, directed=directed,
normalized=True,
result_dtype=result_dtype)
compare_scores(cu_bc, nx_bc)
@pytest.mark.parametrize('graph_file', TINY_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_unnormalized_tiny(graph_file,
directed,
result_dtype):
"""Test Unnormalized Betweenness Centrality"""
prepare_test()
cu_bc, nx_bc = calc_betweenness_centrality(graph_file, directed=directed,
normalized=False,
result_dtype=result_dtype)
compare_scores(cu_bc, nx_bc)
@pytest.mark.parametrize('graph_file', SMALL_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_normalized_small(graph_file,
directed,
result_dtype):
"""Test Unnormalized Betweenness Centrality"""
prepare_test()
cu_bc, nx_bc = calc_betweenness_centrality(graph_file, directed=directed,
normalized=True,
result_dtype=result_dtype)
compare_scores(cu_bc, nx_bc)
@pytest.mark.parametrize('graph_file', SMALL_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_unnormalized_small(graph_file,
directed,
result_dtype):
"""Test Unnormalized Betweenness Centrality"""
prepare_test()
cu_bc, nx_bc = calc_betweenness_centrality(graph_file, directed=directed,
normalized=False,
result_dtype=result_dtype)
compare_scores(cu_bc, nx_bc)
@pytest.mark.parametrize('graph_file', SMALL_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('subset_size', SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize('subset_seed', SUBSET_SEED_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_normalized_subset_small(graph_file,
directed,
subset_size,
subset_seed,
result_dtype):
"""Test Unnormalized Betweenness Centrality using a subset
Only k sources are considered for an approximate Betweenness Centrality
"""
prepare_test()
cu_bc, nx_bc = calc_betweenness_centrality(graph_file,
directed=directed,
normalized=True,
k=subset_size,
seed=subset_seed,
result_dtype=result_dtype)
compare_scores(cu_bc, nx_bc)
# NOTE: This test should only be executed on unrenumbered datasets: the
# comparison function inside first performs a random sampling over the number
# of vertices (thus direct offsets into the graph structure) instead of actual
# vertex identifiers
@pytest.mark.parametrize('graph_file', UNRENUMBERED_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('subset_size', SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_normalized_fixed_sample(graph_file,
directed,
subset_size,
result_dtype):
"""Test Unnormalized Betweenness Centrality using a subset
Only k sources are considered for an approximate Betweenness Centrality
"""
prepare_test()
cu_bc, nx_bc = calc_betweenness_centrality(graph_file,
directed=directed,
normalized=True,
k=subset_size,
seed=None,
result_dtype=result_dtype)
compare_scores(cu_bc, nx_bc)
@pytest.mark.parametrize('graph_file', SMALL_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('subset_size', SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize('subset_seed', SUBSET_SEED_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_unnormalized_subset_small(graph_file,
directed,
subset_size,
subset_seed,
result_dtype):
"""Test Unnormalized Betweenness Centrality on Graph on subset
Only k sources are considered for an approximate Betweenness Centrality
"""
prepare_test()
cu_bc, nx_bc = calc_betweenness_centrality(graph_file,
directed=directed,
normalized=False,
k=subset_size,
seed=subset_seed,
result_dtype=result_dtype)
compare_scores(cu_bc, nx_bc)
@pytest.mark.parametrize('graph_file', TINY_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_unnormalized_endpoints_except(graph_file,
directed,
result_dtype):
"""Test calls betwenness_centrality unnormalized + endpoints"""
prepare_test()
with pytest.raises(NotImplementedError):
cu_bc, nx_bc = calc_betweenness_centrality(graph_file,
normalized=False,
endpoints=True,
directed=directed,
result_dtype=result_dtype)
@pytest.mark.parametrize('graph_file', TINY_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_normalized_endpoints_except(graph_file,
directed,
result_dtype):
"""Test calls betwenness_centrality normalized + endpoints"""
prepare_test()
with pytest.raises(NotImplementedError):
cu_bc, nx_bc = calc_betweenness_centrality(graph_file,
normalized=True,
endpoints=True,
directed=directed,
result_dtype=result_dtype)
@pytest.mark.parametrize('graph_file', TINY_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_unnormalized_weight_except(graph_file,
directed,
result_dtype):
"""Test calls betwenness_centrality unnormalized + weight"""
prepare_test()
with pytest.raises(NotImplementedError):
cu_bc, nx_bc = calc_betweenness_centrality(graph_file,
normalized=False,
weight=True,
directed=directed,
result_dtype=result_dtype)
@pytest.mark.parametrize('graph_file', TINY_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize('result_dtype', RESULT_DTYPE_OPTIONS)
def test_betweenness_centrality_normalized_weight_except(graph_file,
directed,
result_dtype):
"""Test calls betwenness_centrality normalized + weight"""
prepare_test()
with pytest.raises(NotImplementedError):
cu_bc, nx_bc = calc_betweenness_centrality(graph_file,
normalized=True,
weight=True,
directed=directed,
result_dtype=result_dtype)
@pytest.mark.parametrize('graph_file', TINY_DATASETS)
@pytest.mark.parametrize('directed', DIRECTED_GRAPH_OPTIONS)
def test_betweenness_centrality_invalid_dtype(graph_file, directed):
"""Test calls betwenness_centrality normalized + weight"""
prepare_test()
with pytest.raises(TypeError):
cu_bc, nx_bc = calc_betweenness_centrality(graph_file,
normalized=True,
result_dtype=str,
directed=directed)
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from dataclasses import dataclass
from typing import Tuple
from pants.engine.console import Console
from pants.engine.fs import (
EMPTY_DIRECTORY_DIGEST,
Digest,
DirectoriesToMerge,
DirectoryToMaterialize,
Workspace,
)
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.isolated_process import ExecuteProcessResult
from pants.engine.legacy.graph import HydratedTargetsWithOrigins
from pants.engine.legacy.structs import TargetAdaptorWithOrigin
from pants.engine.objects import union
from pants.engine.rules import UnionMembership, goal_rule
from pants.engine.selectors import Get, MultiGet
@dataclass(frozen=True)
class FmtResult:
digest: Digest
stdout: str
stderr: str
@staticmethod
def noop() -> "FmtResult":
return FmtResult(digest=EMPTY_DIRECTORY_DIGEST, stdout="", stderr="")
@staticmethod
def from_execute_process_result(process_result: ExecuteProcessResult) -> "FmtResult":
return FmtResult(
digest=process_result.output_directory_digest,
stdout=process_result.stdout.decode(),
stderr=process_result.stderr.decode(),
)
@dataclass(frozen=True)
class AggregatedFmtResults:
"""This collection allows us to safely aggregate multiple `FmtResult`s for a language.
The `combined_digest` is used to ensure that none of the formatters overwrite each other. The
language implementation should run each formatter one at a time and pipe the resulting digest of
one formatter into the next. The `combined_digest` must contain all files for the target,
including any which were not re-formatted.
"""
results: Tuple[FmtResult, ...]
combined_digest: Digest
@union
class FormatTarget:
"""A union for registration of a formattable target type.
The union members should be subclasses of TargetAdaptorWithOrigin.
"""
@staticmethod
def is_formattable(
adaptor_with_origin: TargetAdaptorWithOrigin, *, union_membership: UnionMembership,
) -> bool:
is_fmt_target = union_membership.is_member(FormatTarget, adaptor_with_origin)
has_sources = hasattr(adaptor_with_origin.adaptor, "sources") and bool(
adaptor_with_origin.adaptor.sources.snapshot.files
)
return has_sources and is_fmt_target
class FmtOptions(GoalSubsystem):
"""Autoformat source code."""
# TODO: make this "fmt"
# Blocked on https://github.com/pantsbuild/pants/issues/8351
name = "fmt2"
required_union_implementations = (FormatTarget,)
class Fmt(Goal):
subsystem_cls = FmtOptions
@goal_rule
async def fmt(
console: Console,
targets_with_origins: HydratedTargetsWithOrigins,
workspace: Workspace,
union_membership: UnionMembership,
) -> Fmt:
adaptors_with_origins = [
TargetAdaptorWithOrigin.create(target_with_origin.target.adaptor, target_with_origin.origin)
for target_with_origin in targets_with_origins
]
aggregated_results = await MultiGet(
Get[AggregatedFmtResults](FormatTarget, adaptor_with_origin)
for adaptor_with_origin in adaptors_with_origins
if FormatTarget.is_formattable(adaptor_with_origin, union_membership=union_membership)
)
individual_results = list(
itertools.chain.from_iterable(
aggregated_result.results for aggregated_result in aggregated_results
)
)
if not individual_results:
return Fmt(exit_code=0)
# NB: this will fail if there are any conflicting changes, which we want to happen rather than
    # silently having one result override the other. In practice, this should never happen due
# to our use of an aggregator rule for each distinct language.
merged_formatted_digest = await Get[Digest](
DirectoriesToMerge(
tuple(aggregated_result.combined_digest for aggregated_result in aggregated_results)
)
)
workspace.materialize_directory(DirectoryToMaterialize(merged_formatted_digest))
for result in individual_results:
if result.stdout:
console.print_stdout(result.stdout)
if result.stderr:
console.print_stderr(result.stderr)
# Since the rules to produce FmtResult should use ExecuteRequest, rather than
# FallibleExecuteProcessRequest, we assume that there were no failures.
return Fmt(exit_code=0)
def rules():
return [fmt]
|
#!/usr/bin/python
#
# Copyright (c) 2009-2021, Google LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Google LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Google LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Shared code for validating generated_file_staleness_test() rules.
This code is used by test scripts generated from
generated_file_staleness_test() rules.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
from shutil import copyfile
class _FilePair(object):
"""Represents a single (target, generated) file pair."""
def __init__(self, target, generated):
self.target = target
self.generated = generated
class Config(object):
"""Represents the configuration for a single staleness test target."""
def __init__(self, file_list):
# Duplicate to avoid modifying our arguments.
file_list = list(file_list)
# The file list contains a few other bits of information at the end.
# This is packed by the code in build_defs.bzl.
self.target_name = file_list.pop()
self.package_name = file_list.pop()
self.pattern = file_list.pop()
self.file_list = file_list
def _GetFilePairs(config):
"""Generates the list of file pairs.
Args:
config: a Config object representing this target's config.
Returns:
A list of _FilePair objects.
"""
ret = []
has_bazel_genfiles = os.path.exists("bazel-bin")
for filename in config.file_list:
target = os.path.join(config.package_name, filename)
generated = os.path.join(config.package_name, config.pattern % filename)
if has_bazel_genfiles:
generated = os.path.join("bazel-bin", generated)
# Generated files should always exist. Blaze should guarantee this before
# we are run.
if not os.path.isfile(generated):
print("Generated file '%s' does not exist." % generated)
print("Please run this command to generate it:")
print(" bazel build %s:%s" % (config.package_name, config.target_name))
sys.exit(1)
ret.append(_FilePair(target, generated))
return ret
def _GetMissingAndStaleFiles(file_pairs):
"""Generates lists of missing and stale files.
Args:
file_pairs: a list of _FilePair objects.
Returns:
missing_files: a list of _FilePair objects representing missing files.
These target files do not exist at all.
stale_files: a list of _FilePair objects representing stale files.
These target files exist but have stale contents.
"""
missing_files = []
stale_files = []
for pair in file_pairs:
if not os.path.isfile(pair.target):
missing_files.append(pair)
continue
with open(pair.generated) as g, open(pair.target) as t:
if g.read() != t.read():
stale_files.append(pair)
return missing_files, stale_files
def _CopyFiles(file_pairs):
"""Copies all generated files to the corresponding target file.
The target files must be writable already.
Args:
file_pairs: a list of _FilePair objects that we want to copy.
"""
for pair in file_pairs:
target_dir = os.path.dirname(pair.target)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
copyfile(pair.generated, pair.target)
def FixFiles(config):
"""Implements the --fix option: overwrites missing or out-of-date files.
Args:
config: the Config object for this test.
"""
file_pairs = _GetFilePairs(config)
missing_files, stale_files = _GetMissingAndStaleFiles(file_pairs)
_CopyFiles(stale_files + missing_files)
def CheckFilesMatch(config):
"""Checks whether each target file matches the corresponding generated file.
Args:
config: the Config object for this test.
Returns:
None if everything matches, otherwise a string error message.
"""
diff_errors = []
file_pairs = _GetFilePairs(config)
missing_files, stale_files = _GetMissingAndStaleFiles(file_pairs)
for pair in missing_files:
diff_errors.append("File %s does not exist" % pair.target)
for pair in stale_files:
diff_errors.append("File %s is out of date" % pair.target)
if diff_errors:
error_msg = "Files out of date!\n\n"
error_msg += "To fix run THIS command:\n"
error_msg += " bazel-bin/%s/%s --fix\n\n" % (config.package_name,
config.target_name)
error_msg += "Errors:\n"
error_msg += " " + "\n ".join(diff_errors)
return error_msg
else:
return None
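
# Illustrative sketch (not part of the original module): how build_defs.bzl is
# expected to pack the extra fields onto the end of the file list. All names
# below are made up.
if __name__ == "__main__":
    packed = ["foo.pb.h", "bar.pb.h", "generated/%s", "my/pkg", "staleness_test"]
    config = Config(packed)
    assert config.file_list == ["foo.pb.h", "bar.pb.h"]
    assert config.pattern == "generated/%s"
    assert config.package_name == "my/pkg"
    assert config.target_name == "staleness_test"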
|
import csv
with open('test2.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
print('DB', end=' ')
first = True
for num in row:
if not first:
print('', end=', ')
first = False
if int(num) < 16:
print('$0', end='')
print(hex(int(num))[2:], end='')
else:
print('$', end='')
print(hex(int(num))[2:], end='')
print("")
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .account_id import AccountID
from .allow_trust_op_asset import AllowTrustOpAsset
from .uint32 import Uint32
__all__ = ["AllowTrustOp"]
class AllowTrustOp:
"""
XDR Source Code
----------------------------------------------------------------
struct AllowTrustOp
{
AccountID trustor;
union switch (AssetType type)
{
// ASSET_TYPE_NATIVE is not allowed
case ASSET_TYPE_CREDIT_ALPHANUM4:
AssetCode4 assetCode4;
case ASSET_TYPE_CREDIT_ALPHANUM12:
AssetCode12 assetCode12;
// add other asset types here in the future
}
asset;
// 0, or any bitwise combination of TrustLineFlags
uint32 authorize;
};
----------------------------------------------------------------
"""
def __init__(
self, trustor: AccountID, asset: AllowTrustOpAsset, authorize: Uint32,
) -> None:
self.trustor = trustor
self.asset = asset
self.authorize = authorize
def pack(self, packer: Packer) -> None:
self.trustor.pack(packer)
self.asset.pack(packer)
self.authorize.pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "AllowTrustOp":
trustor = AccountID.unpack(unpacker)
asset = AllowTrustOpAsset.unpack(unpacker)
authorize = Uint32.unpack(unpacker)
return cls(trustor=trustor, asset=asset, authorize=authorize,)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "AllowTrustOp":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "AllowTrustOp":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.trustor == other.trustor
and self.asset == other.asset
and self.authorize == other.authorize
)
def __str__(self):
out = [
f"trustor={self.trustor}",
f"asset={self.asset}",
f"authorize={self.authorize}",
]
return f"<AllowTrustOp {[', '.join(out)]}>"
|
import pytest
from {{cookiecutter.package_name}}.{{cookiecutter.module_name}} import {{cookiecutter.class_name}}
def test_case_one():
pass
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
points = vtk.vtkPoints()
points.InsertNextPoint(0,-16,0)
points.InsertNextPoint(0,0,-14)
points.InsertNextPoint(0,0,14)
points.InsertNextPoint(14,0,0)
points.InsertNextPoint(10,20,-10)
points.InsertNextPoint(10,20,10)
points.InsertNextPoint(10,-20,-10)
points.InsertNextPoint(10,-20,10)
points.InsertNextPoint(-10,-20,-10)
points.InsertNextPoint(-10,-20,10)
points.InsertNextPoint(-10,20,-10)
points.InsertNextPoint(-10,20,10)
points.InsertNextPoint(-2,27,0)
points.InsertNextPoint(0,27,2)
points.InsertNextPoint(0,27,-2)
points.InsertNextPoint(2,27,0)
points.InsertNextPoint(-14,4,-1)
points.InsertNextPoint(-14,3,0)
points.InsertNextPoint(-14,5,0)
points.InsertNextPoint(-14,4,1)
points.InsertNextPoint(-1,38,-2)
points.InsertNextPoint(-1,38,2)
points.InsertNextPoint(2,35,-2)
points.InsertNextPoint(2,35,2)
points.InsertNextPoint(17,42,0)
points.InsertNextPoint(15,40,2)
points.InsertNextPoint(15,39,-2)
points.InsertNextPoint(13,37,0)
points.InsertNextPoint(19,-2,-2)
points.InsertNextPoint(19,-2,2)
points.InsertNextPoint(15,2,-2)
points.InsertNextPoint(15,2,2)
faces = vtk.vtkCellArray()
faces.InsertNextCell(3)
faces.InsertCellPoint(3)
faces.InsertCellPoint(4)
faces.InsertCellPoint(5)
faces.InsertNextCell(3)
faces.InsertCellPoint(3)
faces.InsertCellPoint(5)
faces.InsertCellPoint(7)
faces.InsertNextCell(3)
faces.InsertCellPoint(3)
faces.InsertCellPoint(7)
faces.InsertCellPoint(6)
faces.InsertNextCell(3)
faces.InsertCellPoint(3)
faces.InsertCellPoint(6)
faces.InsertCellPoint(4)
faces.InsertNextCell(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(6)
faces.InsertCellPoint(7)
faces.InsertNextCell(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(7)
faces.InsertCellPoint(9)
faces.InsertNextCell(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(9)
faces.InsertCellPoint(8)
faces.InsertNextCell(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(8)
faces.InsertCellPoint(6)
faces.InsertNextCell(3)
faces.InsertCellPoint(1)
faces.InsertCellPoint(4)
faces.InsertCellPoint(6)
faces.InsertNextCell(3)
faces.InsertCellPoint(1)
faces.InsertCellPoint(6)
faces.InsertCellPoint(8)
faces.InsertNextCell(3)
faces.InsertCellPoint(1)
faces.InsertCellPoint(8)
faces.InsertCellPoint(10)
faces.InsertNextCell(3)
faces.InsertCellPoint(1)
faces.InsertCellPoint(10)
faces.InsertCellPoint(4)
faces.InsertNextCell(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(11)
faces.InsertCellPoint(9)
faces.InsertNextCell(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(9)
faces.InsertCellPoint(7)
faces.InsertNextCell(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(7)
faces.InsertCellPoint(5)
faces.InsertNextCell(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(5)
faces.InsertCellPoint(11)
faces.InsertNextCell(3)
faces.InsertCellPoint(4)
faces.InsertCellPoint(15)
faces.InsertCellPoint(5)
faces.InsertNextCell(3)
faces.InsertCellPoint(4)
faces.InsertCellPoint(14)
faces.InsertCellPoint(15)
faces.InsertNextCell(3)
faces.InsertCellPoint(5)
faces.InsertCellPoint(13)
faces.InsertCellPoint(11)
faces.InsertNextCell(3)
faces.InsertCellPoint(5)
faces.InsertCellPoint(15)
faces.InsertCellPoint(13)
faces.InsertNextCell(3)
faces.InsertCellPoint(11)
faces.InsertCellPoint(12)
faces.InsertCellPoint(10)
faces.InsertNextCell(3)
faces.InsertCellPoint(11)
faces.InsertCellPoint(13)
faces.InsertCellPoint(12)
faces.InsertNextCell(3)
faces.InsertCellPoint(10)
faces.InsertCellPoint(14)
faces.InsertCellPoint(4)
faces.InsertNextCell(3)
faces.InsertCellPoint(10)
faces.InsertCellPoint(12)
faces.InsertCellPoint(14)
faces.InsertNextCell(3)
faces.InsertCellPoint(8)
faces.InsertCellPoint(17)
faces.InsertCellPoint(16)
faces.InsertNextCell(3)
faces.InsertCellPoint(8)
faces.InsertCellPoint(9)
faces.InsertCellPoint(17)
faces.InsertNextCell(3)
faces.InsertCellPoint(9)
faces.InsertCellPoint(19)
faces.InsertCellPoint(17)
faces.InsertNextCell(3)
faces.InsertCellPoint(9)
faces.InsertCellPoint(11)
faces.InsertCellPoint(19)
faces.InsertNextCell(3)
faces.InsertCellPoint(11)
faces.InsertCellPoint(18)
faces.InsertCellPoint(19)
faces.InsertNextCell(3)
faces.InsertCellPoint(11)
faces.InsertCellPoint(10)
faces.InsertCellPoint(18)
faces.InsertNextCell(3)
faces.InsertCellPoint(10)
faces.InsertCellPoint(16)
faces.InsertCellPoint(18)
faces.InsertNextCell(3)
faces.InsertCellPoint(10)
faces.InsertCellPoint(8)
faces.InsertCellPoint(16)
faces.InsertNextCell(3)
faces.InsertCellPoint(13)
faces.InsertCellPoint(21)
faces.InsertCellPoint(12)
faces.InsertNextCell(3)
faces.InsertCellPoint(12)
faces.InsertCellPoint(21)
faces.InsertCellPoint(20)
faces.InsertNextCell(3)
faces.InsertCellPoint(12)
faces.InsertCellPoint(20)
faces.InsertCellPoint(14)
faces.InsertNextCell(3)
faces.InsertCellPoint(14)
faces.InsertCellPoint(20)
faces.InsertCellPoint(22)
faces.InsertNextCell(3)
faces.InsertCellPoint(14)
faces.InsertCellPoint(22)
faces.InsertCellPoint(15)
faces.InsertNextCell(3)
faces.InsertCellPoint(15)
faces.InsertCellPoint(22)
faces.InsertCellPoint(23)
faces.InsertNextCell(3)
faces.InsertCellPoint(15)
faces.InsertCellPoint(23)
faces.InsertCellPoint(13)
faces.InsertNextCell(3)
faces.InsertCellPoint(13)
faces.InsertCellPoint(23)
faces.InsertCellPoint(21)
faces.InsertNextCell(3)
faces.InsertCellPoint(21)
faces.InsertCellPoint(25)
faces.InsertCellPoint(24)
faces.InsertNextCell(3)
faces.InsertCellPoint(21)
faces.InsertCellPoint(24)
faces.InsertCellPoint(20)
faces.InsertNextCell(3)
faces.InsertCellPoint(20)
faces.InsertCellPoint(24)
faces.InsertCellPoint(26)
faces.InsertNextCell(3)
faces.InsertCellPoint(20)
faces.InsertCellPoint(26)
faces.InsertCellPoint(22)
faces.InsertNextCell(3)
faces.InsertCellPoint(22)
faces.InsertCellPoint(26)
faces.InsertCellPoint(27)
faces.InsertNextCell(3)
faces.InsertCellPoint(22)
faces.InsertCellPoint(27)
faces.InsertCellPoint(23)
faces.InsertNextCell(3)
faces.InsertCellPoint(23)
faces.InsertCellPoint(27)
faces.InsertCellPoint(25)
faces.InsertNextCell(3)
faces.InsertCellPoint(23)
faces.InsertCellPoint(25)
faces.InsertCellPoint(21)
faces.InsertNextCell(3)
faces.InsertCellPoint(25)
faces.InsertCellPoint(29)
faces.InsertCellPoint(24)
faces.InsertNextCell(3)
faces.InsertCellPoint(24)
faces.InsertCellPoint(29)
faces.InsertCellPoint(28)
faces.InsertNextCell(3)
faces.InsertCellPoint(24)
faces.InsertCellPoint(28)
faces.InsertCellPoint(26)
faces.InsertNextCell(3)
faces.InsertCellPoint(26)
faces.InsertCellPoint(28)
faces.InsertCellPoint(30)
faces.InsertNextCell(3)
faces.InsertCellPoint(26)
faces.InsertCellPoint(30)
faces.InsertCellPoint(27)
faces.InsertNextCell(3)
faces.InsertCellPoint(27)
faces.InsertCellPoint(30)
faces.InsertCellPoint(31)
faces.InsertNextCell(3)
faces.InsertCellPoint(27)
faces.InsertCellPoint(31)
faces.InsertCellPoint(25)
faces.InsertNextCell(3)
faces.InsertCellPoint(25)
faces.InsertCellPoint(31)
faces.InsertCellPoint(29)
faces.InsertNextCell(3)
faces.InsertCellPoint(29)
faces.InsertCellPoint(19)
faces.InsertCellPoint(17)
faces.InsertNextCell(3)
faces.InsertCellPoint(29)
faces.InsertCellPoint(17)
faces.InsertCellPoint(28)
faces.InsertNextCell(3)
faces.InsertCellPoint(28)
faces.InsertCellPoint(17)
faces.InsertCellPoint(16)
faces.InsertNextCell(3)
faces.InsertCellPoint(28)
faces.InsertCellPoint(16)
faces.InsertCellPoint(30)
faces.InsertNextCell(3)
faces.InsertCellPoint(30)
faces.InsertCellPoint(16)
faces.InsertCellPoint(18)
faces.InsertNextCell(3)
faces.InsertCellPoint(30)
faces.InsertCellPoint(18)
faces.InsertCellPoint(31)
faces.InsertNextCell(3)
faces.InsertCellPoint(31)
faces.InsertCellPoint(18)
faces.InsertCellPoint(19)
faces.InsertNextCell(3)
faces.InsertCellPoint(31)
faces.InsertCellPoint(19)
faces.InsertCellPoint(29)
model = vtk.vtkPolyData()
model.SetPolys(faces)
model.SetPoints(points)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
#vtkButterflySubdivisionFilter subdivide
subdivide = vtk.vtkLoopSubdivisionFilter()
subdivide.SetInputData(model)
subdivide.SetNumberOfSubdivisions(4)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(subdivide.GetOutputPort())
rose = vtk.vtkLODActor()
rose.SetMapper(mapper)
fe = vtk.vtkFeatureEdges()
fe.SetInputConnection(subdivide.GetOutputPort())
fe.SetFeatureAngle(100)
feMapper = vtk.vtkPolyDataMapper()
feMapper.SetInputConnection(fe.GetOutputPort())
edges = vtk.vtkActor()
edges.SetMapper(feMapper)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(rose)
#ren1.AddActor(edges)
backP = vtk.vtkProperty()
backP.SetDiffuseColor(1,1,.3)
rose.SetBackfaceProperty(backP)
rose.GetProperty().SetDiffuseColor(1,.4,.3)
rose.GetProperty().SetSpecular(.4)
rose.GetProperty().SetDiffuse(.8)
rose.GetProperty().SetSpecularPower(40)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(300,300)
# render the image
#
ren1.ResetCamera()
cam1 = ren1.GetActiveCamera()
cam1.Zoom(4.5)
cam1.Azimuth(-90)
ren1.ResetCameraClippingRange()
iren.Initialize()
# render the scene and start the event loop
renWin.Render()
iren.Start()
# --- end of script ---
|
# Copyright (c) 2011, Hua Huang and Robert D. Cameron.
# Licensed under the Academic Free License 3.0.
from Utility import configure
def StrategyPool(curRegSize):
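    # Schema of each strategy entry (inferred from how the entries are used below):
    #   "body"      - Python-templated pseudo-code that the generator expands into a library routine
    #   "Ops"       - the IDISA operations this strategy can implement
    #   "Fws"       - applicable field widths; -1 appears to mean "any width", and
    #                 curRegSize denotes the full register width
    #   "Platforms" - target architectures for which the strategy is valid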
strategies = \
{
"add1":\
{
"body":r'''return simd_xor(arg1, arg2)''',
"Ops":["simd_add", "simd_sub"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"umin1":\
{
"body":r'''return simd_and(arg1, arg2)''',
"Ops":["simd_max", "simd_umin", "simd_mult"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"umax1":\
{
"body":r'''return simd_or(arg1, arg2)''',
"Ops":["simd_min", "simd_umax"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"ult1":\
{
"body":r'''return simd_andc(arg2, arg1)''',
"Ops":["simd_ult", "simd_gt"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"ugt1":\
{
"body":r'''return simd_andc(arg1, arg2)''',
"Ops":["simd_lt", "simd_ugt"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"eq1":\
{
"body":r'''return simd_not(simd_xor(arg1, arg2))''',
"Ops":["simd_eq"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"ctz1":\
{
"body":r'''return simd_not(arg1)''',
"Ops":["simd_ctz"],
"Fws":[1],
"Platforms":[configure.ALL],
},
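    # The next four strategies use the standard bias trick: XOR-ing both operands with the
    # field's high bit (1 << (fw-1)) maps unsigned order onto signed order and vice versa,
    # so an unsigned compare/min/max can be built from its signed counterpart (and the other
    # way around). For min/max the bias must be XOR-ed off the result again.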
"unsigned_predicate_using_signed":\
{
"body":r'''high_bit = simd_constant(fw, 1<<(fw-1))
return simd_signed_op(fw, simd_xor(arg1, high_bit), simd_xor(arg2, high_bit))''',
"Ops":["simd_ugt", "simd_ult"],
"Fws":range(1, 64+1),
"Platforms":[configure.ALL],
},
"signed_predicate_using_unsigned":\
{
"body":r'''high_bit = simd_constant(fw, 1<<(fw-1))
return simd_uop(fw, simd_xor(arg1, high_bit), simd_xor(arg2, high_bit))''',
"Ops":["simd_gt", "simd_lt"],
"Fws":range(1, 64+1),
"Platforms":[configure.ALL],
},
"unsigned_value_using_signed":\
{
"body":r'''high_bit = simd_constant(fw, 1<<(fw-1))
return simd_xor(simd_signed_op(fw, simd_xor(arg1, high_bit), simd_xor(arg2, high_bit)), high_bit)''',
"Ops":["simd_umax", "simd_umin"],
"Fws":range(1, 64+1),
"Platforms":[configure.ALL],
},
"signed_value_using_unsigned":\
{
"body":r'''high_bit = simd_constant(fw, 1<<(fw-1))
return simd_xor(simd_uop(fw, simd_xor(arg1, high_bit), simd_xor(arg2, high_bit)), high_bit)''',
"Ops":["simd_max", "simd_min"],
"Fws":range(1, 64+1),
"Platforms":[configure.ALL],
},
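    # "doubling" strategies emulate an fw-bit operation with the same operation at field
    # width 2*fw, masking or shifting the halves so that carries, borrows and comparison
    # results cannot leak across the fw-bit field boundary.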
"in_place_if_doubling":\
{
"body":r'''
return simd_ifh(1, simd_himask(2*fw), simd_op(2*fw, arg1, simd_and(simd_himask(2*fw), arg2)), simd_op(2*fw, arg1, arg2))''',
"Ops":["simd_add", "simd_sub"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"in_place_hmask_post_lmask_pre":\
{
"body":r'''
return simd_or(simd_and(simd_himask(2*fw), simd_op(2*fw, arg1, arg2)), simd_op(2*fw, simd_and(simd_lomask(2*fw), arg1), simd_and(simd_lomask(2*fw), arg2)))''',
"Ops":["simd_umax", "simd_umin"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"in_place_or_doubling":\
{
"body":r'''
return simd_or(simd_and(simd_himask(2*fw), simd_op(2*fw, simd_and(simd_himask(2*fw), arg1), simd_and(simd_himask(2*fw), arg2))), simd_and(simd_lomask(2*fw), simd_op(2*fw, simd_and(simd_lomask(2*fw), arg1), simd_and(simd_lomask(2*fw), arg2))))''',
"Ops":["simd_eq", "simd_ugt", "simd_ult", "simd_umax", "simd_umin"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"in_place_sign_extension_doubling":\
{
"body":r'''
return simd_or(simd_and(simd_himask(2*fw), simd_op(2*fw, simd_and(simd_himask(2*fw), arg1), simd_and(simd_himask(2*fw), arg2))), simd_and(simd_op(2*fw, simd_or(simd_and(arg1, simd_lomask(2*fw)), simd_sub(2*fw, simd_constant(2*fw, 0), simd_and(simd_constant(2*fw, 1<<(fw-1)), arg1))), simd_or(simd_and(arg2, simd_lomask(2*fw)), simd_sub(2*fw, simd_constant(2*fw, 0), simd_and(simd_constant(2*fw, 1<<(fw-1)), arg2)))), simd_lomask(2*fw)))''',
"Ops":["simd_gt", "simd_lt", "simd_max", "simd_min"],
"Fws":range(2, 32+1),
"Platforms":[configure.ALL],
},
"shift_if_doubling":\
{
"body":r'''
return simd_ifh(1, simd_himask(2*fw), simd_op(2*fw, simd_and(simd_himask(2*fw), arg1), simd_and(simd_himask(2*fw), arg2)), simd_srli(2*fw, fw, simd_op(2*fw, simd_slli(2*fw, fw, arg1), simd_slli(2*fw, fw, arg2))))''',
"Ops":["simd_gt", "simd_lt", "simd_max", "simd_min"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"predicate_shift_if_doubling_gt":\
{
"body":r'''
return simd_ifh(1, simd_himask(2*fw), simd_op(2*fw, simd_and(simd_himask(2*fw), arg1), arg2), simd_op(2*fw, simd_slli(2*fw, fw, arg1), simd_slli(2*fw, fw, arg2)))''',
"Ops":["simd_gt", "simd_ugt"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"predicate_shift_if_doubling_lt":\
{
"body":r'''
return simd_ifh(1, simd_himask(2*fw), simd_op(2*fw, arg1, simd_and(simd_himask(2*fw), arg2)), simd_op(2*fw, simd_slli(2*fw, fw, arg1), simd_slli(2*fw, fw, arg2)))''',
"Ops":["simd_lt", "simd_ult"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"predicate_if_doubling_ugt":\
{
"body":r'''
return simd_ifh(1, simd_himask(2*fw), simd_op(2*fw, simd_and(simd_himask(2*fw), arg1), arg2), simd_op(2*fw, simd_andc(arg1, simd_himask(2*fw)), simd_andc(arg2, simd_himask(2*fw))))''',
"Ops":["simd_ugt"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"predicate_if_doubling_ult":\
{
"body":r'''
return simd_ifh(1, simd_himask(2*fw), simd_op(2*fw, arg1, simd_and(simd_himask(2*fw), arg2)), simd_op(2*fw, simd_andc(arg1, simd_himask(2*fw)), simd_andc(arg2, simd_himask(2*fw))))''',
"Ops":["simd_ult"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
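    # Multiplication strategies: the doubling/halving variants build a multiply out of
    # narrower (or wider) multiplies on the high and low halves of each field, combining
    # the partial products schoolbook-style and discarding any product bits that do not
    # fit in the destination field.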
"umult_doubling":\
{
"body":r'''
loMask = simd_lomask(2*fw)
tmpAns1 = simd_umult(2*fw, simd_and(loMask, arg1), simd_and(loMask, arg2))
tmpAns2 = simd_umult(2*fw, simd_and(loMask, simd_srli(4*fw, 2*fw, arg1)), simd_and(loMask, simd_srli(4*fw, 2*fw, arg2)))
return simd_or(tmpAns1, simd_slli(4*fw, 2*fw, tmpAns2))''',
"Ops":["simd_umult"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"mult_doubling":\
{
"body":r'''
loMask = simd_lomask(2*fw)
tmpAns1 = simd_mult(2*fw, simd_and(loMask, arg1), simd_and(loMask, arg2))
tmpAns2 = simd_mult(2*fw, simd_srli(2*fw, fw, arg1), simd_srli(2*fw, fw, arg2))
return simd_ifh(1, loMask, tmpAns1, simd_slli(2*fw, fw, tmpAns2))''',
"Ops":["simd_mult"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"umult_halving":\
{
"body":r'''
loMask1 = simd_lomask(2*fw)
arg11 = simd_and(arg1, loMask1)
arg22 = simd_and(arg2, loMask1)
loMask2 = simd_lomask(fw)
arg1_low = simd_and(arg11, loMask2)
arg1_high = simd_srli(fw, fw/2, arg11)
arg2_low = simd_and(arg22, loMask2)
arg2_high = simd_srli(fw, fw/2, arg22)
tmpAns1 = simd_umult(fw/2, arg1_low, arg2_low)
tmpAns2 = simd_slli(2*fw, fw/2, simd_umult(fw/2, arg1_low, arg2_high))
tmpAns3 = simd_slli(2*fw, fw/2, simd_umult(fw/2, arg1_high, arg2_low))
tmpAns4 = simd_slli(2*fw, fw, simd_umult(fw/2, arg1_high, arg2_high))
return simd_add(2*fw, tmpAns1, simd_add(2*fw, tmpAns2, simd_add(2*fw, tmpAns3, tmpAns4)))''',
"Ops":["simd_umult"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"mult_halving":\
{
"body":r'''
loMask = simd_lomask(fw)
arg1_low = simd_and(arg1, loMask)
arg1_high = simd_srli(fw, fw/2, arg1)
arg2_low = simd_and(arg2, loMask)
arg2_high = simd_srli(fw, fw/2, arg2)
tmpAns1 = simd_umult(fw/2, arg1_low, arg2_low)
tmpAns2 = simd_slli(fw, fw/2, simd_umult(fw/2, arg1_low, arg2_high))
tmpAns3 = simd_slli(fw, fw/2, simd_umult(fw/2, arg1_high, arg2_low))
return simd_add(fw, tmpAns1, simd_add(fw, tmpAns2, tmpAns3))''',
"Ops":["simd_mult"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"mult_32_using_Multiply_Packed_Signed_Dword_Integers":\
{
"body":r'''
return simd_or(simd_slli(2*fw, fw, _mm_mul_epi32(simd_srli(2*fw, fw, arg1), simd_srli(2*fw, fw, arg2))), simd_and(simd_lomask(2*fw), _mm_mul_epi32(arg1, arg2)))''',
"Ops":["simd_mult"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
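    # add_halving / sub_halving: perform the addition (subtraction) at half the field width,
    # recover the carry (borrow) out of the low half with the classic bit tricks
    #   carry  = (a & b) | ((a ^ b) & ~sum)
    #   borrow = (~a & b) | (~(a ^ b) & diff)
    # and then add (subtract) it into the high half.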
"add_halving":\
{
"body":r'''
ans = simd_add(fw/2, arg1, arg2)
carryMask = simd_or(simd_and(arg1, arg2), simd_and(simd_xor(arg1, arg2), simd_not(ans)))
loMask = simd_lomask(fw)
carry = simd_slli(fw, 1, simd_and(carryMask, loMask))
return simd_ifh(1, loMask, ans, simd_add(fw/2, ans, carry))''',
"Ops":["simd_add"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"add_another_halving":\
{
"body":r'''
partial = simd_add(fw/2, arg1, arg2)
carryMask = simd_or(simd_and(arg1, arg2), simd_andc(simd_xor(arg1, arg2), partial))
carry = simd_slli(fw, fw/2, simd_srli(fw/2, fw/2-1, carryMask))
return simd_add(fw/2, partial, carry)''',
"Ops":["simd_add"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"sub_halving":\
{
"body":r'''
ans = simd_sub(fw/2, arg1, arg2)
borrowMask = simd_or(simd_andc(arg2, arg1), simd_and(simd_not(simd_xor(arg1, arg2)), ans))
loMask = simd_lomask(fw)
borrow = simd_slli(fw, 1, simd_and(borrowMask, loMask))
return simd_ifh(1, loMask, ans, simd_sub(fw/2, ans, borrow))''',
"Ops":["simd_sub"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"sub_another_halving":\
{
"body":r'''
partial = simd_sub(fw/2, arg1, arg2)
borrowMask = simd_or(simd_andc(arg2, arg1), simd_andc(partial, simd_xor(arg1, arg2)))
borrow = simd_slli(fw, fw/2, simd_srli(fw/2, fw/2-1, borrowMask))
return simd_sub(fw/2, partial, borrow)''',
"Ops":["simd_sub"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"eq_halving":\
{
"body":r'''
tmpAns = simd_eq(fw/2, arg1, arg2)
loMask = simd_and(tmpAns, simd_srli(fw, fw/2, tmpAns))
hiMask = simd_slli(fw, fw/2, loMask)
return simd_or(loMask, hiMask)''',
"Ops":["simd_eq"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"gt_lt_halving":\
{
"body":r'''
hiAns = simd_op(fw/2, arg1, arg2)
loAns = simd_uop(fw/2, arg1, arg2)
mask = simd_and(loAns, simd_srli(fw, fw/2, simd_eq(fw/2, arg1, arg2)))
mask = simd_or(mask, simd_slli(fw, fw/2, mask))
return simd_or(simd_srai(fw, fw/2, hiAns), mask)''',
"Ops":["simd_gt", "simd_lt"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"ugt_ult_halving":\
{
"body":r'''
tmpAns = simd_op(fw/2, arg1, arg2)
mask = simd_and(tmpAns, simd_srli(fw, fw/2, simd_eq(fw/2, arg1, arg2)))
mask = simd_or(mask, simd_slli(fw, fw/2, mask))
return simd_or(simd_srai(fw, fw/2, tmpAns), mask)''',
"Ops":["simd_ugt", "simd_ult"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"max_min_halving":\
{
"body":r'''
hiAns = simd_op(fw/2, arg1, arg2)
loAns = simd_uop(fw/2, arg1, arg2)
eqMask1 = simd_srli(fw, fw/2, simd_eq(fw/2, hiAns, arg1))
eqMask2 = simd_srli(fw, fw/2, simd_eq(fw/2, hiAns, arg2))
return simd_ifh(1, simd_himask(fw), hiAns, simd_ifh(1, eqMask1, simd_ifh(1, eqMask2, loAns, arg1), arg2))''',
"Ops":["simd_max", "simd_min"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"max_gt_blend":\
{
"body":r'''
return simd_ifh(1, simd_gt(fw, arg1, arg2), arg1, arg2)''',
"Ops":["simd_max"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"max_lt_blend":\
{
"body":r'''
return simd_ifh(1, simd_lt(fw, arg1, arg2), arg2, arg1)''',
"Ops":["simd_max"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"min_lt_blend":\
{
"body":r'''
return simd_ifh(1, simd_lt(fw, arg1, arg2), arg1, arg2)''',
"Ops":["simd_min"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"min_gt_blend":\
{
"body":r'''
return simd_ifh(1, simd_gt(fw, arg1, arg2), arg2, arg1)''',
"Ops":["simd_min"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"umax_umin_halving":\
{
"body":r'''
tmpAns = simd_op(fw/2, arg1, arg2)
eqMask1 = simd_srli(fw, fw/2, simd_eq(fw/2, tmpAns, arg1))
eqMask2 = simd_srli(fw, fw/2, simd_eq(fw/2, tmpAns, arg2))
return simd_ifh(1, simd_himask(fw), tmpAns, simd_ifh(1, eqMask1, simd_ifh(1, eqMask2, tmpAns, arg1), arg2))''',
"Ops":["simd_umax", "simd_umin"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
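    # srai strategies: an arithmetic shift right is generally assembled from logical shifts
    # plus a step that re-inserts copies of each field's sign bit into the vacated
    # high-order positions.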
"srai_halving":\
{
"body":r'''
return simd_or(simd_and(simd_himask(fw), simd_srai(fw/2, sh if sh<fw/2 else fw/2, arg1)), simd_srli(fw, sh, arg1) if sh<=fw/2 else simd_srai(fw/2, max(0, sh-(fw/2)), simd_srli(fw, fw/2, arg1)))''',
"Ops":["simd_srai"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"ugt_blend":\
{
"body":r'''
return simd_and(simd_srai(fw, fw-1, simd_or(simd_and(arg1, simd_not(arg2)), simd_and(simd_not(simd_xor(arg1, arg2)), simd_not(simd_sub(fw, arg1, arg2))))), simd_not(simd_eq(fw, arg1, arg2)))''',
"Ops":["simd_ugt"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"lt_blend":\
{
"body":r'''
return simd_and(simd_not(simd_gt(fw, arg1, arg2)), simd_not(simd_eq(fw, arg1, arg2)))''',
"Ops":["simd_lt"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"ult_blend":\
{
"body":r'''
return simd_and(simd_srai(fw, fw-1, simd_or(simd_and(simd_not(arg1), arg2), simd_and(simd_not(simd_xor(arg1, arg2)), simd_sub(fw, arg1, arg2)))), simd_not(simd_eq(fw, arg1, arg2)))''',
"Ops":["simd_ult"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"abs_blend":\
{
"body":r'''
gtMask = simd_gt(fw, arg1, simd_constant(fw, 0))
return simd_ifh(1, gtMask, arg1, simd_sub(fw, gtMask, arg1))''',
"Ops":["simd_abs"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"abs_halving":\
{
"body":r'''
eqMask = simd_eq(fw, simd_ifh(1, simd_himask(fw), simd_abs(fw/2, arg1), arg1), arg1)
return simd_ifh(1, eqMask, arg1, simd_sub(fw, eqMask, arg1))''',
"Ops":["simd_abs"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"neg_blend":\
{
"body":r'''
return simd_sub(fw, simd_constant(fw, 0), arg1)''',
"Ops":["simd_neg"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"neg_8_Packed SIGN":\
{
"body":r'''
return _mm_sign_epi8(arg1, simd_constant(fw, (1<<fw)-1))''',
"Ops":["simd_neg"],
"Fws":[8],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"neg_16_Packed SIGN":\
{
"body":r'''
return _mm_sign_epi16(arg1, simd_constant(fw, (1<<fw)-1))''',
"Ops":["simd_neg"],
"Fws":[16],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"neg_32_Packed SIGN":\
{
"body":r'''
return _mm_sign_epi32(arg1, simd_constant(fw, (1<<fw)-1))''',
"Ops":["simd_neg"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"srai_2_blend":\
{
"body":r'''
return simd_ifh(1, simd_eq(32, simd_constant(32, sh), simd_constant(32, 0)), arg1, simd_or(simd_and(simd_himask(2), arg1), simd_srli(fw, 1, arg1)))''',
"Ops":["simd_srai"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"srai_2_blend_1":\
{
"body":r'''
return arg1 if sh==0 else simd_or(simd_and(simd_himask(2), arg1), simd_srli(fw, 1, arg1))''',
"Ops":["simd_srai"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"srai_blend_subtract":\
{
"body":r'''
tmp = simd_srli(fw, (fw-1 if sh>=fw else 0 if sh<0 else sh), arg1)
return simd_or(tmp, simd_sub(fw, simd_constant(fw, 0), simd_and(simd_constant(fw, 1<<(fw-(fw-1 if sh>=fw else 0 if sh<0 else sh)-1)), tmp)))''',
"Ops":["simd_srai"],
"Fws":[2, 4, 8, 16, 32],
"Platforms":[configure.ALL],
},
"srai_blend_substract_1":\
{
"body":r'''
tmp = simd_srli(fw, (fw-1 if sh>=fw else 0 if sh<0 else sh), arg1)
return simd_or(tmp, simd_sub(fw, simd_constant(fw, 0), simd_and(simd_slli(fw, (fw-(fw-1 if sh>=fw else 0 if sh<0 else sh)-1), simd_constant(fw, 1)), tmp)))''',
"Ops":["simd_srai"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"srai_8_neon":\
{
"body":r'''
return arg1 if sh==0 else IDISA_CASTING("SIMD_type", vshrq_n_s8(arg1, sh))''',
"Ops":["simd_srai"],
"Fws":[8],
"Platforms":[configure.NEON],
},
"srai_16_neon":\
{
"body":r'''
return arg1 if sh==0 else IDISA_CASTING("SIMD_type", vshrq_n_s16(arg1, sh))''',
"Ops":["simd_srai"],
"Fws":[16],
"Platforms":[configure.NEON],
},
"srai_32_neon":\
{
"body":r'''
return arg1 if sh==0 else IDISA_CASTING("SIMD_type", vshrq_n_s32(arg1, sh))''',
"Ops":["simd_srai"],
"Fws":[32],
"Platforms":[configure.NEON],
},
"srai_64_neon":\
{
"body":r'''
return arg1 if sh==0 else IDISA_CASTING("SIMD_type", vshrq_n_s64(arg1, sh))''',
"Ops":["simd_srai"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"slli_increment_blend":\
{
"body":r'''
return simd_and(simd_slli(32, sh, arg1), simd_constant(fw, (((1<<fw)-1)<<sh)&((1<<fw)-1)))''',
"Ops":["simd_slli"],
"Fws":[2, 4, 8],
"Platforms":[configure.ALL],
},
"vsll_64_blend":\
{
"body":r'''
return simd_ifh(1, simd_himask(128), _mm_sll_epi64(arg1, simd_and(_mm_srli_si128(shift_mask, 8), _mm_cvtsi32_si128(63))), _mm_sll_epi64(arg1, simd_and(shift_mask, _mm_cvtsi32_si128(63))))''',
"Ops":["simd_vsll"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"vsll_128_blend":\
{
"body":r'''
shift = simd_and(shift_mask, _mm_cvtsi32_si128(127))
return simd_or(_mm_sll_epi64(arg1, shift), simd_or(_mm_slli_si128(_mm_sll_epi64(arg1, simd_sub(32, shift, _mm_cvtsi32_si128(64))), 8), _mm_slli_si128(_mm_srl_epi64(arg1, simd_sub(32, _mm_cvtsi32_si128(64), shift)), 8)))''',
"Ops":["simd_vsll"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"sll_128_blend":\
{
"body":r'''
shift = simd_and(shift_mask, _mm_cvtsi32_si128((1<<32)-1))
return simd_or(_mm_sll_epi64(arg1, shift), simd_or(_mm_slli_si128(_mm_sll_epi64(arg1, simd_sub(32, shift, _mm_cvtsi32_si128(64))), 8), _mm_slli_si128(_mm_srl_epi64(arg1, simd_sub(32, _mm_cvtsi32_si128(64), shift)), 8)))''',
"Ops":["simd_sll"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
# doesn't work...
# "sll_64_neon":\
# {
# "body":r'''
#shift = IDISA_CASTING("int64x2_t", simd_or(shift_mask, vextq_u64(shift_mask, shift_mask, 1)))
#return vshlq_u64(arg1, shift)''',
# "Ops":["simd_sll"],
# "Fws":[64],
# "Platforms":[configure.NEON],
# },
"slli_128_blend":\
{
"body":r'''
return _mm_slli_si128(arg1, sh/8) if (sh%8==0) else (simd_slli(64, (sh)&0x3F, _mm_slli_si128(arg1, 8)) if (sh>=64) else simd_or(simd_slli(64, sh, arg1), _mm_slli_si128(simd_srli(64, (128-sh)&0x3F, arg1), 8)))''',
"Ops":["simd_slli"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"slli_128_avx":\
{
"body":r'''
return avx_byte_shift_left(arg1, (sh)/8) if (sh%8==0) else (simd_slli(64, (sh)&0x3F, avx_byte_shift_left(arg1, 8)) if (sh>=64) else simd_or(simd_slli(64, sh, arg1), avx_byte_shift_left(simd_srli(64, (128-sh)&0x3F, arg1), 8)))''',
"Ops":["simd_slli"],
"Fws":[128],
"Platforms":configure.AVX_SERIES,
},
    # This strategy is wrong when sh = 0: the (128 - sh) shift degenerates into a full
    # 128-bit shift. The companion srli_256_avx strategy has the same problem.
"slli_256_avx":\
{
"body":r'''
return simd_or(simd_slli(128, sh, arg1), avx_move_lo128_to_hi128(simd_srli(128, (128-sh), arg1))) if (sh<128) else simd_slli(128, sh-128, avx_move_lo128_to_hi128(arg1))''',
"Ops":["simd_slli"],
"Fws":[256],
"Platforms":configure.AVX_SERIES,
},
"slli_8_neon":\
{
"body":r'''
return simd_constant(32, 0) if sh==8 else IDISA_CASTING("SIMD_type", vshlq_n_u8(arg1, sh))''',
"Ops":["simd_slli"],
"Fws":[8],
"Platforms":[configure.NEON],
},
"slli_16_neon":\
{
"body":r'''
return simd_constant(32, 0) if sh==16 else IDISA_CASTING("SIMD_type", vshlq_n_u16(arg1, sh))''',
"Ops":["simd_slli"],
"Fws":[16],
"Platforms":[configure.NEON],
},
"slli_32_neon":\
{
"body":r'''
return simd_constant(32, 0) if sh==32 else IDISA_CASTING("SIMD_type", vshlq_n_u32(arg1, sh))''',
"Ops":["simd_slli"],
"Fws":[32],
"Platforms":[configure.NEON],
},
"slli_64_neon":\
{
"body":r'''
return simd_constant(32, 0) if sh==64 else IDISA_CASTING("SIMD_type", vshlq_n_u64(arg1, sh))''',
"Ops":["simd_slli"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"slli_128_neon":\
{
"body":r'''
return simd_constant(32, 0) if sh==128 else (simd_slli(64, (sh)&0x3F, neon_shift_left_64_bits(arg1)) if sh>=64 else simd_or(neon_shift_left_64_bits(simd_srli(64, 64 - sh, arg1)), simd_slli(64, sh, arg1)))''',
"Ops":["simd_slli"],
"Fws":[128],
"Platforms":[configure.NEON],
},
"vsrl_64_blend":\
{
"body":r'''
return simd_ifh(1, simd_himask(128), _mm_srl_epi64(arg1, simd_and(_mm_srli_si128(shift_mask, 8), _mm_cvtsi32_si128(63))), _mm_srl_epi64(arg1, simd_and(shift_mask, _mm_cvtsi32_si128(63))))''',
"Ops":["simd_vsrl"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"vsrl_128_blend":\
{
"body":r'''
shift = simd_and(shift_mask, _mm_cvtsi32_si128(127))
return simd_or(_mm_srl_epi64(arg1, shift), simd_or(_mm_srli_si128(_mm_srl_epi64(arg1, simd_sub(32, shift, _mm_cvtsi32_si128(64))), 8), _mm_srli_si128(_mm_sll_epi64(arg1, simd_sub(32, _mm_cvtsi32_si128(64), shift)), 8)))''',
"Ops":["simd_vsrl"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"srl_128_blend":\
{
"body":r'''
shift = simd_and(shift_mask, _mm_cvtsi32_si128((1<<32)-1))
return simd_or(_mm_srl_epi64(arg1, shift), simd_or(_mm_srli_si128(_mm_srl_epi64(arg1, simd_sub(32, shift, _mm_cvtsi32_si128(64))), 8), _mm_srli_si128(_mm_sll_epi64(arg1, simd_sub(32, _mm_cvtsi32_si128(64), shift)), 8)))''',
"Ops":["simd_srl"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"srl_256_blend":\
{
"body":r'''
shift = _mm_cvtsi128_si32(avx_select_lo128(shift_mask))
n = shift / 64
arg2 = mvmd_srli(64, 1, arg1) if n==1 else (mvmd_srli(64, 2, arg1) if n==2 else (mvmd_srli(64, 3, arg1) if n==3 else arg1))
return simd_constant(32, 0) if n>=4 else (simd_or(_mm256_srl_epi64(arg2, _mm_cvtsi32_si128(shift & 63)), mvmd_srli(64, 1, _mm256_sll_epi64(arg2, _mm_cvtsi32_si128(64 - (shift & 63))))) if (shift & 63) > 0 else arg2)
''',
"Ops":["simd_srl"],
"Fws":[256],
"Platforms":[configure.AVX2],
},
"sll_256_blend":\
{
"body":r'''
shift = _mm_cvtsi128_si32(avx_select_lo128(shift_mask))
n = shift / 64
arg2 = mvmd_slli(64, 1, arg1) if n==1 else (mvmd_slli(64, 2, arg1) if n==2 else (mvmd_slli(64, 3, arg1) if n==3 else arg1))
return simd_constant(32, 0) if n>=4 else (simd_or(_mm256_sll_epi64(arg2, _mm_cvtsi32_si128(shift & 63)), mvmd_slli(64, 1, _mm256_srl_epi64(arg2, _mm_cvtsi32_si128(64 - (shift & 63))))) if (shift & 63) > 0 else arg2)
''',
"Ops":["simd_sll"],
"Fws":[256],
"Platforms":[configure.AVX2],
},
"srli_increment_blend":\
{
"body":r'''
return simd_and(simd_srli(32, sh, arg1), simd_constant(fw, ((1<<fw)-1)>>sh))''',
"Ops":["simd_srli"],
"Fws":[2, 4, 8],
"Platforms":[configure.ALL],
},
"srli_128_blend":\
{
"body":r'''
return _mm_srli_si128(arg1, sh/8) if (sh%8==0) else (simd_srli(64, (sh)&0x3F, _mm_srli_si128(arg1, 8)) if (sh>=64) else simd_or(simd_srli(64, sh, arg1), _mm_srli_si128(simd_slli(64, (128-sh)&0x3F, arg1), 8)))''',
"Ops":["simd_srli"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"srli_128_avx":\
{
"body":r'''
return avx_byte_shift_right(arg1, (sh)/8) if (sh%8==0) else (simd_srli(64, (sh)&0x3F, avx_byte_shift_right(arg1, 8)) if (sh>=64) else simd_or(simd_srli(64, sh, arg1), avx_byte_shift_right(simd_slli(64, (128-sh)&0x3F, arg1), 8)))''',
"Ops":["simd_srli"],
"Fws":[128],
"Platforms":configure.AVX_SERIES,
},
"srli_256_avx":\
{
"body":r'''
return simd_or(simd_srli(128, sh, arg1), simd_slli(128, (128-sh), IDISA_CASTING("_mm256_castsi128_si256", avx_select_hi128(arg1)))) if (sh<128) else simd_srli(128, (sh - 128), avx_move_hi128_to_lo128(arg1))''',
"Ops":["simd_srli"],
"Fws":[256],
"Platforms":configure.AVX_SERIES,
},
"srli_8_neon":\
{
"body":r'''
return arg1 if sh==0 else IDISA_CASTING("SIMD_type", vshrq_n_u8(arg1, sh))''',
"Ops":["simd_srli"],
"Fws":[8],
"Platforms":[configure.NEON],
},
"srli_16_neon":\
{
"body":r'''
return arg1 if sh==0 else IDISA_CASTING("SIMD_type", vshrq_n_u16(arg1, sh))''',
"Ops":["simd_srli"],
"Fws":[16],
"Platforms":[configure.NEON],
},
"srli_32_neon":\
{
"body":r'''
return arg1 if sh==0 else IDISA_CASTING("SIMD_type", vshrq_n_u32(arg1, sh))''',
"Ops":["simd_srli"],
"Fws":[32],
"Platforms":[configure.NEON],
},
"srli_64_neon":\
{
"body":r'''
return arg1 if sh==0 else IDISA_CASTING("SIMD_type", vshrq_n_u64(arg1, sh))''',
"Ops":["simd_srli"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"srli_128_neon":\
{
"body":r'''
return neon_shift_right_64_bits(arg1) if sh==64 else (simd_srli(64, (sh)&0x3F, neon_shift_right_64_bits(arg1)) if sh>64 else simd_or(neon_shift_right_64_bits(simd_slli(64, 64 - sh, arg1)), simd_srli(64, sh, arg1)))''',
"Ops":["simd_srli"],
"Fws":[128],
"Platforms":[configure.NEON],
},
"not_blend":\
{
"body":r'''
return simd_xor(arg1, simd_constant(32, 4294967295))''',
"Ops":["simd_not"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"nor_blend":\
{
"body":r'''
return simd_not(simd_or(arg1, arg2))''',
"Ops":["simd_nor"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"popcount_1_blend":\
{
"body":r'''
return arg1''',
"Ops":["simd_popcount"],
"Fws":[1],
"Platforms":[configure.ALL],
},
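    # popcount_halving: the population count of an fw-bit field is the sum of the popcounts
    # of its two fw/2-bit halves; simd_add_hl performs exactly that pairwise horizontal add,
    # so repeated halving bottoms out at the 1-bit case above.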
"popcount_halving":\
{
"body":r'''
return simd_add_hl(fw, simd_popcount(fw/2, arg1))''',
"Ops":["simd_popcount"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"popcount_regSize_blend":\
{
"body":r'''
tmpAns = simd_popcount(curRegSize/2, arg1)
return simd_add(curRegSize/2, simd_and(tmpAns, simd_lomask(curRegSize)), simd_srli(curRegSize, curRegSize/2, tmpAns))''',
"Ops":["simd_popcount"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"popcount_64_sum_of_abs":\
{
"body":r'''
return _mm_sad_epu8(simd_popcount(8, arg1), simd_constant(8, 0))''',
"Ops":["simd_popcount"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"popcount_64_avx":\
{
"body":r'''
tmpAns = simd_popcount(8, arg1)
return avx_general_combine256(_mm_sad_epu8(avx_select_hi128(tmpAns), _mm_set1_epi32(0)), _mm_sad_epu8(avx_select_lo128(tmpAns), _mm_set1_epi32(0)))''',
"Ops":["simd_popcount"],
"Fws":[64],
"Platforms":configure.AVX_SERIES,
},
"bitblock_popcount_256_avx2":\
{
"body":r'''
return __builtin_popcountll(mvmd_extract(64, 0, arg1)) + __builtin_popcountll(mvmd_extract(64, 1, arg1)) + __builtin_popcountll(mvmd_extract(64, 2, arg1)) + __builtin_popcountll(mvmd_extract(64, 3, arg1))''',
"Ops":["bitblock_popcount"],
"Fws":[256],
"Platforms":[configure.AVX2],
},
"simd_popcount_256_avx2":\
{
"body":r'''
return _mm256_castsi128_si256(_mm_cvtsi64_si128(bitblock_popcount(arg1)))''',
"Ops":["simd_popcount"],
"Fws":[256],
"Platforms":[configure.AVX2],
},
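    # ctz_blend: ~x & (x - 1) turns the trailing zeros of x into a block of ones (and
    # everything else into zeros), so counting trailing zeros reduces to a popcount.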
"ctz_blend":\
{
"body":r'''
return simd_popcount(fw, simd_andc(simd_sub(fw, arg1, simd_constant(fw, 1)), arg1))''',
"Ops":["simd_ctz"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"ctz_1_blend":\
{
"body":r'''
return simd_not(arg1)''',
"Ops":["simd_ctz"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"if_1":\
{
"body":r'''
return simd_or(simd_and(arg2, arg1), simd_andc(arg3, arg1))''',
"Ops":["simd_ifh"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"if_blend":\
{
"body":r'''
return simd_ifh(1, simd_gt(fw, simd_constant(fw, 0), arg1), arg2, arg3)''',
"Ops":["simd_ifh"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"if_havling":\
{
"body":r'''
return simd_ifh(fw/2, simd_ifh(1, simd_himask(fw), arg1, simd_srli(fw, fw/2, arg1)), arg2, arg3)''',
"Ops":["simd_ifh"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"if_8_using_Variable_Blend_Packed_Bytes":\
{
"body":r'''
return _mm_blendv_epi8(arg3, arg2, arg1)''',
"Ops":["simd_ifh"],
"Fws":[8],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"if_curRegsize_avx":\
{
"body":r'''
ifMask = simd_eq(curRegSize, simd_constant(32, 0), simd_and(IDISA_CASTING("SIMD_type", _mm256_set_epi32(-2147483648, 0, 0, 0, 0, 0, 0, 0)), arg1))
return simd_ifh(1, ifMask, arg3, arg2)''',
"Ops":["simd_ifh"],
"Fws":[curRegSize],
"Platforms":configure.AVX_SERIES,
},
"lomask_blend":\
{
"body":r'''
return simd_constant(fw, (1<<(fw/2))-1)''',
"Ops":["simd_lomask"],
"Fws":range(2, 32+1),
"Platforms":[configure.ALL],
},
"lomask_64_blend":\
{
"body":r'''
return _mm_set_epi32(0,(1<<32)-1, 0, (1<<32)-1)''',
"Ops":["simd_lomask"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"lomask_64_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32(0, (1<<32)-1, 0, (1<<32)-1, 0, (1<<32)-1, 0, (1<<32)-1))''',
"Ops":["simd_lomask"],
"Fws":[64],
"Platforms":configure.AVX_SERIES,
},
"lomask_64_neon":\
{
"body":r'''
return simd_constant(64, 4294967295L)''',
"Ops":["simd_lomask"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"lomask_128_blend":\
{
"body":r'''
return _mm_set_epi32(0, 0, (1<<32)-1, (1<<32)-1)''',
"Ops":["simd_lomask"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"lomask_128_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32(0, 0, (1<<32)-1, (1<<32)-1, 0, 0,(1<<32)-1, (1<<32)-1))''',
"Ops":["simd_lomask"],
"Fws":[128],
"Platforms":configure.AVX_SERIES,
},
"lomask_256_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32(0, 0, 0, 0,(1<<32)-1,(1<<32)-1,(1<<32)-1,(1<<32)-1))''',
"Ops":["simd_lomask"],
"Fws":[256],
"Platforms":configure.AVX_SERIES,
},
"lomask_128_neon":\
{
"body":r'''
return vsetq_lane_u64(-1, simd_constant(64, 0), 0)''',
"Ops":["simd_lomask"],
"Fws":[128],
"Platforms":[configure.NEON],
},
"himask_blend":\
{
"body":r'''
return simd_constant(fw, (0-(1<<(fw/2)))&((1<<fw)-1))''',
"Ops":["simd_himask"],
"Fws":range(2, 16+1),
"Platforms":[configure.ALL],
},
"himask_32_blend":\
{
"body":r'''
return simd_constant(fw, 4294901760)''',
"Ops":["simd_himask"],
"Fws":[32],
"Platforms":[configure.ALL],
},
"himask_64_blend":\
{
"body":r'''
return _mm_set_epi32((1<<32)-1, 0, (1<<32)-1, 0)''',
"Ops":["simd_himask"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"himask_64_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32((1<<32)-1, 0, (1<<32)-1, 0, (1<<32)-1, 0, (1<<32)-1, 0))''',
"Ops":["simd_himask"],
"Fws":[64],
"Platforms":configure.AVX_SERIES,
},
"himask_64_neon":\
{
"body":r'''
return simd_constant(64, 18446744069414584320L)''',
"Ops":["simd_himask"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"himask_128_blend":\
{
"body":r'''
return _mm_set_epi32((1<<32)-1, (1<<32)-1, 0, 0)''',
"Ops":["simd_himask"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"himask_128_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32((1<<32)-1,(1<<32)-1, 0, 0,(1<<32)-1,(1<<32)-1, 0, 0))''',
"Ops":["simd_himask"],
"Fws":[128],
"Platforms":configure.AVX_SERIES,
},
"himask_256_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32((1<<32)-1,(1<<32)-1,(1<<32)-1,(1<<32)-1, 0, 0, 0, 0))''',
"Ops":["simd_himask"],
"Fws":[256],
"Platforms":configure.AVX_SERIES,
},
"himask_128_neon":\
{
"body":r'''
return vsetq_lane_u64(-1, simd_constant(64, 0), 1)''',
"Ops":["simd_himask"],
"Fws":[128],
"Platforms":[configure.NEON],
},
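    # constant strategies: a constant at field width fw is built either by packing two
    # copies of the (sign-extended) immediate into a 2*fw constant, or, for the wide
    # widths, by handing the value directly to the platform's set intrinsic.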
"constant_doubling":\
{
"body":r'''
return simd_constant(2*fw, (val<<fw)|(val^(-1<<fw))) if val<0 else simd_constant(2*fw, (val<<fw)|val)
''',
"Ops":["simd_constant"],
"Fws":range(2, 16+1),
"Platforms":[configure.ALL],
},
"constant_doubling_1":\
{
"body":r'''
return simd_constant(2*fw, (val<<fw) | (val & ((1<<fw)-1)))
''',
"Ops":["simd_constant"],
"Fws":range(2, 5),
"Platforms":[configure.ALL],
},
"constant_1_blend":\
{
#simd<1>::constant only accepts 0 or 1
"body":r'''
return simd_constant(2, val + val + val)
''',
"Ops":["simd_constant"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"constant_64_blend":\
{
"body":r'''
return _mm_set_epi32(val>>32, val, val>>32, val)
''',
"Ops":["simd_constant"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"constant_64_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32(val>>32, val, val>>32, val, val>>32, val, val>>32, val))''',
"Ops":["simd_constant"],
"Fws":[64],
"Platforms":configure.AVX_SERIES,
},
"constant_128_blend":\
{
"body":r'''
return _mm_set_epi32(0, 0, val>>32, val)
''',
"Ops":["simd_constant"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"constant_128_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32(0, 0, val>>32, val, 0, 0, val>>32, val))''',
"Ops":["simd_constant"],
"Fws":[128],
"Platforms":configure.AVX_SERIES,
},
"constant_256_avx":\
{
"body":r'''
return IDISA_CASTING("SIMD_type", _mm256_set_epi32(0, 0, 0, 0, 0, 0, val>>32, val))''',
"Ops":["simd_constant"],
"Fws":[256],
"Platforms":configure.AVX_SERIES,
},
"constant_128_neon":\
{
"body":r'''
return vsetq_lane_u64(0, simd_constant(64, val), 1)''',
"Ops":["simd_constant"],
"Fws":[128],
"Platforms":[configure.NEON],
},
"hsimd_add_hl":\
{
"body":r'''
return simd_add(fw/2, hsimd_packh(fw, arg1, arg2), hsimd_packl(fw, arg1, arg2))
''',
"Ops":["hsimd_add_hl"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"simd_add_hl_2":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), simd_and(simd_slli(16, 1, arg1), arg1), simd_xor(simd_srli(16, 1, arg1), arg1))''',
"Ops":["simd_add_hl"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_add_hl_2_sub":\
{
"body":r'''
return simd_sub(16, arg1, simd_and(simd_lomask(fw), simd_srli(16, 1, arg1)))''',
"Ops":["simd_add_hl"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_add_hl":\
{
"body":r'''
return simd_add(fw, simd_srli(fw, fw/2, arg1), simd_and(arg1, simd_lomask(fw)))
''',
"Ops":["simd_add_hl"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"simd_add_hl_doubling":\
{
"body":r'''
return simd_add(2*fw, simd_srli(fw, fw/2, arg1), simd_and(arg1, simd_lomask(fw)))
''',
"Ops":["simd_add_hl"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
# "simd_add_hl_half_add":\
# {
# "body":r'''
#return simd_add(fw/2, simd_srli(fw, fw/2, arg1), simd_and(arg1, simd_lomask(fw)))''',
# "Ops":["simd_add_hl"],
# "Fws":range(4, curRegSize+1),
# },
"simd_add_hl_4_8":\
{
"body":r'''
return simd_add(fw, simd_and(simd_srli(16, fw/2, arg1), simd_lomask(fw)), simd_and(arg1, simd_lomask(fw)))
''',
"Ops":["simd_add_hl"],
"Fws":[4, 8],
"Platforms":[configure.ALL],
},
"simd_xor_hl":\
{
"body":r'''
return simd_xor(simd_srli(fw, fw/2, arg1), simd_and(arg1, simd_lomask(fw)))
''',
"Ops":["simd_xor_hl"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"hsimd_min_hl":\
{
"body":r'''
return simd_min(fw/2, hsimd_packh(fw, arg1, arg2), hsimd_packl(fw, arg1, arg2))''',
"Ops":["hsimd_min_hl"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"hsimd_min_hl_regsize":\
{
"body":r'''
return simd_ifh(1, simd_himask(curRegSize), simd_min(fw/2, arg1, simd_slli(fw, fw/2, arg1)), simd_min(fw/2, simd_srli(fw, fw/2, arg2), arg2))''',
"Ops":["hsimd_min_hl"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"hsimd_umin_hl":\
{
"body":r'''
return simd_umin(fw/2, hsimd_packh(fw, arg1, arg2), hsimd_packl(fw, arg1, arg2))''',
"Ops":["hsimd_umin_hl"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"hsimd_umin_hl_regsize":\
{
"body":r'''
return simd_ifh(1, simd_himask(curRegSize), simd_umin(fw/2, arg1, simd_slli(fw, fw/2, arg1)), simd_umin(fw/2, simd_srli(fw, fw/2, arg2), arg2))''',
"Ops":["hsimd_umin_hl"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
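    # hsimd_packh / hsimd_packl pack the high (resp. low) fw/2-bit halves of every fw-bit
    # field of the two operands into a single register; packh is therefore packl applied
    # to operands whose fields have been shifted right by fw/2.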
"packh_blend":\
{
"body":r'''
return hsimd_packl(fw, simd_srli(64, fw/2, arg1), simd_srli(64, fw/2, arg2))
''',
"Ops":["hsimd_packh"],
"Fws":range(2, 64+1),
"Platforms":[configure.ALL],
},
"packh_packus_blend":\
{
"body":r'''
return hsimd_packus(fw, simd_srli(fw, fw/2, arg1), simd_srli(fw, fw/2, arg2))
''',
"Ops":["hsimd_packh"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"packh_regSize_blend":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), arg1, simd_srli(fw, fw/2, arg2))
''',
"Ops":["hsimd_packh"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"packh_32_using_Packed_Horizontal_Subtract":\
{
"body":r'''
return _mm_hsub_epi16(simd_srli(fw, fw/2, arg2), simd_srli(fw, fw/2, arg1))''',
"Ops":["hsimd_packh"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"packh_32_using_Packed_Horizontal_Subtract_and_Packed_SIGN":\
{
"body":r'''
return simd_neg(fw/2, _mm_hsub_epi16(simd_and(arg2, simd_himask(fw)), simd_and(arg1, simd_himask(fw))))''',
"Ops":["hsimd_packh"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"packh_64_using_Packed_Horizontal_Subtract":\
{
"body":r'''
return _mm_hsub_epi32(simd_srli(fw, fw/2, arg2), simd_srli(fw, fw/2, arg1))''',
"Ops":["hsimd_packh"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"packh_64_using_Packed_Horizontal_Subtract_and_Packed_SIGN":\
{
"body":r'''
return simd_neg(fw/2, _mm_hsub_epi32(simd_and(arg2, simd_himask(fw)), simd_and(arg1, simd_himask(fw))))''',
"Ops":["hsimd_packh"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"packh_64_neon":\
{
"body":r'''
tmpArg1 = simd_and(simd_himask(fw), arg1)
tmpArg2 = simd_and(simd_himask(fw), arg2)
return vcombine_u64(vorr_u64(vshr_n_u64(vget_low_u64(tmpArg2), fw/2), vget_high_u64(tmpArg2)), vorr_u64(vshr_n_u64(vget_low_u64(tmpArg1), fw/2), vget_high_u64(tmpArg1)))
''',
"Ops":["hsimd_packh"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"packl_double":\
{
"body":r'''
return hsimd_packl(2*fw, simd_ifh(1, simd_himask(fw), simd_srli(curRegSize, fw/2, arg1), arg1), simd_ifh(1, simd_himask(fw), simd_srli(curRegSize, fw/2, arg2), arg2))
''',
"Ops":["hsimd_packl"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"packl_double_ifh64":\
{
"body":r'''
return hsimd_packl(2*fw, simd_ifh(64, simd_himask(fw), simd_srli(curRegSize, fw/2, arg1), arg1), simd_ifh(64, simd_himask(fw), simd_srli(curRegSize, fw/2, arg2), arg2))
''',
"Ops":["hsimd_packl"],
"Fws":[128],
"Platforms":[configure.ALL],
},
"packl_blend":\
{
"body":r'''
return hsimd_packus(fw, simd_and(arg1, simd_lomask(fw)), simd_and(arg2, simd_lomask(fw)))
''',
"Ops":["hsimd_packl"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"packl_64_blend":\
{
"body":r'''
return simd_or(mvmd_shufflei(32, shufflemask4(2,0,3,3), simd_andc(arg1, simd_himask(64))), mvmd_shufflei(32, shufflemask4(3, 3, 2, 0), simd_andc(arg2, simd_himask(64))))
''',
"Ops":["hsimd_packl"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"packl_regSize_blend":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), simd_slli(fw, fw/2, arg1), arg2)
''',
"Ops":["hsimd_packl"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"packl_32_using_Packed_Horizontal_Subtract":\
{
"body":r'''
return _mm_hsub_epi16(simd_and(arg2, simd_lomask(fw)), simd_and(arg1, simd_lomask(fw)))''',
"Ops":["hsimd_packl"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"packl_64_using_Packed_Horizontal_Subtract":\
{
"body":r'''
return _mm_hsub_epi32(simd_and(arg2, simd_lomask(fw)), simd_and(arg1, simd_lomask(fw)))''',
"Ops":["hsimd_packl"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"packl_64_neon":\
{
"body":r'''
tmpArg1 = simd_and(simd_lomask(fw), arg1)
tmpArg2 = simd_and(simd_lomask(fw), arg2)
return vcombine_u64(vorr_u64(vshl_n_u64(vget_high_u64(tmpArg2), fw/2), vget_low_u64(tmpArg2)), vorr_u64(vshl_n_u64(vget_high_u64(tmpArg1), fw/2), vget_low_u64(tmpArg1)))
''',
"Ops":["hsimd_packl"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"packus_packl_blend":\
{
"body":r'''
arg11 = simd_ifh(fw, arg1, simd_constant(fw, 0), arg1)
arg12 = simd_and(simd_lomask(fw), arg11)
arg21 = simd_ifh(fw, arg2, simd_constant(fw, 0), arg2)
arg22 = simd_and(simd_lomask(fw), arg21)
return hsimd_packl(fw, simd_ifh(1, simd_eq(fw, arg12, arg11), arg12, simd_lomask(fw)), simd_ifh(1, simd_eq(fw, arg22, arg21), arg22, simd_lomask(fw)))''',
"Ops":["hsimd_packus"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"packus_blend":\
{
"body":r'''
hiPart = hsimd_packh(fw, arg1, arg2)
return simd_ifh(fw/2, hiPart, simd_constant(fw/2, 0), simd_or(simd_gt(fw/2, hiPart, simd_constant(fw/2, 0)), hsimd_packl(fw, arg1, arg2)))''',
"Ops":["hsimd_packus"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"packss_packl_blend":\
{
"body":r'''
hiBound = simd_srli(fw, 1, simd_lomask(fw))
loBound = simd_not(hiBound)
return hsimd_packl(fw, simd_ifh(1, simd_gt(fw, arg1, hiBound), hiBound, simd_ifh(1, simd_gt(fw, arg1, loBound), arg1, loBound)), simd_ifh(1, simd_gt(fw, arg2, hiBound), hiBound, simd_ifh(1, simd_gt(fw, arg2, loBound), arg2, loBound)))''',
"Ops":["hsimd_packss"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
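    # hsimd_signmask gathers the sign (high) bit of every fw-bit field into a scalar
    # bitmask. The halving strategies first pack the sign-carrying high halves into
    # fw/2-bit fields and recurse; the doubling strategy merges with zeros so the sign
    # bits land in 2*fw-bit fields whose two partial masks are then concatenated.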
"signmask_halving_packh":\
{
"body":r'''
return hsimd_signmask(fw/2, hsimd_packh(fw, simd_constant(fw, 0), arg1))''',
"Ops":["hsimd_signmask"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"signmask_halving_packss":\
{
"body":r'''
return hsimd_signmask(fw/2, hsimd_packss(fw, simd_constant(fw, 0), arg1))''',
"Ops":["hsimd_signmask"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"signmask_doubling":\
{
"body":r'''
tmpAns1 = hsimd_signmask(2*fw, esimd_mergeh(fw, arg1, simd_constant(fw, 0)))
tmpAns2 = hsimd_signmask(2*fw, esimd_mergel(fw, arg1, simd_constant(fw, 0)))
return (tmpAns1<<(curRegSize/(2*fw))) + tmpAns2''',
"Ops":["hsimd_signmask"],
"Fws":range(curRegSize/32, curRegSize+1),
"Platforms":[configure.ALL],
},
"signmask_32_SSE":\
{
"body":r'''return _mm_movemask_ps(IDISA_CASTING("_mm_castsi128_ps",arg1))''',
"Ops":["hsimd_signmask"],
"Fws":[32],
"Platforms":([configure.SSE2]),
},
"signmask_64_SSE":\
{
"body":r'''return _mm_movemask_pd(IDISA_CASTING("_mm_castsi128_pd",arg1))''',
"Ops":["hsimd_signmask"],
"Fws":[64],
"Platforms":([arch for arch in configure.SSE_SERIES]),
},
"signmask_avx":\
{
"body":r'''
return (IDISA_CASTING("uint64_t", _mm_movemask_epi8(IDISA_CASTING("__m128i", avx_select_hi128(arg1))))<<16) | IDISA_CASTING("uint64_t", _mm_movemask_epi8(IDISA_CASTING("__m128i", avx_select_lo128(arg1))))''',
"Ops":["hsimd_signmask"],
"Fws":[8],
"Platforms":[configure.AVX],
},
"signmask_avx2_32":\
{
"body":r'''
return _mm256_movemask_ps(IDISA_CASTING("_mm256_castsi256_ps", arg1))''',
"Ops":["hsimd_signmask"],
"Fws":[32],
"Platforms":[configure.AVX2],
},
"signmask_avx2_64":\
{
"body":r'''
return _mm256_movemask_pd(IDISA_CASTING("_mm256_castsi256_pd", arg1))''',
"Ops":["hsimd_signmask"],
"Fws":[64],
"Platforms":[configure.AVX2],
},
"signmask_16_general_128bit":\
{
"body":r'''
return ((mvmd_extract(16, 7, arg1) >> 8) & 128) | ((mvmd_extract(16, 6, arg1) >> 9) & 64) | ((mvmd_extract(16, 5, arg1) >> 10) & 32) | ((mvmd_extract(16, 4, arg1) >> 11) & 16) | ((mvmd_extract(16, 3, arg1) >> 12) & 8) | ((mvmd_extract(16, 2, arg1) >> 13) & 4) | ((mvmd_extract(16, 1, arg1) >> 14) & 2) | (mvmd_extract(16, 0, arg1) >> 15)''',
"Ops":["hsimd_signmask"],
"Fws":[16],
"Platforms":([configure.NEON] + [arch for arch in configure.SSE_SERIES]),
},
"signmask_32_general_128bit":\
{
"body":r'''
return ((mvmd_extract(32, 3, arg1) >> 28) & 8) | ((mvmd_extract(32, 2, arg1) >> 29) & 4) | ((mvmd_extract(32, 1, arg1) >> 30) & 2) | (mvmd_extract(32, 0, arg1) >> 31)''',
"Ops":["hsimd_signmask"],
"Fws":[32],
"Platforms":([configure.NEON] + [arch for arch in configure.SSE_SERIES]),
},
"signmask_64_general_128bit":\
{
"body":r'''
return ((mvmd_extract(64, 1, arg1) >> 62) & 2) | (mvmd_extract(64, 0, arg1) >> 63)''',
"Ops":["hsimd_signmask"],
"Fws":[64],
"Platforms":([configure.NEON] + [arch for arch in configure.SSE_SERIES]),
},
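    # esimd_mergeh / esimd_mergel interleave the fw-bit fields of the high (resp. low)
    # halves of the two operands into 2*fw-bit fields; the doubling/halving strategies
    # express this interleave through merges at a neighbouring field width plus shifts
    # and blends that pre-position the fields.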
"merge_doubling":\
{
"body":r'''
return esimd_op(2*fw, simd_ifh(1, simd_himask(2*fw), arg1, simd_srli(2*fw, fw, arg2)), simd_ifh(1, simd_himask(2*fw), simd_slli(2*fw, fw, arg1), arg2))
''',
"Ops":["esimd_mergel", "esimd_mergeh"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"merge_havling":\
{
"body":r'''
return esimd_op(fw/2, simd_ifh(1, simd_himask(fw), arg1, simd_srli(fw, fw/2, arg2)), simd_ifh(1, simd_himask(fw), simd_slli(fw, fw/2, arg1), arg2))
''',
"Ops":["esimd_mergel", "esimd_mergeh"],
"Fws":range(1, curRegSize/2+1),
"Platforms":[configure.ALL],
},
"mergeh_avx_using_SSE_BuiltIns":\
{
"body":r'''
hiPart2 = avx_select_hi128(arg2)
hiPart1 = avx_select_hi128(arg1)
return avx_general_combine256(IDISA_PACK("_mm_unpackhi_epi$fw$", hiPart2, hiPart1), IDISA_PACK("_mm_unpacklo_epi$fw$", hiPart2, hiPart1))''',
"Ops":["esimd_mergeh"],
"Fws":[8, 16, 32, 64],
"Platforms":configure.AVX_SERIES,
},
"mergel_avx_using_SSE_BuiltIns":\
{
"body":r'''
loPart2 = avx_select_lo128(arg2)
loPart1 = avx_select_lo128(arg1)
return avx_general_combine256(IDISA_PACK("_mm_unpackhi_epi$fw$", loPart2, loPart1), IDISA_PACK("_mm_unpacklo_epi$fw$", loPart2, loPart1))''',
"Ops":["esimd_mergel"],
"Fws":[8, 16, 32, 64],
"Platforms":configure.AVX_SERIES,
},
"mergeh_64_neon":\
{
"body":r'''
return vcombine_u64(vget_high_u64(arg2), vget_high_u64(arg1))
''',
"Ops":["esimd_mergeh"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"mergel_64_neon":\
{
"body":r'''
return vcombine_u64(vget_low_u64(arg2), vget_low_u64(arg1))
''',
"Ops":["esimd_mergel"],
"Fws":[64],
"Platforms":[configure.NEON],
},
"signextendh_blend":\
{
"body":r'''
return esimd_mergeh(2*fw, simd_srai(2*fw, fw, arg1), simd_srai(2*fw, fw, simd_slli(2*fw, fw, arg1)))''',
"Ops":["esimd_signextendh"],
"Fws":range(1, curRegSize/2),
"Platforms":[configure.ALL],
},
"singextendh_half_curRegSize_blend":\
{
"body":r'''
return simd_srai(2*fw, fw, arg1)''',
"Ops":["esimd_signextendh"],
"Fws":[curRegSize/2],
"Platforms":[configure.ALL],
},
"signextendh_using_signextendl":\
{
"body":r'''
return esimd_signextendl(fw, simd_srli(curRegSize, curRegSize/2, arg1))''',
"Ops":["esimd_signextendh"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"signextendl_blend":\
{
"body":r'''
return esimd_mergel(2*fw, simd_srai(2*fw, fw, arg1), simd_srai(2*fw, fw, simd_slli(2*fw, fw, arg1)))''',
"Ops":["esimd_signextendl"],
"Fws":range(1, curRegSize/2),
"Platforms":[configure.ALL],
},
"singextendl_half_curRegSize_blend":\
{
"body":r'''
return simd_srai(2*fw, fw, simd_slli(2*fw, fw, arg1))''',
"Ops":["esimd_signextendl"],
"Fws":[curRegSize/2],
"Platforms":[configure.ALL],
},
"signextendl_8_using_Packed_Move_with_Sign_Extend":\
{
"body":r'''
return _mm_cvtepi8_epi16(arg1)''',
"Ops":["esimd_signextendl"],
"Fws":[8],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"signextendl_16_using_Packed_Move_with_Sign_Extend":\
{
"body":r'''
return _mm_cvtepi16_epi32(arg1)''',
"Ops":["esimd_signextendl"],
"Fws":[16],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"signextendl_32_using_Packed_Move_with_Sign_Extend":\
{
"body":r'''
return _mm_cvtepi32_epi64(arg1)''',
"Ops":["esimd_signextendl"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"zeroextendh_blend":\
{
"body":r'''
return esimd_mergeh(2*fw, simd_srli(2*fw, fw, arg1), simd_and(simd_lomask(2*fw), arg1))''',
"Ops":["esimd_zeroextendh"],
"Fws":range(1, curRegSize/2),
"Platforms":[configure.ALL],
},
"zeroextendh_half_curRegSize_blend":\
{
"body":r'''
return simd_srli(2*fw, fw, arg1)''',
"Ops":["esimd_zeroextendh"],
"Fws":[curRegSize/2],
"Platforms":[configure.ALL],
},
"zeroextendh_using_zeroextendl":\
{
"body":r'''
return esimd_zeroextendl(fw, simd_srli(curRegSize, curRegSize/2, arg1))''',
"Ops":["esimd_zeroextendh"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"zeroextendl_blend":\
{
"body":r'''
return esimd_mergel(2*fw, simd_srli(2*fw, fw, arg1), simd_and(simd_lomask(2*fw), arg1))''',
"Ops":["esimd_zeroextendl"],
"Fws":range(1, curRegSize/2),
"Platforms":[configure.ALL],
},
"zeroextendl_half_curRegSize_blend":\
{
"body":r'''
return simd_and(simd_lomask(2*fw), arg1)''',
"Ops":["esimd_zeroextendl"],
"Fws":[curRegSize/2],
"Platforms":[configure.ALL],
},
"zeroextendl_8_using_Packed_Move_with_Zero_Extend":\
{
"body":r'''
return _mm_cvtepu8_epi16(arg1)''',
"Ops":["esimd_zeroextendl"],
"Fws":[8],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"zeroextendl_16_using_Packed_Move_with_Zero_Extend":\
{
"body":r'''
return _mm_cvtepu16_epi32(arg1)''',
"Ops":["esimd_zeroextendl"],
"Fws":[16],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"zeroextendl_32_using_Packed_Move_with_Zero_Extend":\
{
"body":r'''
return _mm_cvtepu32_epi64(arg1)''',
"Ops":["esimd_zeroextendl"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
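    # mvmd_fill broadcasts an immediate value into every fw-bit field. fill_halving splits
    # the immediate across two fw/2-bit fields, fill_doubling packs two copies into a 2*fw
    # constant, and the fillN variants fill the register with a repeating pattern of N
    # immediates.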
"fill_halving":\
{
"body":r'''
return mvmd_fill2(fw/2, (val1>>fw/2), val1 & ((1<<fw/2) -1))
''',
"Ops":["mvmd_fill"],
"Fws":[16,32,64],
"Platforms":[configure.ALL],
},
"fill_halving2":\
{
"body":r'''
return mvmd_fill2(fw/2, 0, val1)
''',
"Ops":["mvmd_fill"],
"Fws":[128,256],
"Platforms":[configure.ALL],
},
# "fill_halving24":\
# {
# "body":r'''
# return mvmd_fill4(fw/2, 0, val1, 0, val2)
# ''',
# "Ops":["mvmd_fill2"],
# "Fws":[64,128,256],
# "Platforms":[configure.ALL],
# }, #This strategy seems wrong. Meng
"fill_doubling":\
{
"body":r'''
return mvmd_fill(2*fw, (val1<<fw)|val1)
''',
"Ops":["mvmd_fill"],
"Fws":range(2, 16+1),
"Platforms":[configure.ALL],
},
"fill_1_blend":\
{
#mvmd<1>::fill only accepts 0 or -1
"body":r'''
return mvmd_fill(32, -1*val1)
''',
"Ops":["mvmd_fill"],
"Fws":[1],
"Platforms":[configure.ALL],
},
"fill_64_blend":\
{
"body":r'''
return _mm_set_epi32(val1>>32, val1, val1>>32, val1)
''',
"Ops":["mvmd_fill"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"fill_128_blend":\
{
"body":r'''
return _mm_set_epi32(0, 0, val1>>32, val1)
''',
"Ops":["mvmd_fill"],
"Fws":[128],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"fill2_blend":\
{
"body":r'''
return mvmd_fill(2*fw, (val1<<fw)|(val2&((1<<fw)-1)))''',
"Ops":["mvmd_fill2"],
"Fws":range(1, 16+1),
"Platforms":[configure.ALL],
},
"fill2_himask_blend":\
{
"body":r'''
return simd_ifh(1, simd_himask(2*fw), mvmd_fill(fw, val1), mvmd_fill(fw, val2))''',
"Ops":["mvmd_fill2"],
"Fws":range(32, curRegSize/2+1),
"Platforms":[configure.ALL],
},
"fill4_fill2_blend":\
{
"body":r'''
return simd_ifh(1, simd_himask(4*fw), mvmd_fill2(fw, val1, val2), mvmd_fill2(fw, val3, val4))''',
"Ops":["mvmd_fill4"],
"Fws":range(1, curRegSize/4+1),
"Platforms":[configure.ALL],
},
"fill4_doubling":\
{
"body":r'''
return simd_or(mvmd_fill4(2*fw, val1<<fw, val3<<fw, val1<<fw, val3<<fw), mvmd_fill4(2*fw, val2&((1<<fw)-1), val4&((1<<fw)-1), val2&((1<<fw)-1), val4&((1<<fw)-1)))''',
"Ops":["mvmd_fill4"],
"Fws":range(1, curRegSize/8+1),
"Platforms":[configure.ALL],
},
"fill8_fill4_blend":\
{
"body":r'''
return simd_ifh(1, simd_himask(8*fw), mvmd_fill4(fw, val1, val2, val3, val4), mvmd_fill4(fw, val5, val6, val7, val8))''',
"Ops":["mvmd_fill8"],
"Fws":range(1, curRegSize/8+1),
"Platforms":[configure.ALL],
},
"fill8_doubling":\
{
"body":r'''
return simd_or(mvmd_fill8(2*fw, val1<<fw, val3<<fw, val5<<fw, val7<<fw, val1<<fw, val3<<fw, val5<<fw, val7<<fw), mvmd_fill8(2*fw, val2&((1<<fw)-1), val4&((1<<fw)-1), val6&((1<<fw)-1), val8&((1<<fw)-1), val2&((1<<fw)-1), val4&((1<<fw)-1), val6&((1<<fw)-1), val8&((1<<fw)-1)))''',
"Ops":["mvmd_fill8"],
"Fws":range(1, curRegSize/16+1),
"Platforms":[configure.ALL],
},
"fill16_fill8_blend":\
{
"body":r'''
return simd_ifh(1, simd_himask(16*fw), mvmd_fill8(fw, val1, val2, val3, val4, val5, val6, val7, val8), mvmd_fill8(fw, val9, val10, val11, val12, val13, val14, val15, val16))''',
"Ops":["mvmd_fill16"],
"Fws":range(1, curRegSize/16+1),
"Platforms":[configure.ALL],
},
"fill16_doubling":\
{
"body":r'''
return simd_or(mvmd_fill16(2*fw, val1<<fw, val3<<fw, val5<<fw, val7<<fw, val9<<fw, val11<<fw, val13<<fw, val15<<fw, val1<<fw, val3<<fw, val5<<fw, val7<<fw, val9<<fw, val11<<fw, val13<<fw, val15<<fw), mvmd_fill16(2*fw, val2&((1<<fw)-1), val4&((1<<fw)-1), val6&((1<<fw)-1), val8&((1<<fw)-1), val10&((1<<fw)-1), val12&((1<<fw)-1), val14&((1<<fw)-1), val16&((1<<fw)-1), val2&((1<<fw)-1), val4&((1<<fw)-1), val6&((1<<fw)-1), val8&((1<<fw)-1), val10&((1<<fw)-1), val12&((1<<fw)-1), val14&((1<<fw)-1), val16&((1<<fw)-1)))''',
"Ops":["mvmd_fill16"],
"Fws":range(1, curRegSize/32+1),
"Platforms":[configure.ALL],
},
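    # mvmd_splat broadcasts the field at index pos to every field of the register. For
    # 1-bit fields this isolates bit pos and negates it, so a set bit becomes all ones;
    # wider widths either recurse through a doubled field width or simply extract the
    # field and refill the register with it.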
"splat_1_blend":\
{
"body":r'''
return simd_sub(curRegSize, simd_constant(curRegSize, 0), simd_and(simd_constant(curRegSize, 1), simd_srli(curRegSize, pos, arg1)))''',
"Ops":["mvmd_splat"],
"Fws":[1],
"Platforms":[configure.ALL],
},
# "splat_doubling":\
# {
# "body":r'''
# tmpArg = simd_slli(2*fw, fw, arg1) if pos%2==0 else simd_srli(2*fw, fw, arg1)
# arg11 = simd_and(simd_lomask(2*fw), arg1) if pos%2==0 else simd_and(simd_himask(2*fw), arg1)
# return mvmd_splat(2*fw, pos/2, simd_or(tmpArg, arg11))''',
# "Ops":["mvmd_splat"],
# "Fws":range(1, curRegSize/2+1),
# "Platforms":[configure.ALL],
# },
"splat_doubling":\
{
"body":r'''
return mvmd_splat(2*fw, pos/2, simd_or(simd_slli(2*fw, fw, arg1) if pos%2==0 else simd_srli(2*fw, fw, arg1), simd_and(simd_lomask(2*fw), arg1) if pos%2==0 else simd_and(simd_himask(2*fw), arg1)))''',
"Ops":["mvmd_splat"],
"Fws":range(1, curRegSize/2+1),
"Platforms":[configure.ALL],
},
"splat_halving":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), mvmd_splat(fw/2, 2*pos+1, arg1), mvmd_splat(fw/2, 2*pos, arg1))''',
"Ops":["mvmd_splat"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"splat_32_blend":\
{
"body":r'''
return mvmd_shufflei(32, shufflemask4(pos, pos, pos, pos), arg1)''',
"Ops":["mvmd_splat"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"splat_general_neon":\
{
"body":r'''
return mvmd_fill(fw, mvmd_extract(fw, pos, arg1))''',
"Ops":["mvmd_splat"],
"Fws":[-1],
"Platforms":[configure.NEON],
},
"splat_8_using_Extract_Byte":\
{
"body":r'''
return mvmd_fill(fw, _mm_extract_epi8(arg1, pos))''',
"Ops":["mvmd_splat"],
"Fws":[8],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"splat_8_avx_using_Extract_Byte":\
{
"body":r'''
return mvmd_fill(fw, _mm_extract_epi8(avx_select_lo128(arg1), pos)) if (pos<16) else mvmd_fill(fw, _mm_extract_epi8(avx_select_hi128(arg1), pos-16))''',
"Ops":["mvmd_splat"],
"Fws":[8],
"Platforms":configure.AVX_SERIES,
},
"splat_16_using_Extract_Word":\
{
"body":r'''
return mvmd_fill(fw, _mm_extract_epi16(arg1, pos))''',
"Ops":["mvmd_splat"],
"Fws":[16],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"splat_16_avx_using_Extract_Byte":\
{
"body":r'''
return mvmd_fill(fw, _mm_extract_epi16(avx_select_lo128(arg1), pos)) if (pos<8) else mvmd_fill(fw, _mm_extract_epi16(avx_select_hi128(arg1), pos-8))''',
"Ops":["mvmd_splat"],
"Fws":[16],
"Platforms":configure.AVX_SERIES,
},
"splat_32_using_Extract_Dword":\
{
"body":r'''
return mvmd_fill(fw, _mm_extract_epi32(arg1, pos))''',
"Ops":["mvmd_splat"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"splat_32_avx_using_Extract_Dword":\
{
"body":r'''
return mvmd_fill(fw, _mm_extract_epi32(avx_select_lo128(arg1), pos)) if (pos<4) else mvmd_fill(fw, _mm_extract_epi32(avx_select_hi128(arg1), pos-4))''',
"Ops":["mvmd_splat"],
"Fws":[32],
"Platforms":configure.AVX_SERIES,
},
"mvmd_slli_blend":\
{
"body":r'''
return simd_slli(curRegSize, sh*fw, arg1)''',
"Ops":["mvmd_slli"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"mvmd_srli_blend":\
{
"body":r'''
return simd_srli(curRegSize, sh*fw, arg1)''',
"Ops":["mvmd_srli"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"mvmd_slli_64_avx2_permute":\
{
"body":r'''
return simd_and(_mm256_set_epi64x(18446744073709551615, 18446744073709551615, 18446744073709551615, 0), _mm256_permute4x64_epi64(arg1, 128+16)) if sh == 1 else (simd_and(_mm256_set_epi64x(18446744073709551615, 18446744073709551615, 0, 0), _mm256_permute4x64_epi64(arg1, 64)) if sh == 2 else (simd_and(_mm256_set_epi64x(18446744073709551615, 0, 0, 0), _mm256_permute4x64_epi64(arg1, 0)) if sh == 3 else (arg1 if sh == 0 else simd_constant(32, 0))))''',
"Ops":["mvmd_slli"],
"Fws": [64],
"Platforms":[configure.AVX2],
},
"mvmd_srli_64_avx2_permute":\
{
"body":r'''
return simd_and(_mm256_set_epi64x(0, 0, 0, 18446744073709551615), _mm256_permute4x64_epi64(arg1, 3)) if sh == 3 else (simd_and(_mm256_set_epi64x(0, 0, 18446744073709551615, 18446744073709551615), _mm256_permute4x64_epi64(arg1, 14)) if sh == 2 else (simd_and(_mm256_set_epi64x(0, 18446744073709551615, 18446744073709551615, 18446744073709551615), _mm256_permute4x64_epi64(arg1, 57)) if sh == 1 else (arg1 if sh == 0 else simd_constant(32, 0))))''',
"Ops":["mvmd_srli"],
"Fws":[64],
"Platforms":[configure.AVX2],
},
"shufflei_64_blend":\
{
"body":r'''
return mvmd_shufflei(32, shufflemask4_from_shufflemask2(msk), arg1)''',
"Ops":["mvmd_shufflei"],
"Fws":[64],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
# "shufflei_16_blend":\
# {
# "body":r'''
# tmphi = _mm_shufflehi_epi16(arg1, shufflemask8_to_shufflemask4(msk)>>8)
# tmpAns = _mm_shufflelo_epi16(tmphi, shufflemask8_to_shufflemask4(msk)&255)
# tmplh = _mm_shufflehi_epi16(simd_slli(128, 64, arg1), shufflemask8_to_shufflemask4(msk)>>8)
# tmphl = _mm_shufflelo_epi16(simd_srli(128, 64, arg1), shufflemask8_to_shufflemask4(msk)&255)
# a1 = 0 if ((msk>>21)&4)==0 else ((1<<(fw+1))-1)
# a2 = 0 if ((msk>>18)&4)==0 else ((1<<(fw+1))-1)
# a3 = 0 if ((msk>>15)&4)==0 else ((1<<(fw+1))-1)
# a4 = 0 if ((msk>>12)&4)==0 else ((1<<(fw+1))-1)
# a5 = ((1<<(fw+1))-1) if ((msk>>9)&4)==0 else 0
# a6 = ((1<<(fw+1))-1) if ((msk>>6)&4)==0 else 0
# a7 = ((1<<(fw+1))-1) if ((msk>>3)&4)==0 else 0
# a8 = ((1<<(fw+1))-1) if (msk&4)==0 else 0
# return simd_ifh(1, mvmd_fill8(fw, a1, a2, a3, a4, a5, a6, a7, a8), tmpAns, simd_or(tmplh, tmphl))''',
# "Ops":["mvmd_shufflei"],
# "Fws":[16],
# "Platforms":[arch for arch in configure.SSE_SERIES],
# },
# Changed to suit C generator
"shufflei_16_blend":\
{
"body":r'''
return simd_ifh(1, mvmd_fill8(fw, 0 if ((msk>>21)&4)==0 else ((1<<(fw+1))-1), 0 if ((msk>>18)&4)==0 else ((1<<(fw+1))-1), 0 if ((msk>>15)&4)==0 else ((1<<(fw+1))-1), 0 if ((msk>>12)&4)==0 else ((1<<(fw+1))-1), ((1<<(fw+1))-1) if ((msk>>9)&4)==0 else 0, ((1<<(fw+1))-1) if ((msk>>6)&4)==0 else 0, ((1<<(fw+1))-1) if ((msk>>3)&4)==0 else 0, ((1<<(fw+1))-1) if (msk&4)==0 else 0), _mm_shufflelo_epi16(_mm_shufflehi_epi16(arg1, shufflemask8_to_shufflemask4(msk)>>8), shufflemask8_to_shufflemask4(msk)&255), simd_or(_mm_shufflehi_epi16(simd_slli(128, 64, arg1), shufflemask8_to_shufflemask4(msk)>>8), _mm_shufflelo_epi16(simd_srli(128, 64, arg1), shufflemask8_to_shufflemask4(msk)&255)))''',
"Ops":["mvmd_shufflei"],
"Fws":[16],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"dslli_blend":\
{
"body":r'''
return simd_or(mvmd_slli(fw, sh, arg1), mvmd_srli(fw, curRegSize/fw-sh, arg2))''',
"Ops":["mvmd_dslli"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"dsrli_blend":\
{
"body":r'''
return simd_or(mvmd_srli(fw, sh, arg1), mvmd_slli(fw, curRegSize/fw-sh, arg2))''',
"Ops":["mvmd_dsrli"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"shuffle_halving":\
{
"body":r'''
tmp1 = simd_and(simd_constant(fw, curRegSize/fw-1), arg2)
msk1 = simd_add(fw, tmp1, tmp1)
msk2 = simd_add(fw, msk1, simd_constant(fw, 1))
msk = simd_or(msk1, simd_slli(fw, fw/2, msk2))
return simd_ifh(fw, arg2, simd_constant(fw, 0), mvmd_shuffle(fw/2, arg1, msk))''',
"Ops":["mvmd_shuffle"],
"Fws":range(2, curRegSize/2+1),
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"mvmd_extract_havling":\
{
"body":r'''
return (((IDISA_CASTING("uint64_t", mvmd_extract(fw/2, 2*pos+1, arg1)))<<(fw/2)) | mvmd_extract(fw/2, 2*pos, arg1))''',
"Ops":["mvmd_extract"],
"Fws":range(2, 64+1),
"Platforms":[configure.ALL],
},#IDISA_CASTING("uint64_t", ((1<<fw)-1)) &
"mvmd_extract_8_SSE":\
{
"body":r'''
return 255 & _mm_extract_epi8(arg1, pos)''',
"Ops":["mvmd_extract"],
"Fws":[8],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"mvmd_extract_8_AVX":\
{
"body":r'''
return (255 & _mm_extract_epi8(avx_select_lo128(arg1), pos)) if (pos<16) else (255 & _mm_extract_epi8(avx_select_hi128(arg1), pos-16))
''',
"Ops":["mvmd_extract"],
"Fws":[8],
"Platforms":configure.AVX_SERIES,
},
"mvmd_extract_16_SSE":\
{
"body":r'''
return 65535 & _mm_extract_epi16(arg1, pos)''',
"Ops":["mvmd_extract"],
"Fws":[16],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"mvmd_extract_16_AVX":\
{
"body":r'''
return (65535 & _mm_extract_epi16(avx_select_lo128(arg1), pos)) if (pos<8) else (65535 & _mm_extract_epi16(avx_select_hi128(arg1), pos-8))
''',
"Ops":["mvmd_extract"],
"Fws":[16],
"Platforms":configure.AVX_SERIES,
},
"mvmd_extract_32_SSE":\
{
"body":r'''
return IDISA_CASTING("uint64_t", (1<<32)-1) & _mm_extract_epi32(arg1, pos)''',
"Ops":["mvmd_extract"],
"Fws":[32],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"mvmd_extract_32_AVX":\
{
"body":r'''
return (IDISA_CASTING("uint64_t", (1<<32)-1) & _mm_extract_epi32(avx_select_lo128(arg1), pos)) if (pos<4) else (IDISA_CASTING("uint64_t", (1<<32)-1) & _mm_extract_epi32(avx_select_hi128(arg1), pos-4))
''',
"Ops":["mvmd_extract"],
"Fws":[32],
"Platforms":configure.AVX_SERIES,
},
"mvmd_extract_doubling":\
{
"body":r'''
return (mvmd_extract(2*fw, pos/2, arg1) & ((1<<fw)-1)) if pos%2==0 else (mvmd_extract(2*fw, pos/2, arg1)>>fw)''',
"Ops":["mvmd_extract"],
"Fws":range(1, 32+1),
"Platforms":[configure.ALL],
},
"mvmd_sXli_halving":\
{
"body":r'''
return (mvmd_op(fw/2, sh*2, arg1))''',
"Ops":["mvmd_slli", "mvmd_srli"],
"Fws":[-1],
"Platforms":[configure.ALL],
},
"simd_add_2_logic":\
{
    #f0 = (a1&b1)^(a0^b0)
#f1 = a1^b1
"body":r'''
tmp = simd_xor(arg1, arg2)
return simd_ifh(1, simd_himask(fw), simd_xor(tmp, simd_slli(curRegSize, 1, simd_and(arg1, arg2))), tmp)''',
"Ops":["simd_add"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_sub_2_logic":\
{
"body":r'''
tmp = simd_xor(arg1, arg2)
return simd_ifh(1, simd_himask(fw), simd_xor(tmp, simd_slli(curRegSize, 1, simd_and(simd_not(arg1), arg2))), tmp)''',
"Ops":["simd_sub"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_mult_2_logic":\
{
"body":r'''
tmp1 = simd_slli(curRegSize, 1, arg1)
tmp2 = simd_slli(curRegSize, 1, arg2)
return simd_ifh(1, simd_himask(fw), simd_or(simd_and(tmp1, simd_and(arg2, simd_or(simd_not(arg1), simd_not(tmp2)))), simd_and(arg1, simd_and(tmp2, simd_or(simd_not(tmp1), simd_not(arg2))))), simd_and(arg1, arg2))''',
"Ops":["simd_mult"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_eq_2_logic":\
{
"body":r'''
tmp = simd_xor(arg1, arg2)
tmpAns = simd_and(simd_not(simd_slli(curRegSize, 1, tmp)), simd_not(tmp))
return simd_ifh(1, simd_himask(fw), tmpAns, simd_srli(curRegSize, 1, tmpAns))''',
"Ops":["simd_eq"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_gt_2_logic":\
{
"body":r'''
tmp = simd_not(arg1)
tmpAns = simd_or(simd_and(tmp, arg2), simd_and(simd_slli(curRegSize, 1, simd_and(arg1, simd_not(arg2))), simd_or(tmp, arg2)))
return simd_ifh(1, simd_himask(fw), tmpAns, simd_srli(curRegSize, 1, tmpAns))''',
"Ops":["simd_gt"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_ugt_2_logic":\
{
"body":r'''
tmp = simd_not(arg2)
tmpAns = simd_or(simd_and(arg1, tmp), simd_and(simd_slli(curRegSize, 1, simd_and(arg1, tmp)), simd_or(arg1, tmp)))
return simd_ifh(1, simd_himask(fw), tmpAns, simd_srli(curRegSize, 1, tmpAns))''',
"Ops":["simd_ugt"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_lt_2_logic":\
{
"body":r'''
tmp = simd_not(arg2)
tmpAns = simd_or(simd_and(arg1, tmp), simd_and(simd_slli(curRegSize, 1, simd_and(simd_not(arg1), arg2)), simd_or(arg1, tmp)))
return simd_ifh(1, simd_himask(fw), tmpAns, simd_srli(curRegSize, 1, tmpAns))''',
"Ops":["simd_lt"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_ult_2_logic":\
{
"body":r'''
tmp = simd_not(arg1)
tmpAns = simd_or(simd_and(tmp, arg2), simd_and(simd_slli(curRegSize, 1, simd_and(tmp, arg2)), simd_or(tmp, arg2)))
return simd_ifh(1, simd_himask(fw), tmpAns, simd_srli(curRegSize, 1, tmpAns))''',
"Ops":["simd_ult"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_max_2_logic":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), simd_and(arg1, arg2), simd_or(simd_and(arg2, simd_srli(curRegSize, 1, simd_or(arg1, simd_not(arg2)))), simd_and(arg1, simd_srli(curRegSize, 1, simd_or(simd_not(arg1), arg2)))))''',
"Ops":["simd_max"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_umax_2_logic":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), simd_or(arg1, arg2), simd_or(simd_and(arg2, simd_srli(curRegSize, 1, simd_or(simd_not(arg1), arg2))), simd_and(arg1, simd_srli(curRegSize, 1, simd_or(arg1, simd_not(arg2))))))''',
"Ops":["simd_umax"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_min_2_logic":\
{
"body":r'''
tmp1 = simd_srli(curRegSize, 1, arg1)
tmp2 = simd_srli(curRegSize, 1, arg2)
return simd_ifh(1, simd_himask(fw), simd_or(arg1, arg2), simd_or(simd_and(arg1, simd_and(tmp1, simd_not(tmp2))), simd_and(arg2, simd_or(simd_and(simd_not(tmp1), tmp2), arg1))))''',
"Ops":["simd_min"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_umin_2_logic":\
{
"body":r'''
tmp1 = simd_srli(curRegSize, 1, arg1)
tmp2 = simd_srli(curRegSize, 1, arg2)
return simd_ifh(1, simd_himask(fw), simd_and(arg1, arg2), simd_or(simd_and(simd_and(tmp1, simd_not(tmp2)), arg2), simd_and(arg1, simd_or(simd_and(simd_not(tmp1), tmp2), arg2))))''',
"Ops":["simd_umin"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_abs_2_logic":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), simd_and(arg1, simd_slli(curRegSize, 1, simd_not(arg1))), arg1)''',
"Ops":["simd_abs"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_neg_2_logic":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), simd_xor(arg1, simd_slli(curRegSize, 1, arg1)), arg1)''',
"Ops":["simd_neg"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_add_hl_2_logic":\
{
"body":r'''
return simd_ifh(1, simd_himask(fw), simd_and(arg1, simd_slli(curRegSize, 1, arg1)), simd_xor(simd_srli(curRegSize, 1, arg1), arg1))''',
"Ops":["simd_add_hl"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_xor_hl_2_logic":\
{
"body":r'''
return simd_and(simd_lomask(fw), simd_xor(simd_srli(curRegSize, 1, arg1), arg1))''',
"Ops":["simd_xor_hl"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_ctz_2_logic":\
{
"body":r'''
tmp = simd_not(arg1)
return simd_ifh(1, simd_himask(fw), simd_and(tmp, simd_slli(curRegSize, 1, tmp)), simd_and(simd_srli(curRegSize, 1, arg1), tmp))''',
"Ops":["simd_ctz"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"bitblock_any_movemask":\
{
"body":r'''
return hsimd_signmask(8, simd_eq(8, arg1, simd_constant(8, 0))) != 0xFFFF''',
"Ops":["bitblock_any"],
"Fws":[curRegSize],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"bitblock_any_avx":\
{
"body":r'''
return _mm256_testz_si256(IDISA_CASTING("__m256i", arg1), IDISA_CASTING("__m256i", arg1)) == 0''',
"Ops":["bitblock_any"],
"Fws":[curRegSize],
"Platforms":configure.AVX_SERIES,
},
"bitblock_any_neon":\
{
"body":r'''
return hsimd_signmask(32, simd_eq(32, arg1, simd_constant(32, 0))) != 15''',
"Ops":["bitblock_any"],
"Fws":[curRegSize],
"Platforms":[configure.NEON],
},
# "bitblock_any_using_PTEST":\
# {
# "body":r'''
#return _mm_testz_si128(arg1, simd_constant(8, -1)) != 0''',
# "Ops":["bitblock_any"],
# "Fws":[curRegSize]
# },
"bitblock_all_movemask":\
{
"body":r'''
return hsimd_signmask(8, simd_eq(8, arg1, simd_constant(8, 255))) == 0xFFFF''',
"Ops":["bitblock_all"],
"Fws":[curRegSize],
"Platforms":[arch for arch in configure.SSE_SERIES],
},
"bitblock_all_avx_using_VPTEST":\
{
"body":r'''
return _mm256_testz_si256(IDISA_CASTING("__m256i", simd_not(arg1)), IDISA_CASTING("__m256i", simd_constant(8, 255))) == 1''',
"Ops":["bitblock_all"],
"Fws":[curRegSize],
"Platforms":configure.AVX_SERIES,
},
"bitblock_all_neon":\
{
"body":r'''
return hsimd_signmask(32, simd_eq(32, arg1, simd_constant(32, 4294967295))) == 15''',
"Ops":["bitblock_all"],
"Fws":[curRegSize],
"Platforms":[configure.NEON],
},
# "bitblock_all_using_PTEST":\
# {
# "body":r'''
#return _mm_testz_si128(arg1, simd_constant(8, -1)) == 1''',
# "Ops":["bitblock_all"],
# "Fws":[curRegSize]
# },
"bitblock_popcount":\
{
"body":r'''
return mvmd_extract(64, 0, simd_popcount(curRegSize, arg1))''',
"Ops":["bitblock_popcount"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"bitblock_srl":\
{
"body":r'''
return simd_srl(curRegSize, arg1, arg2)''',
"Ops":["bitblock_srl"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"bitblock_sll":\
{
"body":r'''
return simd_sll(curRegSize, arg1, arg2)''',
"Ops":["bitblock_sll"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"bitblock_srli":\
{
"body":r'''
return simd_srli(curRegSize, sh, arg1)''',
"Ops":["bitblock_srli"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"bitblock_slli":\
{
"body":r'''
return simd_slli(curRegSize, sh, arg1)''',
"Ops":["bitblock_slli"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"simd_any":\
{
"body":r'''
return simd_ugt(fw, arg1, simd_constant(8, 0))''',
"Ops":["simd_any"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"simd_all":\
{
"body":r'''
return simd_eq(fw, arg1, simd_constant(8, 255))''',
"Ops":["simd_all"],
"Fws":range(2, curRegSize+1),
"Platforms":[configure.ALL],
},
"simd_any_bitblock_any":\
{
"body":r'''
return simd_constant(8, 255) if bitblock_any(arg1) else simd_constant(8, 0)''',
"Ops":["simd_any"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"simd_all_bitblock_all":\
{
"body":r'''
return simd_constant(8, 255) if bitblock_all(arg1) else simd_constant(8, 0)''',
"Ops":["simd_all"],
"Fws":[curRegSize],
"Platforms":[configure.ALL],
},
"simd_any_2_logic":\
{
"body":r'''
t0 = simd_srli(2, 1, arg1)
f0 = simd_or(t0, simd_and(arg1, simd_xor(t0, simd_constant(8, 255))))
return simd_or(f0, simd_slli(2,1,f0))''',
"Ops":["simd_any"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"simd_all_2_logic":\
{
"body":r'''
f0 = simd_and(arg1, simd_srli(2, 1, arg1))
return simd_or(f0, simd_slli(2,1,f0))''',
"Ops":["simd_all"],
"Fws":[2],
"Platforms":[configure.ALL],
},
"hsimd_add_hl_avx2_32":\
{
"body":r'''
alpha = _mm256_permute2x128_si256(arg2, arg1, 32)
beta = _mm256_permute2x128_si256(arg2, arg1, 49)
return _mm256_hadd_epi16(alpha, beta)
''',
"Ops":["hsimd_add_hl"],
"Fws":[32],
"Platforms":[configure.AVX2],
},
"hsimd_add_hl_avx2_64":\
{
"body":r'''
alpha = _mm256_permute2x128_si256(arg2, arg1, 32)
beta = _mm256_permute2x128_si256(arg2, arg1, 49)
return _mm256_hadd_epi32(alpha, beta)
''',
"Ops":["hsimd_add_hl"],
"Fws":[64],
"Platforms":[configure.AVX2],
},
"hsimd_packus_avx2_16": \
{
"body":r'''
alpha = _mm256_permute2x128_si256(arg2, arg1, 32)
beta = _mm256_permute2x128_si256(arg2, arg1, 49)
return _mm256_packus_epi16(alpha, beta)
''',
"Ops":["hsimd_packus"],
"Fws":[16],
"Platforms":[configure.AVX2],
},
"hsimd_packus_avx2_32": \
{
"body":r'''
alpha = _mm256_permute2x128_si256(arg2, arg1, 32)
beta = _mm256_permute2x128_si256(arg2, arg1, 49)
return _mm256_packus_epi32(alpha, beta)
''',
"Ops":["hsimd_packus"],
"Fws":[32],
"Platforms":[configure.AVX2],
},
"hsimd_packss_avx2_16": \
{
"body":r'''
alpha = _mm256_permute2x128_si256(arg2, arg1, 32)
beta = _mm256_permute2x128_si256(arg2, arg1, 49)
return _mm256_packs_epi16(alpha, beta)
''',
"Ops":["hsimd_packss"],
"Fws":[16],
"Platforms":[configure.AVX2],
},
"hsimd_packss_avx2_32": \
{
"body":r'''
alpha = _mm256_permute2x128_si256(arg2, arg1, 32)
beta = _mm256_permute2x128_si256(arg2, arg1, 49)
return _mm256_packs_epi32(alpha, beta)
''',
"Ops":["hsimd_packss"],
"Fws":[32],
"Platforms":[configure.AVX2],
},
"mvmd_insert_halfing": \
{
"body":r'''
return mvmd_insert(fw/2, 2*pos, mvmd_insert(fw/2, 2*pos+1, arg1, (arg2 >> (fw/2))), (arg2 & ((1<<(fw/2)) - 1)))
''',
"Ops":["mvmd_insert"],
"Fws":range(2, 65),
"Platforms":[configure.ALL],
},
"mvmd_insert_doubling": \
{
"body":r'''
v = arg2 & ((1 << fw) - 1)
doublev = mvmd_extract(fw*2, pos/2, arg1)
return mvmd_insert(fw*2, pos/2, arg1, (((doublev >> fw) << fw) | v) if (pos & 1) == 0 else (doublev & ((1<<fw)-1) | (v << fw)))
''',
"Ops":["mvmd_insert"],
"Fws":range(2, 33),
"Platforms":[configure.ALL],
},
"mvmd_insert_16_avx2": \
{
"body":r'''
return avx_general_combine256(avx_select_hi128(arg1), _mm_insert_epi16(avx_select_lo128(arg1), arg2, pos)) if pos < 8 else avx_general_combine256(_mm_insert_epi16(avx_select_hi128(arg1), arg2, pos-8), avx_select_lo128(arg1))
''',
"Ops":["mvmd_insert"],
"Fws":[16],
"Platforms":[configure.AVX2],
},
}
return strategies
|
from common import activities
from core.activity import Activities
from discord.ext import commands
class ActivityList(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="activitylist")
@commands.is_owner()
async def activityList(self, ctx):
formatted_list = []
for activity in activities.activity_list:
formatted_list.append(f"`{activity}`")
await ctx.send(
"The current activities are:\n- " + "\n- ".join(formatted_list)
)
|
# Is Unique:
# Implement an algorithm to determine if a string has all unique characters. What if you cannot use additional data structures?
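# A minimal, hedged sketch of one way to approach this (added for illustration;
# the helper names below are not part of the original file). The set-based check
# assumes extra storage is allowed; the sorting variant avoids auxiliary data
# structures at the cost of O(n log n) time.
def is_unique(s):
    """Return True if every character in s appears exactly once."""
    return len(set(s)) == len(s)
def is_unique_no_extra_structures(s):
    """Sort the characters and look for adjacent duplicates."""
    chars = sorted(s)
    return all(chars[i] != chars[i + 1] for i in range(len(chars) - 1))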
|
from django.apps import AppConfig
class SorularConfig(AppConfig):
name = 'sorular'
|
from django.shortcuts import render
from .controller import *
def index(request):
context = login(request)
return render(request, 'pokebattle/base.html', context)
def battle(request):
context = login(request)
return render(request, 'pokebattle/battle.html', context)
def my_pokemon(request):
context = login(request)
trainer = context.get('trainer')
pokemon = [get_pokemon_and_img_url(pokemon) for pokemon in trainer.pokemon_collection.all()]
quicksort(pokemon, 0, len(pokemon) - 1, "attack")
context.update({'my_pokemon': pokemon,
'pokemon_count': len(pokemon)})
return render(request, 'pokebattle/my_pokemon.html', context)
def pokemon(request, pokemon_id):
context = login(request)
pokemon = Pokemon.objects.get(pokemon_id=pokemon_id)
pokemon = get_pokemon_and_img_url(pokemon)
context.update({'pokemon': pokemon})
return render(request, 'pokebattle/pokemon_details.html', context)
def stats(request):
context = login(request)
trainer = context.get('trainer')
games = Game.objects.filter(trainer_id=trainer.id, status=2)
gamesWon = Game.objects.filter(trainer_id=trainer.id, status=2, final_result=3)
gamesLost = Game.objects.filter(trainer_id=trainer.id, status=2, final_result=1)
context.update({'games': games,
'gamesWon': gamesWon,
'gamesLost': gamesLost})
return render(request, 'pokebattle/stats.html', context)
def about(request):
context = login(request)
return render(request, 'pokebattle/about.html', context)
|
"""Test functions for fooof.utils.params."""
import numpy as np
from fooof.utils.params import *
###################################################################################################
###################################################################################################
def test_compute_knee_frequency():
assert compute_knee_frequency(100, 2)
def test_compute_time_constant():
assert compute_time_constant(100)
def test_compute_fwhm():
assert compute_fwhm(1.5)
def test_compute_gauss_std():
assert compute_gauss_std(1.0)
|
# Product of Array Except Self: https: // leetcode.com/problems/product-of-array-except-self/
# Given an integer array nums, return an array answer such that answer[i] is equal to the product of all the elements of nums except nums[i].
# The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.
# You must write an algorithm that runs in O(n) time and without using the division operation.
# In this problem we simply calculate the prefix and suffix products at each index,
# then return prefix[i] * suffix[i] for every index i.
class Solution:
def productExceptSelf(self, nums):
prefix, suffix = [0] * len(nums), [0] * len(nums)
prefix[0] = 1
suffix[-1] = 1
for i in range(1, len(nums)):
prefix[i] = prefix[i-1] * nums[i-1]
for i in reversed(range(len(nums) - 1)):
suffix[i] = suffix[i+1] * nums[i+1]
return [prefix[i] * suffix[i] for i in range(len(nums))]
# Aww snap this is a super neat way of doing this: it runs in O(N) time
# to calculate the prefix and suffix products, and uses O(N) space for the
# three arrays (prefix, suffix, and the result).
# Technically you could reduce this to O(1) additional space by building the
# prefix products directly in the result array, keeping the running suffix
# product in a single variable, and multiplying it in as you sweep from the
# right (see the sketch below).
# Score Card
# Did I need hints? Y
# Did you finish within 30 min? 7
# Was the solution optimal? Kinda (technically we could do O(1) additional space but it is really still O(N))
# Were there any bugs? N
# 5 5 5 5 = 5
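# A hedged sketch of that O(1)-extra-space idea (the output array not counted),
# added for illustration and not part of the original submission:
class SolutionConstantSpace:
    def productExceptSelf(self, nums):
        # res[i] first accumulates the product of everything to the left of i
        res = [1] * len(nums)
        for i in range(1, len(nums)):
            res[i] = res[i - 1] * nums[i - 1]
        # sweep from the right, carrying the running suffix product in one variable
        suffix = 1
        for i in reversed(range(len(nums))):
            res[i] *= suffix
            suffix *= nums[i]
        return res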
|
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.shortcuts import render, get_object_or_404
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from memberships.models import UserMembership
from .models import Video
class VideoListView(ListView):
model = Video
class VideoDetailView(DetailView):
model = Video
template_name = "videos/video_detail.html"
class VideoCreateView(CreateView):
model = Video
fields = ['title','description', 'videofile','thumbnail', 'private', 'tags']
def form_valid(self, form):
form.instance.uploader = self.request.user
return super().form_valid(form)
class VideoUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Video
fields = ['description']
def form_valid(self, form):
form.instance.uploader = self.request.user
return super().form_valid(form)
def test_func(self):
video = self.get_object()
if self.request.user == video.uploader:
return True
return False
class VideoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Video
success_url = '/'
def test_func(self):
video = self.get_object()
if self.request.user == video.uploader:
return True
return False
|
myt1 = (12,10,38,22)
myt2 = sorted(myt1, reverse = True)
print(myt2)
myt3 = sorted(myt1, reverse = False)
print(myt3)
print(type(myt3))
|
"""
Deep Deterministic Policy Gradient agent
Author: Sameera Lanka
Website: https://sameera-lanka.com
"""
# Torch
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import math
# Lib
import gym
import numpy as np
import random
from copy import deepcopy
#from dm_control import suite
import matplotlib.pyplot as plt
#get_ipython().magic(u'matplotlib inline')
#from IPython.display import clear_output
#from IPython import display
import os
# Files
from noise import OrnsteinUhlenbeckActionNoise as OUNoise
from replaybuffer import Buffer
from actorcritic import Actor, Critic
# Hyperparameters
ACTOR_LR = 0.001
CRITIC_LR = 0.002
MINIBATCH_SIZE = 64
NUM_EPISODES = 500
MU = 0
SIGMA = 1
CHECKPOINT_DIR = 'Github_raw_500/DDPG-PyTorch'
BUFFER_SIZE = 1000000
DISCOUNT = 0.999
TAU = 0.001
WARMUP = 70 # >= MINIBATCH_SIZE
EPSILON = 1.0
EPSILON_DECAY = 1e-7
LOGSTEP = 10
def obs2state(observation):
"""Converts observation dictionary to state tensor"""
l1 = [val.tolist() for val in list(observation.values())]
l2 = []
for sublist in l1:
try:
l2.extend(sublist)
except:
l2.append(sublist)
return torch.FloatTensor(l2).view(1, -1)
class DDPG:
def __init__(self, env):
self.env = env
#self.stateDim = obs2state(env.reset().observation).size()[1]
#self.actionDim = env.action_spec().shape[0]
self.stateDim = env.observation_space.shape[0]
self.actionDim = env.action_space.shape[0]
self.actor = Actor(self.env)
self.critic = Critic(self.env)
        # Initialize the target networks as copies of the online networks so they
        # start from identical weights (copying a freshly constructed Actor or
        # Critic would give the targets an unrelated random initialization).
        self.targetActor = deepcopy(self.actor)
        self.targetCritic = deepcopy(self.critic)
self.actorOptim = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
self.criticOptim = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)
self.criticLoss = nn.MSELoss()
self.noise = OUNoise(mu=np.zeros(self.actionDim), sigma=SIGMA)
self.replayBuffer = Buffer(BUFFER_SIZE)
self.batchSize = MINIBATCH_SIZE
self.checkpoint_dir = CHECKPOINT_DIR
self.discount = DISCOUNT
self.warmup = WARMUP
self.epsilon = EPSILON
self.epsilon_decay = EPSILON_DECAY
self.rewardgraph = []
self.stepgraph = []
self.start = 0
self.end = NUM_EPISODES
def getQTarget(self, nextStateBatch, rewardBatch, terminalBatch):
"""Inputs: Batch of next states, rewards and terminal flags of size self.batchSize
Calculates the target Q-value from reward and bootstraped Q-value of next state
using the target actor and target critic
Outputs: Batch of Q-value targets"""
targetBatch = torch.FloatTensor(rewardBatch)
nonFinalMask = torch.ByteTensor(tuple(map(lambda s: s != True, terminalBatch)))
nextStateBatch = torch.cat(nextStateBatch)
nextActionBatch = self.targetActor(nextStateBatch)
nextActionBatch.volatile = False
qNext = self.targetCritic(nextStateBatch, nextActionBatch)
nonFinalMask = self.discount * nonFinalMask.type(torch.FloatTensor)
targetBatch += nonFinalMask * qNext.squeeze().data
return Variable(targetBatch, volatile = False)
def updateTargets(self, target, original):
"""Weighted average update of the target network and original network
Inputs: target actor(critic) and original actor(critic)"""
for targetParam, orgParam in zip(target.parameters(), original.parameters()):
targetParam.data.copy_((1 - TAU)*targetParam.data + \
TAU*orgParam.data)
def getMaxAction(self, curState):
"""Inputs: Current state of the episode
Returns the action which maximizes the Q-value of the current state-action pair"""
#spec = self.env.action_spec()
#minAct = Variable(torch.FloatTensor(spec.minimum), requires_grad=False)
#maxAct = Variable(torch.FloatTensor(spec.maximum), requires_grad=False)
noise = self.epsilon * Variable(torch.FloatTensor(self.noise()), volatile=False)
action = self.actor(curState)
actionNoise = action + noise
return actionNoise
def play(self, showdata=False):
print("Playing started...")
for i in range(1):
time_step = self.env.reset()
step = 0
begins = True
while True:
self.env.render()
# Get maximizing action
if begins:
curState = Variable(torch.FloatTensor(time_step).view(1, -1), volatile = False)
begins = False
else :
curState = Variable(torch.FloatTensor(time_step[0]).view(1, -1), volatile = False)
self.actor.eval()
action = self.getMaxAction(curState)
#curState.volatile = False
action.volatile = False
if showdata:
Qsa = self.critic(curState, action)
print("action:", action, " on state:", curState)
print(" with Q(s,a)=", Qsa)
# Step episode
time_step = self.env.step(action.data)
nextState = Variable(torch.FloatTensor(time_step[0]).view(1, -1))
#reward = time_step[1]
reward = myreward(time_step)
if showdata:
print(" and gets reward: ", reward)
terminal = time_step[2]
step += 1
if terminal :
print("Succeed")
break
def train(self):
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
print('Training started...')
for i in range(self.start, self.end):
time_step = self.env.reset()
ep_reward = 0
step = 0
begins = True
while True:
#Visualize Training
#display.clear_output(wait=True)
if (i % LOGSTEP == 0) :
self.env.render()
#plt.show()
# Get maximizing action
if begins:
curState = Variable(torch.FloatTensor(time_step).view(1, -1), volatile = False)
begins = False
else :
curState = Variable(torch.FloatTensor(time_step[0]).view(1, -1), volatile = False)
self.actor.eval()
action = self.getMaxAction(curState)
#curState.volatile = False
action.volatile = False
self.actor.train()
if i % LOGSTEP == 0:
Qsa = self.critic(curState, action)
print("action:", action, " on state ", curState)
print(" with Q(s,a)=", Qsa)
# Step episode
time_step = self.env.step(action.data)
nextState = Variable(torch.FloatTensor(time_step[0]).view(1, -1))
#reward = time_step[1]
reward = myreward(time_step)
ep_reward += reward
terminal = time_step[2]
step += 1
# Update replay bufer
self.replayBuffer.append((curState, action, nextState, reward, terminal))
# Training loop
if len(self.replayBuffer) >= self.warmup:
curStateBatch, actionBatch, nextStateBatch, \
rewardBatch, terminalBatch = self.replayBuffer.sample_batch(self.batchSize)
curStateBatch = torch.cat(curStateBatch)
actionBatch = torch.cat(actionBatch)
qPredBatch = self.critic(curStateBatch, actionBatch).reshape(-1)
qTargetBatch = self.getQTarget(nextStateBatch, rewardBatch, terminalBatch)
# Critic update
self.criticOptim.zero_grad()
criticLoss = self.criticLoss(qPredBatch, qTargetBatch)
#criticLoss = F.smooth_l1_loss(qPredBatch, qTargetBatch)
#if step % 5 == 4 :
# print('Critic Loss: {}'.format(criticLoss))
criticLoss.backward(retain_graph=True)
self.criticOptim.step()
# Actor update
self.actorOptim.zero_grad()
actorLoss = -torch.mean(self.critic(curStateBatch, self.actor(curStateBatch)))
#if step % 5 == 4 :
# print('Actor Loss: {}'. format(actorLoss))
actorLoss.backward(retain_graph=True)
self.actorOptim.step()
# Update Targets
self.updateTargets(self.targetActor, self.actor)
self.updateTargets(self.targetCritic, self.critic)
self.epsilon -= self.epsilon_decay
if time_step[2] :
break
print(i, ':', step)
if i % 20 == 0:
self.save_checkpoint(i)
self.stepgraph.append(step)
self.rewardgraph.append(ep_reward)
def save_checkpoint(self, episode_num):
        # Join with os.path.join so checkpoints are written inside the checkpoint directory
        checkpointName = os.path.join(self.checkpoint_dir, 'ep{}.pth.tar'.format(episode_num))
checkpoint = {
'episode': episode_num,
'actor': self.actor.state_dict(),
'critic': self.critic.state_dict(),
'targetActor': self.targetActor.state_dict(),
'targetCritic': self.targetCritic.state_dict(),
'actorOpt': self.actorOptim.state_dict(),
'criticOpt': self.criticOptim.state_dict(),
'replayBuffer': self.replayBuffer,
'rewardgraph': self.rewardgraph,
'epsilon': self.epsilon,
'stepgraph': self.stepgraph
}
torch.save(checkpoint, checkpointName)
def loadCheckpoint(self, checkpointName):
if os.path.isfile(checkpointName):
print("Loading checkpoint...")
checkpoint = torch.load(checkpointName)
self.start = checkpoint['episode'] + 1
self.actor.load_state_dict(checkpoint['actor'])
self.critic.load_state_dict(checkpoint['critic'])
self.targetActor.load_state_dict(checkpoint['targetActor'])
self.targetCritic.load_state_dict(checkpoint['targetCritic'])
self.actorOptim.load_state_dict(checkpoint['actorOpt'])
self.criticOptim.load_state_dict(checkpoint['criticOpt'])
self.replayBuffer = checkpoint['replayBuffer']
self.rewardgraph = checkpoint['rewardgraph']
self.epsilon = checkpoint['epsilon']
self.stepgraph = checkpoint['stepgraph']
print('Checkpoint loaded')
else:
raise OSError('Checkpoint not found')
def myreward(time_step):
    # Currently passes through the raw environment reward; the shaped reward
    # below is kept for reference but is unreachable because of this early return.
    return time_step[1]
next_state, reward, done, _ = time_step
position = next_state[0]
velocity = next_state[1]
r = (3 * position + 2) * 0 + 9.8 * (math.sin(3 * position) + 1) + 0.5 * velocity**2
if done:
r += 100
return torch.tensor([r])
|