content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
# Copyright (C) 2020 anoduck
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
## Imports
import os
import re
import sys
import math
import time
import shlex
import locale
import atexit
import requests
import threading
import traceback
import argparse
import configparser
from io import StringIO
from lxml import etree
from datetime import datetime, timedelta
from itertools import count
from collections import namedtuple
## Import errors
import_errors = []  # names of required third-party modules that failed to import
try:
    import argcomplete
except ModuleNotFoundError:
    import_errors.append("argcomplete")
try:
    from prompt_toolkit import prompt
    from prompt_toolkit.completion import Completer, Completion
    from prompt_toolkit.history import FileHistory
except ModuleNotFoundError:
    import_errors.append("prompt_toolkit")
## Import Selenium
try:
    ## Requires Selenium...OK...
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.common.exceptions import NoSuchElementException
    from selenium.webdriver.support import expected_conditions as EC
except ModuleNotFoundError:
    import_errors.append("selenium")
# Report every missing dependency at once, then abort instead of failing later.
if import_errors:
    for import_error in import_errors:
        print("Module '" + import_error + "' not found.")
    sys.exit(1)
## Program Meta
program = "pyEbaySniper"
version = "0.1"
commands = {}  # available shell commands: name/alias -> argparsed function.
variables = {}  # variables are stored here: name -> registered ShellVar record.
threads = []  # bid threads (BidThread instances), indexed by thread id.
log = None  # log file handle; opened in main(), used for DEBUG tracebacks.
# setup config variables
def setup_vars():
    """Register all shell variables with their defaults and validators."""
    reg_variable("USER", "User for ebay")
    reg_variable("PASSWORD", "Password for ebay")
    reg_variable(
        "DRIVER",
        "Driver to use with selenium",
        validate=lambda v: v in ("Chrome", "Firefox"),
    )
    reg_variable(
        "LOCALE",
        "Localization for numerics and monetary stuff",
        # NOTE(review): setlocale raises locale.Error on bad input rather than
        # returning a falsy value, so invalid locales surface as exceptions,
        # not as the "Invalid value" message -- confirm intent.
        validate=lambda v: locale.setlocale(locale.LC_ALL, v),
    )
    reg_variable(
        "BID_AHEAD_SECONDS",
        "How many seconds before the actually specified time the bid should be placed",
        value=3,
        type=int,
    )
    reg_variable("HISTORY", "History file", os.path.expanduser("~/.ebay_hist"))
    # reg_variable('COOKIE_FILE', 'File for cookies. (Optional)',
    #              os.path.expandvars('/tmp/ebay-$USER-cookie')
    #              )
    # NOTE(review): type=bool means 'set DEBUG 0' stores bool("0") == True;
    # any non-empty string enables DEBUG -- confirm this is intended.
    reg_variable(
        "DEBUG", "Print stacktraces and write ghostdriver.log", type=bool, value=0
    )
    reg_variable(
        "LOGIN_URL",
        "URL for ebay login page",
        "https://signin.ebay.de/ws/eBayISAPI.dll?SignIn",
    )
    reg_variable(
        "LOGIN_URL_RE",
        "RegEx to check if URL is a login page",
        "https://signin.ebay.de",
    )
    reg_variable(
        "LOGIN_FIELD_PASS_RE",
        "RegEx to find password input field in login page",
        "passwor",
    )
    reg_variable(
        "LOGIN_FIELD_USER_RE", "RegEx to find user input field in login page", "e-mail"
    )
def print_infos():
    """Print the program banner plus hints about unset mandatory variables."""
    print("\n%s version %s\n" % (program, version))
    if not get_variable("LOCALE"):
        print("LOCALE is unset")
        print("\tYou have to explicitly set the ebay locale.")
        print("\tUse 'set LOCALE <ebay locale>' to the locale of your ebay site.")
        print(
            "\tKeep in mind that all input inside this shell will also be interpreted according to that locale."
        )
        print()
    try:
        get_login_credentials()
    except Exception:  # bug fix: bare except also swallowed SystemExit/KeyboardInterrupt
        print("Remember to set the login credentials")
        print("\tset USER <username>")
        print("\tset PASSWORD <password>")
        print()
## Main function
def main():
    """Parse CLI arguments, read the config file, then either execute the
    given script files or start the interactive shell."""
    # bug fix: without the global declaration, this function bound a *local*
    # 'log' and the module-level handle used by process_line() stayed None.
    global log
    argp = argparse.ArgumentParser(
        prog=program, description="Automated bidding on eBay articles"
    )
    argp.add_argument(
        "--rc",
        metavar="FILE",
        help="Specify config file to read on startup",
        default=os.path.join(os.path.expanduser("~"), ".ebayrc"),
    )
    argp.add_argument(
        "--log",
        metavar="FILE",
        help="Specify log file",
        default=os.path.expanduser("~/.ebaylog"),
    )
    argp.add_argument(
        "file", metavar="FILE", nargs="*", help="Specify script files to execute"
    )
    args = argp.parse_args()
    setup_vars()
    log = open(args.log, "a")
    if os.path.exists(args.rc):
        shell_source(args.rc)
    if not args.file:
        print_infos()
        read_stdin()
    else:
        for f in args.file:
            shell_source(f)
def read_script(input_stream):
    """Execute every line of *input_stream* as a shell command until EOF."""
    # readline() returns "" at end of file, which terminates the iterator
    for raw_line in iter(input_stream.readline, ""):
        process_line(raw_line.rstrip())
def read_stdin():
    """Interactive REPL: prompt for command lines until EOF (Ctrl-D)."""
    while True:
        try:
            # history is re-checked each iteration so 'set HISTORY ...' takes
            # effect immediately
            if get_variable("HISTORY"):
                fileHistory = FileHistory(get_variable("HISTORY"))
            else:
                fileHistory = None
            line = prompt(
                program + " > ", history=fileHistory, completer=CommandCompleter()
            )
            process_line(line)
        except EOFError:
            break
def process_line(cmdline):
    """Parse one shell input line and dispatch it to the matching command.

    Empty lines and '#' comments are ignored; command errors are printed
    (with a traceback to the log file when DEBUG is set)."""
    if not cmdline:
        return
    if cmdline.startswith("#"):
        return
    try:
        # split into the command word and the (optional) rest of the line
        cmdline = cmdline.split(maxsplit=1)
        if not cmdline:
            return
        if len(cmdline) == 2:
            call_command(cmdline[0], cmdline[1])
        else:
            call_command(cmdline[0], "")
    except EOFError:
        # 'quit' signals shutdown by raising EOFError
        sys.exit(0)
    except Exception as e:
        print(program + ":", cmdline[0] + ":", e)
        if get_variable("DEBUG"):
            traceback.print_exc(file=log)
class CommandCompleter(Completer):
    """prompt_toolkit completer: completes command names and, via argcomplete,
    the arguments of each command's argparser."""

    def __init__(self):
        # one argcomplete finder per registered command/alias
        self.completions = {}
        for name, func in commands.items():
            self.completions[name] = argcomplete.CompletionFinder(func.argparser)

    def complete_command_names(self, line):
        """Yield Completions for the (possibly partial) command name *line*."""
        if not line:
            for command in self.completions.keys():
                yield Completion(command, 0)
        else:
            for command in self.completions.keys():
                if command.startswith(line):
                    yield Completion(command, -len(line))

    def get_completions(self, document, complete_event):
        """prompt_toolkit entry point: complete the command word or, if the
        command is already complete, delegate to its argcomplete finder."""
        l = document.current_line_before_cursor
        if not l:
            return self.complete_command_names(l)
        command, *arguments = l.split()
        if command in self.completions:
            # iterate argcomplete's readline-style completion states until
            # rl_complete returns a falsy value
            for state in count(start=0):
                completed_line = self.completions[command].rl_complete(l, state)
                if completed_line:
                    yield Completion(completed_line[len(l) :], start_position=0)
                else:
                    return
        else:
            for compl_command in self.completions.keys():
                if compl_command.startswith(command):
                    yield Completion(compl_command, start_position=-len(l))
###
# Decorators for making functions behave like command line programs
###
def add_argument(*arg_spec, **kw_spec):
    """ Add an argparser argument inside argparsed_func. """
    # Deferred call: argparsed_func later unpacks this tuple into
    # argparser.add_argument(*arg_spec, **kw_spec); the function itself
    # serves as the tag identifying the call kind.
    return (add_argument, arg_spec, kw_spec)
def add_mutually_exclusive_group(*grouped_calls):
    """ Create mutually exclusive argument group inside argparsed_func. """
    # Deferred call, tagged by the function itself; each element of
    # grouped_calls is an add_argument(...) spec tuple.
    return (add_mutually_exclusive_group, grouped_calls)
def argparse_type(f):
    """ Wrapper function for using functions as argparse's type= parameter """
    # argparse only reports ArgumentTypeError nicely, so translate any
    # conversion failure into that exception type.
    def converter(*call_args, **call_kwargs):
        try:
            result = f(*call_args, **call_kwargs)
        except Exception as exc:
            raise argparse.ArgumentTypeError(str(exc))
        return result
    return converter
def argparsed_func(name, *argparse_cmds, ignore_unknown=False):
    """
    Create commandline-like parameterized functions.
    Call them either with func('--param', 'value')
    or with func(cmdline="--param value")
    """
    def decorate(f):
        def command_function(*args, cmdline=None):
            # accept either pre-split args or a raw command line, not both
            if len(args) and cmdline:
                raise Exception("Please only provide *args or cmdline=")
            elif cmdline:
                arguments = shlex.split(cmdline, comments=True)
            else:
                arguments = args
            try:
                if command_function.ignore_unknown:
                    parsed_args = command_function.argparser.parse_known_args(arguments)
                else:
                    parsed_args = command_function.argparser.parse_args(arguments)
                if cmdline:  # save our cmdline
                    parsed_args._cmdline = cmdline
                return f(parsed_args)
            except SystemExit as e:
                # argparse exits on --help or bad input; stay in the shell
                return
        # build the parser from the deferred add_argument /
        # add_mutually_exclusive_group specs
        argparser = argparse.ArgumentParser(prog=name, description=f.__doc__)
        for cmd in argparse_cmds:
            if cmd[0] == add_argument:
                argparser.add_argument(*cmd[1], **cmd[2])
            elif cmd[0] == add_mutually_exclusive_group:
                group = argparser.add_mutually_exclusive_group()
                for subcmd in cmd[1]:
                    group.add_argument(*subcmd[1], **subcmd[2])
        # expose metadata used by 'help' and CommandCompleter
        command_function.name = name
        command_function.argparser = argparser
        command_function.description = f.__doc__
        command_function.ignore_unknown = ignore_unknown
        return command_function
    return decorate
###
# Functions for handling variables and commands
###
def reg_variable(name, description, value="", type=str, validate=None):
    """Register a shell variable in the global `variables` table.

    :param name: variable name used by 'set'/'help'
    :param description: human-readable description
    :param value: initial value
    :param type: callable applied to string input when the value is set
    :param validate: optional predicate; falsy result rejects the new value
    """
    # bug fix: the old code created a namedtuple *class* per variable and
    # assigned class attributes to it -- it only worked by accident.
    # argparse.Namespace is a plain mutable attribute holder and supports the
    # 'var.value = ...' mutation done by shell_set().
    variables[name] = argparse.Namespace(
        name=name,
        description=description,
        value=value,
        type=type,
        validate=validate,
    )
def get_variable_info(name):
    """ Return variable info """
    try:
        return variables[name]
    except KeyError:
        raise Exception("Variable '{}' not found".format(name)) from None
def get_variable(name):
    """ Return value of variable """
    info = get_variable_info(name)
    return info.value
def reg_command(name, aliases=None):
    """Decorator: register a function as shell command *name*, plus aliases.

    :param aliases: optional iterable of alternative names (default: none).
    """
    # bug fix: mutable default argument ([]) replaced by None-sentinel idiom
    aliases = aliases if aliases is not None else ()
    def decorate(f):
        commands[name] = f
        for alias in aliases:
            commands[alias] = f
        return f
    return decorate
def call_command(command, cmdline):
    """ Call command with commandline """
    try:
        handler = commands[command]
    except KeyError:
        raise Exception("Command not found") from None
    handler(cmdline=cmdline)
###
# Common functions
###
def get_driver():
    """ Return webdriver instance as specified in config (DRIVER variable) """
    args = {}
    if not get_variable("DEBUG"):
        # discard the driver's service log unless debugging
        args["service_log_path"] = os.devnull
    # KeyError here means DRIVER is not one of the supported browsers
    return {
        "Chrome": webdriver.Chrome,
        "Firefox": webdriver.Firefox,
    }[get_variable("DRIVER")](**args)
def get_as_etree(url):
    """Fetch *url* with requests and return it parsed as an lxml HTML tree."""
    response = requests.get(url)
    parser = etree.HTMLParser()
    return etree.parse(StringIO(response.text), parser)
def read_timespan(ts):
    """Read a sleep-like timespan (e.g. '1h30m', '90'), return it as seconds.

    Supported suffixes: w(eeks), d(ays), h(ours), m(inutes), s(econds);
    a bare number means seconds. Raises ValueError on invalid input.
    """
    # bug fix: the validation regex lacked 'h' although the parsing loop
    # supported hours, so e.g. '1h' was rejected; also use raw strings.
    if not re.fullmatch(r"((\d+)([dwhms]?))+", ts):
        raise ValueError("Invalid timespan: '{}'".format(ts))
    seconds = 0
    for amount, multiplier in re.findall(r"(\d+)([dwhms]?)", ts):
        if multiplier == "w":
            seconds += int(amount) * 60 * 60 * 24 * 7
        elif multiplier == "d":
            seconds += int(amount) * 60 * 60 * 24
        elif multiplier == "h":
            seconds += int(amount) * 60 * 60
        elif multiplier == "m":
            seconds += int(amount) * 60
        else:  # 's' or no suffix
            seconds += int(amount)
    return seconds
def read_datetime(dt):
    """ Try to parse date in 3 different manners, return datetime """
    # accepted formats: German, US and ISO-like date plus time
    known_formats = (
        "%d.%m.%Y %H:%M:%S",
        "%m/%d/%Y %H:%M:%S",
        "%Y-%m-%d %H:%M:%S",
    )
    for fmt in known_formats:
        try:
            return datetime.strptime(dt, fmt)
        except ValueError:
            continue
    raise ValueError("Not a valid date: '{}'".format(dt))
def read_price(str_bid):
    """ Read bid as string, return as float. Take care of localization """
    # locale.atof honours the decimal/thousands separators of the LOCALE
    # variable configured via setlocale
    price = locale.atof(str_bid)
    return price
def write_price(float_bid):
    """Write out a bid as a localized string with two decimal places."""
    # bug fix: locale.format() was deprecated since 3.7 and removed in
    # Python 3.12; locale.format_string() is the supported replacement.
    return locale.format_string("%.2f", float_bid)
def get_login_credentials():
    """ Return user and password, die if these parameters are unset """
    credentials = {}
    for var_name in ("USER", "PASSWORD"):
        credentials[var_name] = get_variable(var_name)
        if not credentials[var_name]:
            raise Exception(var_name + " not set")
    return credentials["USER"], credentials["PASSWORD"]
### basic shell commands
@reg_command("help")
@argparsed_func("help", add_argument("what", metavar="WHAT", nargs="?"))
def shell_help(args):
""" Show help for commands or variables """
if not args.what:
func_to_names = {}
for name, func in commands.items():
try:
func_to_names[func].append(name)
except KeyError:
func_to_names[func] = [name]
print("\nAvailable commands:\n")
for func, names in func_to_names.items():
print(" ", ("|".join(names) + ":").rjust(14), func.description)
print("\nType help <command> for further information\n")
elif args.what in commands:
print()
call_command(args.what, "--help")
print()
elif args.what in variables:
print("Description:", get_variable_info(args.what).description)
shell_set(args.what)
print()
else:
raise Exception("'{}' is neither a command nor a variable".format(what))
@reg_command("quit", aliases=["exit"])
@argparsed_func("quit")
def shell_quit(args):
""" Quit the shell """
raise EOFError
@reg_command("set")
@argparsed_func(
"set",
add_argument("name", metavar="NAME", nargs="?"),
add_argument("value", metavar="VALUE", nargs="?"),
)
def shell_set(args):
""" Set or get variable values """
if not args.name: # print all variables if nothing given
for name in variables:
shell_set(name)
elif not args.value:
print(args.name, "=", str(get_variable(args.name)))
else:
var = get_variable_info(args.name)
if var.validate and not var.validate(args.value):
raise Exception("Invalid value for '{}'".format(var.name))
var.value = var.type(args.value)
@reg_command("list", aliases=["ls"])
@argparsed_func("list")
def shell_list(args):
""" List bid threads """
for t in threads:
print(repr(t), "\n")
@reg_command("kill")
@argparsed_func("kill", add_argument("id", type=int))
def shell_kill(args):
""" Kill a bid thread """
try:
threads[args.id].cancel()
except IndexError:
raise Exception("No such thread.")
@reg_command("reload")
@argparsed_func("reload", add_argument("id", type=int))
def shell_reload(args):
""" Update article infos of thread """
try:
threads[args.id].article_infos.load()
except IndexError:
raise Exception("No such thread.")
@reg_command("source")
@argparsed_func("source", add_argument("file"))
def shell_source(args):
""" Source a script file """
if args.file == "-":
read_stdin()
else:
with open(args.file, "r") as fh:
read_script(fh)
@reg_command("bid")
@argparsed_func(
"bid",
add_mutually_exclusive_group(
add_argument(
"--now", dest="now", action="store_true", help="Place the bid immediately"
),
add_argument(
"--after",
dest="after",
metavar="TIMESPAN",
type=argparse_type(read_timespan),
help="Place bid after now + TIMESPAN. Format like /bin/sleep",
),
add_argument(
"--before",
dest="before",
metavar="TIMESPAN",
type=argparse_type(read_timespan),
help="Place the bid on ending time - TIMESPAN. Format like /bin/sleep",
),
add_argument(
"--on",
dest="on",
metavar="TIME",
type=argparse_type(read_datetime),
help="Place the bid on TIME. Format dd.mm.yy HH:MM:SS",
),
),
add_argument(
"--dry",
dest="dry",
action="count",
default=0,
help="Don't actually place the bid, but do the login though. If specified twice, also disable login",
),
add_argument("url", metavar="URL", help="URL to article"),
add_argument(
"bid", metavar="BID", type=argparse_type(read_price), help="Price to bid"
),
)
def shell_bid(args):
""" Place bid on an eBay article """
get_login_credentials() # w/o login information bid will not work, better die NOW
if args.before:
start_time = timedelta(seconds=args.before)
elif args.after:
start_time = datetime.now() + timedelta(seconds=args.after)
elif args.on:
start_time = args.on
else:
start_time = datetime.now()
bid_thread = BidThread(len(threads), args.url, args.bid, start_time, args.dry)
threads.append(bid_thread)
bid_thread.start()
class BidThread(threading.Timer):
    """Timer thread that waits until the configured moment, then logs in to
    eBay via selenium and enters/confirms a bid on the article."""

    def __init__(self, thread_id, url, bid, start_time, dry=False):
        """:param start_time: datetime for an absolute bid time, or a
        timedelta meaning 'this long before the auction ends'.
        :param dry: 1 = don't bid (login only), 2 = don't even log in."""
        self.thread_id = thread_id
        self.url = url
        self.bid = bid
        self.dry = dry
        # initialized fields with 'empty' values
        self.bid_datetime = datetime.fromtimestamp(0)
        self.article_infos = EbayArticleInfoPage(self.url)
        self.bidded = False
        self.error = None
        if isinstance(start_time, timedelta):  # seconds relative to ending time
            self.bid_datetime = self.article_infos.ending_datetime - start_time
        else:
            self.bid_datetime = start_time
        seconds_to_start = math.floor(
            (self.bid_datetime - datetime.now()).total_seconds()
        )
        if seconds_to_start > 300:
            seconds_to_start -= 120  # reserve 120 seconds for login, placing bid etc.
            threading.Timer.__init__(self, seconds_to_start, self.do_bid)
        else:
            threading.Timer.__init__(self, 0, self.do_bid)
        if self.bid < self.article_infos.current_bid:
            # bug fix: format specifiers were '%2.f' (zero decimal places);
            # '%.2f' prints the intended two decimals.
            print(
                "Warning, your bid will fail: Current price is %.2f, your bid is %.2f"
                % (self.article_infos.current_bid, self.bid)
            )

    def start(self):
        """Start the timer and echo the job summary."""
        threading.Timer.start(self)
        print("Job setup\n", repr(self))

    def do_bid(self):
        """Timer callback: log in, enter the bid, wait, then confirm it."""
        driver = None  # bug fix: 'driver' was unbound in finally when get_driver() raised
        try:
            driver = get_driver()
            if self.dry < 2:  # dry level 2 disables the login as well
                self.log("Logging in ...")
                login_page = EbayLoginPage(driver)
                user, password = get_login_credentials()
                try:
                    login_page.login(user, password)
                except Exception:
                    login_page.login(user, password)  # try again if failed
            article_page = EbayArticleBidPage(driver, self.url)
            self.log(
                "Entering bid ",
                write_price(self.bid),
                ("(dry)" if self.dry else ""),
                "...",
            )
            if not self.dry:
                article_page.enter_bid(write_price(self.bid))
            # recompute the remaining wait and confirm a bit ahead of time
            seconds_to_start = math.floor(
                (self.bid_datetime - datetime.now()).total_seconds()
            )
            seconds_to_start -= get_variable("BID_AHEAD_SECONDS")
            if seconds_to_start > 0:
                self.log(
                    "Waiting {} seconds before confirming bid ...".format(
                        seconds_to_start
                    )
                )
                time.sleep(seconds_to_start)
            try:
                self.log("Confirming bid ", ("(dry)" if self.dry else ""), "...")
                self.bidded = True
                if not self.dry:
                    article_page.confirm_bid()
                self.log("Bidding done")
            finally:
                pass
                # if not self.dry:
                #     time.sleep(5) # TODO: bid on something and analyze output.
                #     with open('/tmp/ebay-dump', 'a') as dump_fh:
                #         dump_fh.write('<!-- ')
                #         dump_fh.write(driver.current_url)
                #         dump_fh.write("-->\n")
                #         dump_fh.write(driver.page_source)
        except Exception as e:
            self.log("Got Exception: " + str(e))
            raise
        finally:
            if driver is not None:
                driver.quit()

    def get_status(self):
        """Return a short human-readable state string for 'list'."""
        if self.bidded:
            return "Done"
        elif self.is_alive():
            bidding_in = self.bid_datetime - datetime.now()
            if bidding_in.total_seconds() < 0:
                return "Bidding now"
            else:
                # strip microseconds for display
                bidding_in = str(bidding_in).split(".", 2)[0]
                return "Waiting (" + bidding_in + ")"
        else:
            return "Cancelled"

    def __repr__(self):
        return "{}: {}\n  Ending Date: {}\n  Bid Date: {}\n  Current Bid: {}\n  Bid: {}\n  Status: {}".format(
            self.thread_id,
            self.article_infos.title,
            self.article_infos.ending_datetime.strftime("%d.%m.%Y %H:%M:%S"),
            self.bid_datetime.strftime("%d.%m.%Y %H:%M:%S"),
            self.article_infos.currency
            + " "
            + write_price(self.article_infos.current_bid),
            self.article_infos.currency + " " + write_price(self.bid),
            self.get_status(),
        )

    def log(self, *args):
        """Print a message prefixed with this thread's id."""
        print("[" + str(self.thread_id) + "]", *args)
@reg_command("login-test")
@argparsed_func("login-test")
def shell_login(args):
""" Try to to log in to eBay """
user, password = get_login_credentials()
try:
driver = get_driver()
login_page = EbayLoginPage(driver)
login_page.login(user, password)
print("Success")
finally:
driver.quit()
##############################################################
# Ebay Pages
##############################################################
class EbayArticleInfoPage:
    """Scrapes title, current bid and ending time from an eBay article page
    (fetched with requests/lxml, no browser needed)."""

    def __init__(self, url):
        self.url = url
        self.load()

    def load(self):
        """(Re-)download the article page and extract its fields."""
        tree = get_as_etree(self.url)
        try:
            current_bid = tree.xpath('//div[@id="CurrentBid"]')[0].text
            self.currency, bid = current_bid.split()
            self.current_bid = read_price(bid.replace("$", ""))
        except Exception:  # this is worse, but we can live without the bid either
            self.currency, self.current_bid = "???", -1.0
        self.title = ""
        try:
            for text in tree.xpath('//h1[@id="divTitle"]')[0].itertext():
                self.title += text
        except Exception:  # well, we can live without a title ;)
            self.title = "Could not exctract article title. Consider to fix me"
        # bug fix: lxml's ElementTree object has no .id() method; look the
        # element up by its HTML id attribute via xpath instead.
        ending = " ".join(
            filter(
                None,
                map(str.strip, tree.xpath('//*[@id="TimeLeft"]')[0].itertext()),
            )
        )
        ending = ending.lstrip("(")
        ending = ending.rstrip(")")
        ending = ending[0 : ending.rindex(" ")]
        # NOTE(review): assumes dates rendered like '13. Mar. 2020 13:37:00'
        # in the current locale -- confirm against the live page.
        self.ending_datetime = datetime.strptime(ending, "%d. %b. %Y %H:%M:%S")
        # self.ending = dateparser.parse(ending, fuzzy=True)

    def __repr__(self):
        return "{}:\n\tEnding Date: {}\n\tCurrent Bid: {}\n".format(
            self.title,
            self.ending_datetime.strftime("%d.%m.%Y %H:%M:%S"),
            self.currency + " " + write_price(self.current_bid),
        )
class EbayArticleBidPage:
    """Drives the eBay article page through selenium: enter, confirm and
    submit a bid."""

    def __init__(self, driver, url):
        self.driver = driver
        self.url = url
        self.reset()

    def reset(self):
        """(Re-)open the article page in the driver."""
        self.driver.get(self.url)

    def enter_bid(self, bid):
        """ Enter the price in the bid field - without confirmation """
        self.driver.find_element_by_id("MinimumBid").send_keys(bid + "\n")
        # There is a problem with this, There is the bid button on the item page,
        # And then there is the confirmation page that follows afterwards.

    def confirm_bid(self):
        """ Presses the confirmation button """
        try:
            confirm_button = WebDriverWait(self.driver, 5).until(
                EC.element_to_be_clickable(
                    (By.XPATH, '//button[@id="bidButton"]')
                )
            )
            confirm_button.click()
        except Exception as e:
            # try to extract a validation error to report something useful
            try:
                msg = self.driver.find_element_by_xpath(
                    '//span[@id="MaxBidAmount-error"]'
                ).text
            except Exception:
                raise Exception("Could not confirm bid") from e
            raise Exception("Could not confirm bid: " + msg) from None

    ## added into to validate bid on validation page
    def submit_bid(self):
        """ Press the submit bid button on the next page """
        try:
            submit_button = WebDriverWait(self.driver, 5).until(
                EC.element_to_be_clickable(
                    # bug fix: 'BY.XPATH' was an undefined name (NameError);
                    # the imported helper is 'By'.
                    (By.XPATH, '//input[@id="btnBid"]')
                )
            )
            submit_button.click()
        except NoSuchElementException:
            # NOTE(review): WebDriverWait raises TimeoutException on failure,
            # which this handler does not catch -- confirm intended behavior.
            raise Exception("Something wenty kerplewy submitting bid!")

    def is_logged_in(self):
        """ Check if you are logged in """
        # NOTE(review): this marker text references GunBroker.com, not eBay --
        # it looks copied from another project and may never match.
        try:
            self.driver.find_element_by_xpath('//p[contains(text(),"You must Sign In or Register for a GunBroker.com a")]')
            raise Exception("You are not logged in (guest)")
        except NoSuchElementException:
            pass
class EbayLoginPage:
    """Drives the eBay sign-in page through selenium."""

    def __init__(self, driver):
        self.driver = driver

    def reset(self):
        """ Opens eBay login page on driver """
        self.driver.get(get_variable("LOGIN_URL"))

    def is_login_page_open(self):
        """ Checks if eBay login page is open in driver """
        return re.match(get_variable("LOGIN_URL_RE"), self.driver.current_url, re.I)

    def login(self, user, password):
        """ Enters login credentials in current driver
            and checks if login succeeded """
        self.reset()
        old_url = self.driver.current_url
        # bug fix: removed dead 'user.cfg'/json.loads() lines -- 'json' was
        # never imported and the parsed data was never used, so the code only
        # raised NameError at runtime.
        login_form_user_input = None
        login_form_pass_input = None
        # identify the user/password inputs by their placeholder texts
        for i in self.driver.find_elements_by_id('aSignIn'):
            if not i.is_displayed():
                continue
            placeholder = i.get_attribute("placeholder")
            if placeholder:
                if re.match(get_variable("LOGIN_FIELD_PASS_RE"), placeholder, re.I):
                    login_form_pass_input = i
                elif re.match(get_variable("LOGIN_FIELD_USER_RE"), placeholder, re.I):
                    login_form_user_input = i
        if not login_form_user_input:
            raise Exception(
                "Could not find user input field, try editing 'LOGIN_FIELD_USER_RE' variable"
            )
        if not login_form_pass_input:
            # bug fix: message previously pointed at LOGIN_FIELD_USER_RE
            raise Exception(
                "Could not find password input field, try editing 'LOGIN_FIELD_PASS_RE' variable"
            )
        login_form_pass_input.send_keys(password)
        login_form_user_input.send_keys(user)
        login_form_user_input.submit()
        for i in range(5):  # wait for new page
            if self.driver.current_url != old_url:
                break
            time.sleep(1)
        try:  # check for error message
            # bug fix: the inner single quotes terminated the outer string
            # literal (SyntaxError); the XPath text now uses double quotes.
            error_msg = self.driver.find_element_by_xpath(
                '//p[contains(text(),"You have entered an invalid User Name or Password.")]'
            )
            if error_msg.is_displayed():
                raise Exception("Login failed: " + error_msg.text)
        except NoSuchElementException:
            pass  # good ;)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3.9
"""
Copyright (c) 2012, Shai Shasag
All rights reserved.
Licensed under BSD 3 clause license, see LICENSE file for details.
"""
import os
from pathlib import PurePath, Path
import collections
from typing import List, Optional, Union
def something_to_bool(something, default=False):
    """Coerce a bool/int/str to bool; return *default* for anything else.

    Strings accepted as True: yes/true/y/t/1 (case-insensitive);
    as False: no/false/n/f/0. Unrecognized strings yield *default*.
    """
    # bool must be checked before int because bool subclasses int
    if isinstance(something, bool):
        return something
    if isinstance(something, int):
        return something != 0
    if isinstance(something, str):
        lowered = something.lower()
        if lowered in ("yes", "true", "y", "t", "1"):
            return True
        if lowered in ("no", "false", "n", "f", "0"):
            return False
    return default
class ConfigVar:
    """ ConfigVar represents 1 configuration variable that can hold
        zero or more values. ConfigVar can be used as either str or
        list depending on the context and therefor has some methods
        implementing list interface.
        self.values - list of values. Values must be strings because self.values is *not* hierarchical list.
        self.owner - is a reference to ConfigVarStack object that holds this
            ConfigVar and is used for resolving the values that might refer to other ConfigVars
        self.name - the name under which the owner keeps the ConfigVar
            name is useful for debugging, but in runtime ConfigVar has
            no (and should have no) use for it's own name
    """
    # __slots__ keeps per-instance memory low; there can be many ConfigVars
    __slots__ = ("owner", "name", "values", "callback_when_value_is_set", "callback_when_value_is_get", "dynamic")

    def __init__(self, owner, name: str, *values, callback_when_value_is_set=None, callback_when_value_is_get=None) -> None:
        self.owner = owner
        self.name = name
        # dynamic is flipped to True when a custom get-callback is installed
        self.dynamic = False
        self.set_callback_when_value_is_get(callback_when_value_is_get)
        self.set_callback_when_value_is_set(callback_when_value_is_set)
        self.values: List[str] = list()
        self.extend(values)  # extend will flatten hierarchical lists

    def _do_nothing_callback_when_value_is_set(self, *argv, **kwargs):
        """Default no-op set-callback."""
        pass

    def set_callback_when_value_is_set(self, new_callback_when_value_is_set):
        """Install a callback invoked on every append; None restores the no-op."""
        if new_callback_when_value_is_set is None:
            self.callback_when_value_is_set = self._do_nothing_callback_when_value_is_set
        else:
            self.callback_when_value_is_set = new_callback_when_value_is_set

    def set_callback_when_value_is_get(self, new_callback_when_value_is_get):
        """Install a value-resolution callback; None uses owner.resolve_str.

        A custom callback also marks this variable as dynamic (see __iter__).
        """
        if new_callback_when_value_is_get is None:
            self.callback_when_value_is_get = self.owner.resolve_str
        else:
            self.callback_when_value_is_get = new_callback_when_value_is_get
            self.dynamic = True

    def __len__(self) -> int:
        """ :return: number of values """
        retVal = len(self.values)
        return retVal

    def __repr__(self) -> str:
        """ :return: string that can be eval()'ed to recreate the ConfigVar """
        repr_str = f"""{self.__class__.__name__}("{self.name}", *{self.values})"""
        return repr_str

    def __bool__(self) -> bool:
        """ From RafeKettler/magicmethods: Defines behavior for when bool() is called on an instance of your class. Should return True or False, depending on whether you would want to consider the instance to be True or False.
            :return: True if there is a single value and when converted to lower case
            is one of ("yes", "true", "y", 't')
            False otherwise
        """
        retVal = False
        if len(self.values) == 1:
            retVal = something_to_bool(self.values[0], False)
        return retVal

    def __contains__(self, val: str) -> bool:
        # membership is tested against the *resolved* values
        retVal = val in self.resolve_values()
        return retVal

    def resolve_values(self) -> List:
        """Return all values passed through the get-callback."""
        resolved_values = [self.callback_when_value_is_get(val) for val in self.values]
        return resolved_values

    def join(self, sep: str) -> str:
        """Join the resolved values with *sep*."""
        retVal = sep.join(val for val in self.resolve_values())
        return retVal

    def __str__(self) -> Optional[str]:
        """
        calls the owner to resolve each of the values and joins them.
        this is the main method to resolve and use a ConfigVar as a single value
        e.g.:
            var_list["a"].extend("a", "b")
            print(str(var_list["a"]))
        will print:
            ab
        :return: a single string that is resolved representation of the values.
                 if self.values is empty an empty string is returned
        """
        # NOTE(review): a single stored None returns None here, which violates
        # the usual __str__ contract -- callers appear to rely on it.
        if len(self.values) == 1 and self.values[0] is None:
            retVal = None
        else:
            retVal = self.join(sep='')
        return retVal

    def __fspath__(self) -> str:
        """ implements os.PathLike - https://docs.python.org/3.6/library/os.html#os.PathLike
            so configVar can be passed to pathlib, os.path, etc
            we do not really know if the configVar actually represents
            a path, we just return it as a string, hoping to cut redundant slashes and such.
        """
        retVal = os.fspath(PurePath(self.str()))
        return retVal

    def Path(self, resolve: bool=False) -> Optional[Path]:
        """Return the first value as a pathlib.Path, or None if empty.

        With resolve=True, environment variables are expanded and the path
        is resolved against the filesystem."""
        retVal = None
        if self.values and self.values[0]:
            if resolve:
                expanded_path = os.path.expandvars(self.str())
                path_path = Path(expanded_path)
                retVal = path_path.resolve()
            else:
                retVal = Path(self.str())
        return retVal

    def PurePath(self) -> Optional[PurePath]:
        """Return the value as a PurePath (no filesystem access), or None."""
        retVal = None
        if self.values and self.values[0]:
            retVal = PurePath(self.str())
        return retVal

    def __int__(self) -> int:
        retVal = int(self.join(sep=''))
        return retVal

    def __float__(self) -> float:
        retVal = float(self.join(sep=''))
        return retVal

    def __iter__(self):
        """
        calls the owner to resolve each of the values.
        this is the method to resolve and use a ConfigVar as a list of values
        e.g.:
            var_list["a"].extend("a", "b")
            for val in var_list["a"]:
                print(val)
        will print:
            a
            b
        :return: iterator on resolved representation of the values
        """
        for val in self.values:
            # dynamic variables resolve through their custom callback first
            if self.dynamic:
                val = self.callback_when_value_is_get(val)
            yield from self.owner.resolve_str_to_list(val)

    # explicit conversion helpers mirroring the dunder protocols
    def str(self) -> str:
        return str(self)

    def list(self) -> List:
        return list(iter(self))

    def set(self) -> set:
        # annotation fix: this returns a set, not a List
        return set(iter(self))

    def int(self) -> int:
        return int(self)

    def bool(self) -> bool:
        return bool(self)

    def float(self) -> float:
        return float(self)

    def __getitem__(self, index: int) -> str:
        """
        calls the owner to resolve one of the values by it's index.
        e.g.:
            var_list["a"].extend("a", "b")
            print(str(var_list["a"][1]))
        will print:
            b
        :return: resolved representation of one of the values
        """
        retVal = self.callback_when_value_is_get(self.values[index])
        return retVal

    def append(self, value):
        """
        append a single value to the ConfigVar's values
        :param value: either str or int (TBD is limitations needed ?)
        None values are ignored and not appended (TBD should we allow None values? or is empty list enough?)
        """
        if value is not None:
            self.values.append(str(value))
        # NOTE(review): the set-callback fires even for ignored None values
        self.callback_when_value_is_set(self.name, value)

    def extend(self, values):
        """
        append a multiple value to the ConfigVar's values
        but if string is passed it will not be treated like a list
        of characters and will be added as a single value.
        """
        if isinstance(values, (str, int, float, type(None))):
            # so str will not be treated as a list of characters
            self.append(values)
        elif isinstance(values, collections.abc.Sequence):
            for val in values:
                self.extend(val)  # flatten nested lists
        elif isinstance(values, os.PathLike):
            self.append(os.fspath(values))
        else:
            raise TypeError(f"configVar('{self.name}') type of values '{values}' should be str int or sequence not {type(values)}")

    def clear(self):
        """ erase all values """
        if self.values:
            self.values.clear()

    def raw(self, join_sep: Optional[str] = "") -> Union[str, List[str]]:
        """ return the list of values unresolved"""
        # join_sep=None returns the underlying list itself (not a copy)
        if join_sep is None:
            return self.values
        else:
            return join_sep.join(self.values)
|
from pathlib import Path
import typer
from cgr_gwas_qc.parsers import sample_sheet
from cgr_gwas_qc.reporting import REPORT_NAME_MAPPER
from cgr_gwas_qc.workflow.scripts import sample_qc_table
app = typer.Typer(add_completion=False)

# Column set and ordering expected by the legacy report consumers.
COLUMNS = (
    "SR_Subject_ID",
    "LIMS_Individual_ID",
    "Sample_ID",
    "Project-Sample ID",
    "Call_Rate_Initial",
    "Low Call Rate",
    "Contaminated",
    "Sex Discordant",
    "Expected Replicate Discordance",
    "Unexpected Replicate",
)
@app.command()
def main(sample_sheet_csv: Path, sample_qc_csv: Path, outfile: Path):
    """Join the sample sheet with the QC table and write the legacy CSV."""
    ss = sample_sheet.read(sample_sheet_csv, all_user_column=True, remove_exclusions=False)
    qc = sample_qc_table.read(sample_qc_csv).rename(REPORT_NAME_MAPPER, axis=1)
    # Merge and drop duplicate rows from ss: colliding sample-sheet columns
    # get a '_DROP' suffix and are then filtered out by the negative lookahead
    df = qc.merge(ss, on="Sample_ID", how="outer", suffixes=["", "_DROP"]).filter(
        regex="^(?!.*_DROP)", axis=1
    )
    # Adjust names and column order to match legacy
    df.reindex(COLUMNS, axis=1).to_csv(outfile, index=False)
if __name__ == "__main__":
if "snakemake" in locals():
defaults = {
**{k: Path(v) for k, v in snakemake.input.items()}, # type: ignore # noqa
**{"outfile": Path(snakemake.output[0])}, # type: ignore # noqa
}
main(**defaults)
else:
app()
|
# -*- coding: utf-8 -*-
from allink_core.core.loading import get_model
from allink_core.core.sitemap import HrefLangSitemap
# Resolve the (swappable) People model at import time via the project loader.
People = get_model('people', 'People')
class PeopleSitemap(HrefLangSitemap):
    """Sitemap for People entries, with hreflang support via the base class."""
    changefreq = "never"
    priority = 0.5
    i18n = True  # emit language-aware entries

    def __init__(self, *args, **kwargs):
        # optional URL namespace, consumed before delegating to the base class
        self.namespace = kwargs.pop('namespace', None)
        super(PeopleSitemap, self).__init__(*args, **kwargs)

    def items(self):
        # queryset of People restricted by the manager's translated() filter
        return People.objects.translated()

    def lastmod(self, obj):
        return obj.modified
|
from __pyosshell__ import *
from __cluster__ import *
class MD_Operator(object):
    """Builds GROMACS grompp/mdrun command lines and .mdp parameter files.

    MDP options are stored as ``_PLACEHOLDER`` keys in ``self.opt`` and
    substituted into the template written by :meth:`write_grompp_mdp` via
    ``auto_replace`` (provided by the star-imported ``__pyosshell__``).
    NOTE: this module is Python 2 code (print statements).
    """
    def __init__(self):
        # Random seed for velocity generation / stochastic integrators.
        seed = int(np.random.uniform(1,1000000)+0.5)
        # When True, Set() echoes every option change to stdout.
        self.verbose = True
        # Last generated command lines (filled by gen_mdrun_cmd/gen_grompp_cmd).
        self.mdrun_cmd = ''
        self.grompp_cmd = ''
        # Job-name prefix used in the generated queue submission script.
        self.tag = 'MD_'
        # Placeholder -> value map; keys mirror the _UPPERCASE markers in the
        # grompp.mdp template below.
        self.opt ={'_INTEGRATOR' : 'md', # steep md sd ...
             '_DT' : 0.001,
             '_NSTEPS' : 100000,
             '_TINIT' : 0,
             '_TRROUT' : 0,
             '_LOGOUT' : 1000,
             '_XTCOUT' : 1000,
             '_PBC' : 'xyz', # no xy xyz
             '_CUTOFF' : 1.2,
             '_COULOMB' : 'PME',
             '_NSTLIST' : 10,
             '_NS_TYPE' : 'grid',
             '_COMM_MODE' : 'linear', # none linear angular
             '_NSTCOMM' : 10,
             '_COMM_GRPS' : ' ',
             '_TCOUPL' : 'Berendsen', # no Berendsen Nose-Hoover v-rescale
             '_TC_GRPS' : 'System',
             '_TAU_T' : 2,
             '_REF_T' : 300,
             '_PCOUPLING' : 'Berendsen', # no Berendsen Parrinello-Rahman MTTK
             '_PCOUPLTYPE' : 'anisotropic', # isotropic semiisotropic anisotropic
             '_TAU_P' : '1.0 1.0 1.0 0.0 0.0 0.0',
             '_COMPRESSIBILITY' : '4.5e-5 4.5e-5 4.5e-5 0.0 0.0 0.0',
             '_REF_P' : '1.0 1.0 1.0 0.0 0.0 0.0',
             '_ANNEALING' : 'no', # no single periodic
             '_ANNEAL_NPOINTS' : '',
             '_ANNEAL_TIME' : '',
             '_ANNEAL_TEMP' : '',
             '_GEN_VEL' : 'yes', # none hbonds all-bonds h-angles all-angles
             '_GEN_TEMP' : 300,
             '_SEED' : seed,
             '_CONSTRAINTS' : 'all-bonds',
             '_ACC_GRPS' : '',
             '_ACCELERATE' : '', # a_x a_y a_z for each group
             '_ENERGYGRP_EXCL' : '',
             '_ENERGYGRPS' : '',
             '_FREEZEDIM' : '',
             '_FREEZEGRPS' : ''} # Y Y N N N N for two groups
    def silence(self):
        """Suppress the per-option echo printed by Set()."""
        self.verbose = False
    def Set(self,key,value):
        """Set option *key* to *value*.

        NOTE(review): a plain dict assignment never raises KeyError, so the
        except branch below is effectively unreachable; unknown keys are
        silently added rather than rejected -- confirm that is intended.
        """
        try:
            self.opt[key] = value
            if self.verbose:
                print "MD: Set %-20s = %-20s" % (key, str(value))
        except KeyError:
            print "No such key", key, "in options"
            assert False
    def Tag(self,tag):
        """Set the job-name prefix used by write_qmd_sh()."""
        self.tag = tag
    def gen_mdrun_cmd(self,
                      _s = 'topol.tpr',
                      _o = 'traj.trr',
                      _x = 'traj.xtc',
                      _c = 'confout.gro',
                      _cpo = 'state.cpt',
                      _cpt = 18,
                      _maxh = 36,
                      _d = '_d'):
        """Assemble and cache the mdrun command line; returns it as a string.

        *_d* is appended to the binary name (e.g. double-precision 'mdrun_d').
        NOTE: "%0s"/"%1s" are ordinary %s conversions with a no-op minimum
        field width, not positional arguments.
        """
        cmd = 'mdrun%0s -s %1s -o %1s -x %1s -c %1s -cpo %1s -cpt %1d -maxh %1d' \
            % (_d,_s,_o,_x,_c,_cpo,_cpt,_maxh)
        self.mdrun_cmd = cmd
        return cmd
    def gen_grompp_cmd(self,
                       _c = 'conf.gro',
                       _p = 'topol.top',
                       _f = 'grompp.mdp',
                       _n = '',
                       _o = 'topol.tpr',
                       _maxnum = 0):
        """Assemble and cache the grompp command line; returns it as a string."""
        # Index file?
        if _n != '':
            _n = '-n '+_n+' '
        cmd = 'grompp -c %1s -p %1s -f %1s %0s-o %1s -maxwarn %1d' \
            % (_c,_p,_f,_n,_o,_maxnum)
        self.grompp_cmd = cmd
        return cmd
    def auto_grompp(self, tpr = 'topol.tpr', maxwarn = 1):
        """Run grompp in the current directory, auto-detecting .gro/.top files.

        Writes a grompp.mdp from the template if none exists, then removes the
        temporary mdp files. Exits the process if grompp fails.
        NOTE(review): '&>' is a bash-ism; os.system uses /bin/sh, where this
        may not redirect stderr as intended -- confirm on the target platform.
        """
        extDict = dict_by_ext()
        gro = extDict['gro']
        top = extDict['top']
        if not 'grompp.mdp' in os.listdir('./'):
            self.write_grompp_mdp()
        self.gen_grompp_cmd(gro,top,_o=tpr,_maxnum=maxwarn)
        sig = os.system('%s &> /dev/null' % self.grompp_cmd)
        if sig:
            print "Grompp failed"
            sys.exit(1)
        os.system('rm grompp.mdp mdout.mdp')
        return
    def write_grompp_mdp(self,outfile = 'grompp.mdp', fill = True):
        """Write the .mdp template to *outfile*.

        When *fill* is True, re-randomize _SEED and substitute every
        _PLACEHOLDER with its value from self.opt via auto_replace().
        """
        # Trailing comma: Python 2 print without newline (progress message).
        print "Generating MDP file",
        outt = open(outfile,'w')
        outt.write('''; CREATED BY __MDSHELL__PY
; RUN CONTROL PARAMETERS
integrator = _INTEGRATOR
; Start time and timestep in ps
tinit = _TINIT
dt = _DT
nsteps = _NSTEPS
; For exact run continuation or redoing part of a run
init_step = 0
; mode for center of mass motion removal
;comm-mode = None
;comm-mode = None
comm-mode = _COMM_MODE
;comm-mode = Angular
; number of steps for center of mass motion removal
nstcomm = _NSTCOMM
; group(s) for center of mass motion removal
comm-grps = _COMM_GRPS
; LANGEVIN DYNAMICS OPTIONS
; Temperature, friction coefficient (amu/ps) and random seed
bd-fric = 0.5
ld-seed = _SEED
; ENERGY MINIMIZATION OPTIONS
; Force tolerance and initial step-size
emtol = 1
emstep = 0.01
; Max number of iterations in relax_shells
niter = 20
; Step size (1/ps^2) for minimization of flexible constraints
fcstep = 0
; Frequency of steepest descents steps when doing CG
nstcgsteep = 1000
nbfgscorr = 10
; OUTPUT CONTROL OPTIONS
; Output frequency for coords (x), velocities (v) and forces (f)
nstxout = _TRROUT
nstvout = _TRROUT
nstfout = _TRROUT
; Checkpointing helps you continue after crashes
nstcheckpoint = 0
; Output frequency for energies to log file and energy file
nstlog = _LOGOUT
nstenergy = _LOGOUT
; Output frequency and precision for xtc file
nstxtcout = _XTCOUT
xtc-precision = 1000
; This selects the subset of atoms for the xtc file. You can
; select multiple groups. By default all atoms will be written.
xtc-grps =
; Selection of energy groups
energygrps = _ENERGYGRPS
; NEIGHBORSEARCHING PARAMETERS
; nblist update frequency
nstlist = _NSTLIST
; ns algorithm (simple or grid)
ns_type = _NS_TYPE
; Periodic boundary conditions: xyz (default), no (vacuum)
; or full (infinite systems only)
pbc = _PBC
;pbc = xyz
;pbc = no
; nblist cut-off
rlist = _CUTOFF
; OPTIONS FOR ELECTROSTATICS AND VDW
; Method for doing electrostatics
;coulombtype = Cut-off
coulombtype = _COULOMB
;coulombtype = PME
rcoulomb-switch = 0
rcoulomb = _CUTOFF
; Dielectric constant (DC) for cut-off or DC of reaction field
epsilon-r = 1
; Method for doing Van der Waals
vdw-type = Cut-off
; cut-off lengths
rvdw-switch = 0
rvdw = _CUTOFF
; Apply long range dispersion corrections for Energy and Pressure
;DispCorr = EnerPres
; Extension of the potential lookup tables beyond the cut-off
table-extension = 1
; Spacing for the PME/PPPM FFT grid
fourierspacing = 0.12
; FFT grid size, when a value is 0 fourierspacing will be used
fourier_nx = 0
fourier_ny = 0
fourier_nz = 0
; EWALD/PME/PPPM parameters
pme_order = 4
ewald_rtol = 1e-05
ewald_geometry = 3d
epsilon_surface = 0
optimize_fft = no
; GENERALIZED BORN ELECTROSTATICS
; Algorithm for calculating Born radii
gb_algorithm = Still
; Frequency of calculating the Born radii inside rlist
nstgbradii = 1
; Cutoff for Born radii calculation; the contribution from atoms
; between rlist and rgbradii is updated every nstlist steps
rgbradii = 2
; Salt concentration in M for Generalized Born models
gb_saltconc = 0
; IMPLICIT SOLVENT (for use with Generalized Born electrostatics)
implicit_solvent = No
; OPTIONS FOR WEAK COUPLING ALGORITHMS
; Temperature coupling
Tcoupl = _TCOUPL
;Tcoupl = Berendsen
;Tcoupl = nose-hoover
; Groups to couple separately
tc-grps = _TC_GRPS
; Time constant (ps) and reference temperature (K)
tau_t = _TAU_T
ref_t = _REF_T
; Pressure coupling
;Pcoupl = Parrinello-Rahman
Pcoupl = _PCOUPLING
;Pcoupl = no
Pcoupltype = _PCOUPLTYPE
; Time constant (ps), compressibility (1/bar) and reference P (bar)
tau_p = _TAU_P
compressibility = _COMPRESSIBILITY
ref_p = _REF_P
; Random seed for Andersen thermostat
andersen_seed = _SEED
; SIMULATED ANNEALING
; Type of annealing for each temperature group (no/single/periodic)
annealing = _ANNEALING
; Number of time points to use for specifying annealing in each group
annealing_npoints = _ANNEAL_NPOINTS
; List of times at the annealing points for each group
annealing_time = _ANNEAL_TIME
; Temp. at each annealing point, for each group.
annealing_temp = _ANNEAL_TEMP
; GENERATE VELOCITIES FOR STARTUP RUN
;gen_vel = no
gen_vel = _GEN_VEL
gen_temp = _GEN_TEMP
gen_seed = _SEED
; OPTIONS FOR BONDS
;constraints = none
constraints = _CONSTRAINTS
; Type of constraint algorithm
constraint-algorithm = Lincs
; Do not constrain the start configuration
unconstrained-start = no
; Use successive overrelaxation to reduce the number of shake iterations
Shake-SOR = no
; Relative tolerance of shake
shake-tol = 1e-04
; Highest order in the expansion of the constraint coupling matrix
lincs-order = 4
; Number of iterations in the final step of LINCS. 1 is fine for
; normal simulations, but use 2 to conserve energy in NVE runs.
; For energy minimization with constraints it should be 4 to 8.
lincs-iter = 8
; Lincs will write a warning to the stderr if in one step a bond
; rotates over more degrees than
lincs-warnangle = 30
; Convert harmonic bonds to morse potentials
morse = no
; ENERGY GROUP EXCLUSIONS
; Pairs of energy groups for which all non-bonded interactions are excluded
energygrp_excl = _ENERGYGRP_EXCL
; NMR refinement stuff
; Distance restraints type: No, Simple or Ensemble
disre = No
; Force weighting of pairs in one distance restraint: Conservative or Equal
disre-weighting = Conservative
; Use sqrt of the time averaged times the instantaneous violation
disre-mixed = no
disre-fc = 1000
disre-tau = 0
; Output frequency for pair distances to energy file
nstdisreout = 100
; Orientation restraints: No or Yes
orire = no
; Orientation restraints force constant and tau for time averaging
orire-fc = 0
orire-tau = 0
orire-fitgrp =
; Output frequency for trace(SD) to energy file
nstorireout = 100
; Dihedral angle restraints: No, Simple or Ensemble
dihre = No
dihre-fc = 1000
dihre-tau = 0
; Output frequency for dihedral values to energy file
nstdihreout = 100
; Free energy control stuff
free-energy = no
init-lambda = 0
delta-lambda = 0
sc-alpha = 0
sc-sigma = 0.3
; Non-equilibrium MD stuff
acc-grps = _ACC_GRPS
accelerate = _ACCELERATE
freezegrps = _FREEZEGRPS
freezedim = _FREEZEDIM
cos-acceleration = 0
; Electric fields
; Format is number of terms (int) and for all terms an amplitude (real)
; and a phase angle (real)
E-x =
E-xt =
E-y =
E-yt =
E-z =
E-zt =
; User defined thingies
user1-grps =
user2-grps =
userint1 = 0
userint2 = 0
userint3 = 0
userint4 = 0
userreal1 = 0
userreal2 = 0
userreal3 = 0
userreal4 = 0
''')
        outt.close()
        if fill:
            print "- setting MD options."
            # Fresh seed for every generated file so repeated runs differ.
            self.opt['_SEED'] = int(np.random.uniform(1,1000000)+0.5)
            subs = [ key for key in self.opt.keys() ]
            reps = [ str(self.opt[key]) for key in self.opt.keys() ]
            auto_replace(outfile,subs,reps)
        else:
            print "..."
    def write_qmd_sh(self, outfile = 'qmd.sh', username = getpass.getuser()):
        """Write a queue submission script with the cached grompp/mdrun commands.

        NOTE(review): the *username* default is evaluated once at import time,
        not per call.
        """
        write_qsub_sh_template(outfile, username)
        auto_replace(outfile,
                     ['_DESCRIPTION','_GROMPP_CMD','#_MDRUN_CMD','_USERNAME'],
                     [self.tag,self.grompp_cmd,self.mdrun_cmd,username])
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .files.action import Files
from .run.action import Run
|
import wikipedia
'''
This is a wikipedia API usage sample.
'''
# input() already returns str in Python 3, so no extra str() is needed.
query = input('input: ')
# Fetch the 4-sentence summary ONCE and reuse it; the original called the
# remote API twice and discarded the first response.
summary = wikipedia.summary(query, sentences=4)
print(summary)
|
import os
import settings
import flask
from flask import send_file, request, abort, render_template
from functools import wraps
import bucket
import image
app = flask.Flask(__name__)
# Development setting; disable DEBUG before deploying to production.
app.config["DEBUG"] = True
def require_api_key(view_function):
    """Decorator that rejects requests lacking a valid API key (header or query).

    The key may arrive either in the "x-api-key" header or the "key" query
    parameter. Comparison uses hmac.compare_digest so the check runs in
    constant time, preventing timing attacks on the secret.
    """
    import hmac

    @wraps(view_function)
    def decorated_function(*args, **kwargs):
        api_key = os.environ.get('api_key')
        # Check header first, then query string (same precedence as before).
        for supplied in (request.headers.get('x-api-key'), request.args.get('key')):
            # Skip empty candidates; also refuse everything when no key is
            # configured in the environment.
            if api_key and supplied and hmac.compare_digest(supplied, api_key):
                return view_function(*args, **kwargs)
        abort(401)
    return decorated_function
@app.route('/images/<size>/<filename>', methods=['GET'])
def get_resized_image(size, filename):
    """Serve *filename* resized to the WxH given by *size* (e.g. "100x80").

    Resized copies are cached in the bucket under a size-suffixed key so
    repeat requests skip the download/resize round trip.
    """
    w, h = size.split("x")
    # rsplit keeps names containing extra dots ("a.b.jpg") working.
    ext = filename.rsplit(".", 1)[-1]
    filename_with_size = "{}_{}x{}.jpg".format(filename, w, h)
    # Cache hit: a previously resized copy already exists.
    file_path = bucket.download(filename_with_size)
    if file_path:
        return send_file(file_path, mimetype='image/{}'.format(ext))
    # Cache miss: fetch the original, resize it, cache the result.
    file_path = bucket.download(filename)
    if not file_path:
        return abort(404)
    resized_file_path = image.resize(file_path, (int(w), int(h)))
    # BUG FIX: upload under the size-suffixed key. The original uploaded
    # under `filename`, overwriting the full-size source image in the bucket.
    bucket.upload(resized_file_path, filename_with_size)
    return send_file(resized_file_path, mimetype='image/{}'.format(ext))
@app.route('/images/<filename>', methods=['GET'])
def get_image(filename):
    """Serve the original image *filename* straight from the bucket."""
    # rsplit tolerates dots inside the basename ("a.b.jpg"); the original
    # two-target unpack raised ValueError (-> 500) for such names.
    ext = filename.rsplit(".", 1)[-1]
    file_path = bucket.download(filename)
    if not file_path:
        return abort(404)
    return send_file(file_path, mimetype='image/{}'.format(ext))
@app.route('/images', methods=['post'])
@require_api_key
def upload():
    """Accept a multipart upload in the "image" field and store it in the bucket.

    Returns 400 for a missing/empty file (client error) and 500 when the
    bucket upload fails (server error). The original raised KeyError (-> 500)
    when the field was absent and had the 400/500 statuses inverted.
    """
    # Renamed from `image` to avoid shadowing the module-level `image` import.
    image_file = request.files.get('image')
    if image_file is None or image_file.filename == '':
        return abort(400)
    file_path = './.tmp/{}'.format(image_file.filename)
    image_file.save(file_path)
    if bucket.upload(file_path, image_file.filename):
        return "uploaded"
    return abort(500)
@app.route('/')
def home():
    """Render the landing page."""
    return flask.render_template('index.html')
@app.route('/api/docs')
def docs():
    """Render the API documentation page."""
    return flask.render_template('docs.html')
# Start the development server only when executed directly.
if __name__ == '__main__':
    app.run()
|
from django.core.exceptions import PermissionDenied
from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils.translation import gettext as _
from ...core.exceptions import Banned
from ..bans import get_user_ban
from ..decorators import deny_authenticated, deny_banned_ips
from ..tokens import is_activation_token_valid
# Resolve the active user model once at import time.
User = get_user_model()
def activation_view(f):
    """Decorator for account-activation views.

    Denies authenticated users and banned IPs, and blocks the flow entirely
    when single sign-on is enabled (activation then happens off-site).
    """
    from functools import wraps

    @deny_authenticated
    @deny_banned_ips
    @wraps(f)  # preserve the wrapped view's name/docs for URL debugging tools
    def decorator(request, *args, **kwargs):
        if request.settings.enable_sso:
            raise PermissionDenied(
                _("Please use the 3rd party site to activate your account.")
            )
        return f(request, *args, **kwargs)
    return decorator
@activation_view
def request_activation(request):
    """Render the page from which a user can request a new activation link."""
    api_url = reverse("misago:api:send-activation")
    request.frontend_context.update({"SEND_ACTIVATION_API": api_url})
    return render(request, "misago/activation/request.html")
class ActivationStopped(Exception):
    """Raised to halt activation because the account is already active."""

    pass
class ActivationError(Exception):
    """Raised when the provided activation token is invalid."""

    pass
@activation_view
def activate_by_token(request, pk, token):
    """Activate the user identified by *pk* when *token* is valid.

    Renders a "stopped" page if the account is already active, an error page
    (HTTP 400) for invalid tokens, and raises Banned for banned users.
    """
    # NOTE(review): the lookup filters on is_active=True, so it targets users
    # Django already marks active but whose requires_activation flag is still
    # set -- confirm this matches the registration flow.
    inactive_user = get_object_or_404(User, pk=pk, is_active=True)
    try:
        if not inactive_user.requires_activation:
            message = _("%(user)s, your account is already active.")
            raise ActivationStopped(message % {"user": inactive_user.username})
        if not is_activation_token_valid(inactive_user, token):
            message = _(
                "%(user)s, your activation link is invalid. "
                "Try again or request new activation link."
            )
            raise ActivationError(message % {"user": inactive_user.username})
        ban = get_user_ban(inactive_user, request.cache_versions)
        if ban:
            # Banned users may not activate; handled by Misago's ban middleware.
            raise Banned(ban)
    except ActivationStopped as e:
        return render(request, "misago/activation/stopped.html", {"message": e.args[0]})
    except ActivationError as e:
        return render(
            request, "misago/activation/error.html", {"message": e.args[0]}, status=400
        )
    # Success path: clear the activation requirement and confirm to the user.
    inactive_user.requires_activation = User.ACTIVATION_NONE
    inactive_user.save(update_fields=["requires_activation"])
    message = _("%(user)s, your account has been activated!")
    return render(
        request,
        "misago/activation/done.html",
        {"message": message % {"user": inactive_user.username}},
    )
|
"""
Rethinking Portrait Matting with Privacy Preserving
Copyright (c) 2022, Sihan Ma (sima7436@uni.sydney.edu.au) and Jizhizi Li (jili8515@uni.sydney.edu.au)
Licensed under the MIT License (see LICENSE for details)
Github repo: https://github.com/ViTAE-Transformer/ViTAE-Transformer-Matting.git
Paper link: https://arxiv.org/abs/2203.16828
"""
import os
import shutil
import cv2
import numpy as np
import torch
import glob
import functools
from torchvision import transforms
##########################
### Pure functions
##########################
def extract_pure_name(original_name):
    """Return *original_name* with its (final) file extension removed."""
    return os.path.splitext(original_name)[0]
def listdir_nohidden(path):
new_list = []
for f in os.listdir(path):
if not f.startswith('.'):
new_list.append(f)
new_list.sort()
return new_list
def create_folder_if_not_exists(folder_path):
    """Create *folder_path* (with parents) if it does not already exist.

    Uses exist_ok=True, which avoids the check-then-create race present in
    the original os.path.exists() guard.
    """
    os.makedirs(folder_path, exist_ok=True)
def refresh_folder(folder_path):
    """Ensure *folder_path* exists and is empty (wipes it if already present)."""
    if os.path.exists(folder_path):
        shutil.rmtree(folder_path)
    os.makedirs(folder_path)
def save_test_result(save_dir, predict):
    """Write a [0, 1] float prediction to disk as an 8-bit image.

    Note: despite its name, *save_dir* is the full output file path.
    """
    predict = (predict * 255).astype(np.uint8)
    cv2.imwrite(save_dir, predict)
def generate_composite_img(img, alpha_channel):
    """Compose a 4-channel image from *img* and a [0, 1] float *alpha_channel*.

    Each colour channel is pre-multiplied by alpha; the alpha channel itself
    is scaled to 0-255 and appended as the fourth channel.
    """
    b_channel, g_channel, r_channel = cv2.split(img)
    b_channel = b_channel * alpha_channel
    g_channel = g_channel * alpha_channel
    r_channel = r_channel * alpha_channel
    alpha_channel = (alpha_channel*255).astype(b_channel.dtype)
    # NOTE(review): channels are merged in (r, g, b, a) order although the
    # variable name says BGRA; this swaps red/blue relative to OpenCV's usual
    # BGR layout -- confirm this is intentional for the downstream consumer.
    img_BGRA = cv2.merge((r_channel,g_channel,b_channel,alpha_channel))
    return img_BGRA
##########################
### for dataset processing
##########################
def trim_img(img):
    """Reduce a multi-channel image to its first channel; pass 2-D input through."""
    return img[:, :, 0] if img.ndim > 2 else img
def gen_trimap_with_dilate(alpha, kernel_size):
    """Derive a 0/128/255 trimap from an 8-bit alpha matte via dilate/erode.

    alpha: matte where 0 = background and 255 = foreground.
    kernel_size: side of the elliptical structuring element controlling the
        width of the unknown (128) band.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size,kernel_size))
    # Everything that is not pure background (fg + unknown) and pure fg.
    fg_and_unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
    fg = np.array(np.equal(alpha, 255).astype(np.float32))
    # Grow the non-background region and shrink the foreground region; the
    # gap between them becomes the unknown band.
    dilate = cv2.dilate(fg_and_unknown, kernel, iterations=1)
    erode = cv2.erode(fg, kernel, iterations=1)
    trimap = erode *255 + (dilate-erode)*128
    return trimap.astype(np.uint8)
def normalize_batch_torch(data_t):
    """Normalize a batch of RGB images with ImageNet statistics.

    data_t: float tensor of shape (N, 3, H, W).
    Returns (data_t - mean) / std per channel, identical to applying
    torchvision's transforms.Normalize(mean, std) to every sample, but
    vectorized via broadcasting instead of a per-sample Python loop (and
    without requiring torchvision at call time).
    """
    mean = torch.tensor([0.485, 0.456, 0.406],
                        dtype=data_t.dtype, device=data_t.device).view(1, 3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225],
                       dtype=data_t.dtype, device=data_t.device).view(1, 3, 1, 1)
    return (data_t - mean) / std
##########################
### Functions for fusion
##########################
def gen_trimap_from_segmap_e2e(segmap):
    """Convert a (1, C, H, W) segmentation score map into a uint8 trimap.

    Per-pixel argmax over classes: class 0 -> 0 (background),
    class 1 -> 128 (unknown), class 2 -> 255 (foreground).
    """
    labels = np.argmax(segmap, axis=1)[0].astype(np.int64)
    trimap = np.where(labels == 1, 128, np.where(labels == 2, 255, labels))
    return trimap.astype(np.uint8)
def get_masked_local_from_global(global_sigmoid, local_sigmoid):
    """Fuse local alpha predictions into the global trimap prediction.

    global_sigmoid: per-class scores with classes (bg=0, unknown=1, fg=2)
        along dim 1; local_sigmoid: alpha predictions.
    Returns local values where the global pass predicts "unknown", a constant
    1 where it predicts foreground, and 0 for background.
    """
    values, index = torch.max(global_sigmoid,1)
    index = index[:,None,:,:].float()
    ### index <===> [0, 1, 2]
    ### bg_mask <===> [1, 0, 0]
    bg_mask = index.clone()
    bg_mask[bg_mask==2]=1
    bg_mask = 1- bg_mask
    # NOTE(review): bg_mask is computed but never used below.
    ### trimap_mask <===> [0, 1, 0]
    trimap_mask = index.clone()
    trimap_mask[trimap_mask==2]=0
    ### fg_mask <===> [0, 0, 1]
    fg_mask = index.clone()
    fg_mask[fg_mask==1]=0
    fg_mask[fg_mask==2]=1
    # Unknown pixels take the local prediction; fg pixels contribute 1.0.
    fusion_sigmoid = local_sigmoid*trimap_mask+fg_mask
    return fusion_sigmoid
def get_masked_local_from_global_test(global_result, local_result):
    """Fuse global and local predictions at test time.

    Pixels the global pass marked pure foreground (255) or background (0)
    keep the global value rescaled to [0, 1]; all remaining (unknown) pixels
    take the local prediction.
    """
    local_weight = np.ones(global_result.shape)
    local_weight[(global_result == 255) | (global_result == 0)] = 0
    return global_result * (1. - local_weight) / 255 + local_result * local_weight
#######################################
### Function to generate training data
#######################################
def get_valid_names(*dirs):
    """Return the sorted basenames present in every directory, or None if none."""
    name_sets = [get_name_set(d) for d in dirs]
    common = functools.reduce(lambda acc, names: acc & names, name_sets)
    if not common:
        return None
    return sorted(common)
def get_name_set(dir_name):
path_list = glob.glob(os.path.join(dir_name, '*'))
name_set = set()
for path in path_list:
name = os.path.basename(path)
name = os.path.splitext(name)[0]
if name.startswith(".DS"): continue
name_set.add(name)
return name_set
def list_abspath(data_dir, ext, data_list):
    """Join every name in *data_list* with *data_dir* and extension *ext*."""
    paths = []
    for name in data_list:
        paths.append(os.path.join(data_dir, name + ext))
    return paths
"""signalserver URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from reports.views import dashboard
from accounts.views import register
from accounts.views import custom_login
from accounts.views import custom_logout
from accounts.forms import UserForm
from django.contrib.auth import views
urlpatterns = [
    url(r'^$', dashboard, name='dashboard'),
    url(r'^register/', register, name='register'),
    url(r'^redirect', custom_login,
        name='redirect'),
    url(r'^logout', custom_logout, name='logout'),
    # Password-reset flow using project-specific templates/sender address.
    url(r'^password_reset/$', views.password_reset,
        {'template_name': 'registration/password_reset.html',
         'from_email': 'bavc.signalserver@gmail.com'},
        name='password_reset'),
    url(r'^password_reset/done/$', views.password_reset_done,
        {'template_name': 'registration/password_reset_done.html'},
        name='password_reset_done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.password_reset_confirm,
        {'template_name': 'registration/reset.html'},
        name='password_reset_confirm'),
    url(r'^reset/done/$',
        views.password_reset_complete,
        {'template_name': 'registration/password_reset_complete.html'},
        name='password_reset_complete'),
    url(r'^fileuploads/', include('fileuploads.urls')),
    # Django's stock auth URLs. BUG FIX: the original included this twice
    # (once with a non-raw pattern string); the duplicate is removed.
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^policy/', include('policies.urls')),
    url(r'^groups/', include('groups.urls')),
    url(r'^signals/', include('signals.urls')),
    url(r'^reports/', include('reports.urls')),
] + static(settings.BOWER_COMPONENTS_URL,
           document_root=settings.BOWER_COMPONENTS_ROOT)
|
# Simulate operations on a fixed 5-letter sequence. Each round reads two
# ints from stdin: a command and a repeat count n.
#   1: rotate left n times    2: rotate right n times
#   3: swap the first two items n times    4 with n == 1: stop and print
a = ["A", "B", "C", "D", "E"]
while True:
    # BUG FIX: the original reused `b` both as the command and as the popped
    # element, so later `if b == 2/3/4` tests silently compared a string.
    cmd = int(input())
    n = int(input())
    if cmd == 1:
        # Move the front element to the back n times (rotate left).
        for _ in range(n):
            a.append(a.pop(0))
    elif cmd == 2:
        # Move the last element to the front n times (rotate right).
        for _ in range(n):
            a.insert(0, a.pop(4))
    elif cmd == 3:
        # Swap the first two elements n times.
        for _ in range(n):
            a[0], a[1] = a[1], a[0]
    elif cmd == 4 and n == 1:
        break
# Print the final arrangement, space-separated.
for item in a:
    print(item, end=" ")
|
import numpy
import matplotlib.pyplot as plt
import os
from os.path import dirname, join, exists
# Directory containing this script; used to resolve data and output paths.
curdir = dirname(__file__)
"""
Plot experimental slope
"""
def get_time(string):
    """Convert a bytes "HH:MM:SS" timestamp into seconds as a float."""
    hours, minutes, seconds = (float(part) for part in string.decode("ascii").split(":"))
    return 3600 * hours + 60 * minutes + seconds
# Load the CSV; column 1 holds "HH:MM:SS" timestamps converted to seconds.
data = numpy.genfromtxt(join(curdir, "../data/exp/diffusion-slope.csv"),
                        converters={1: get_time},
                        delimiter=",")
plt.figure(figsize=(3, 3 * 0.8))
plt.style.use("science")
# Mask for the first hour of data; only used by the commented-out
# partial plots below.
cond = numpy.where(data[:, 1] < 3600)
# plt.plot(data[:, 1][cond] / 3600, data[:, 2][cond] / 1e-3)
# plt.plot(data[:, 1][data[:, 1] > 7200] / 3600, data[:, 2][data[:, 1] > 7200] / 1e-3)
# x: time in hours; y: conductivity scaled by 1e-3 (presumably to milli-units
# -- confirm against the data file's units).
plt.plot(data[:, 1] / 3600, data[:, 2] / 1e-3)
plt.xlabel("t (h)")
plt.ylabel("Conductivity")
plt.savefig(join(curdir, "../img/diffusion-slope.svg"))
|
from django import forms
from ACCNTS.models import Invoice,PayRoll
class PaymentForm(forms.ModelForm):
    """Form for recording an Invoice payment amount."""

    class Meta:
        model = Invoice
        fields = ('amount',)
class SearchForm(forms.Form):
    """Date-range search form (start/end datetimes).

    BUG FIX: this was declared as forms.ModelForm without a Meta/model,
    which raises ValueError ("ModelForm has no model class specified") the
    moment it is instantiated. It declares only plain fields and is bound to
    no model, so a plain forms.Form is the correct base class.
    """
    start = forms.DateTimeField()
    end = forms.DateTimeField()
class PayRollForm(forms.ModelForm):
    """Form for creating/updating a PayRoll entry."""

    class Meta:
        model = PayRoll
        fields = (
            'employee',
            'pension',
            'lunch',
            'month'
        )
#!/usr/bin/env python3
from common_functions import *
class ExpressStatus:
    """AppIndicator tray icon showing ExpressVPN connection status.

    Builds a GTK menu, then polls the expressvpn CLI on background threads to
    keep the icon in sync. gtk / appindicator / notify / helper functions
    (cli_code, app_output, get_status, images) come from common_functions and
    the __main__ imports below.
    """
    def __init__(self):
        notify.init(APPINDICATOR_ID)
        # Start with the error icon until a status check succeeds.
        self.indicator = appindicator.Indicator.new(APPINDICATOR_ID, error_image,
                                                    appindicator.IndicatorCategory.APPLICATION_STATUS)
        self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
        self.indicator.set_menu(self.build_menu())
        # check existence of the app
        self.existence_checker()
        # Enter the GTK main loop (blocks until quit()).
        gtk.main()
    def existence_checker(self):
        """check first if the express-vpn app exist, And prepare the thread of checking the status of the app"""
        self.app_exist = cli_code()
        if not self.app_exist:
            # App missing: poll in the background until it gets installed.
            self.test_app_existance = threading.Thread(target=self.check_existence)
            self.test_app_existance.daemon = True
            self.test_app_existance.start()
        self.test_connectivity = threading.Thread(target=self.check_status)
        self.test_connectivity.daemon = True
        if self.app_exist:
            self.test_connectivity.start()
    def do_func(self, _, func):
        """Menu callback: run the named method if the CLI exists, else notify."""
        if self.app_exist:
            self.thread_fun(func)
        else:
            notify.Notification.new(f"ExpressVpn Status", "It Seems that ExpressVpn-app doesn't Exist on the system",
                                    working_image).show()
    def thread_fun(self, func):
        """It starts a thread for the input-func"""
        # func is a method name string supplied by build_menu (trusted input).
        do_chosen_fun = threading.Thread(target=eval(f"self.{func}"))
        do_chosen_fun.daemon = True
        do_chosen_fun.start()
    def check_existence(self):
        """this function runs if the app doesn't exist, so it keeps checking and breaks if the app got installed"""
        while True:
            self.app_exist = cli_code()
            if self.app_exist:
                self.test_connectivity.start()
                break
            time.sleep(15)
    def check_status(self):
        """This function checks the status of the connection of the app"""
        while True:
            # NOTE(review): assumes app_output() yields a container of bools
            # where True means "connected" -- confirm in common_functions.
            s = app_output()
            img_exist = self.indicator.get_icon()
            if True not in s:
                if img_exist != error_image:
                    self.indicator.set_icon(error_image)
            elif img_exist != working_image:
                self.indicator.set_icon(working_image)
            time.sleep(3)
    @staticmethod
    def express_status():
        """Show the CLI-reported connection status as a desktop notification."""
        s = get_status()
        notify.Notification.new(f"ExpressVpn Status", s, working_image).show()
    @staticmethod
    def connect_smart():
        # Connect using ExpressVPN's "smart location" default.
        os.system("expressvpn connect")
    @staticmethod
    def connect_stop():
        os.system("expressvpn disconnect")
    @staticmethod
    def quit(_):
        # Tear down notifications and the GTK loop, then end the process.
        notify.uninit()
        gtk.main_quit()
        exit(0)
    def build_menu(self):
        """Assemble the indicator's right-click menu and wire up callbacks."""
        menu = gtk.Menu()
        status_btn = gtk.MenuItem(label='Express status')
        status_btn.connect('activate', self.do_func, 'express_status')
        connect_smart_btn = gtk.MenuItem(label='Express Connect')
        connect_smart_btn.connect('activate', self.do_func, 'connect_smart')
        connect_stop_btn = gtk.MenuItem(label='Express Disable')
        connect_stop_btn.connect('activate', self.do_func, "connect_stop")
        quit_btn = gtk.MenuItem(label='quit')
        quit_btn.connect('activate', self.quit)
        menu.append(status_btn)
        menu.append(connect_smart_btn)
        menu.append(connect_stop_btn)
        menu.append(quit_btn)
        menu.show_all()
        return menu
if __name__ == "__main__":
    # GTK / AppIndicator / Notify bindings are imported here, at run time,
    # binding the gtk/appindicator/notify globals the class methods use.
    import gi
    gi.require_version("Gtk", "3.0")
    gi.require_version("AppIndicator3", "0.1")
    gi.require_version('Notify', '0.7')
    from gi.repository import Gtk as gtk
    from gi.repository import AppIndicator3 as appindicator
    from gi.repository import Notify as notify
    ExpressStatus()
|
from __future__ import division
import torch
from torch_cluster import neighbor_sampler
from torch_geometric.utils import degree
from torch_geometric.utils.repeat import repeat
from .data import size_repr
class Block(object):
    """Lightweight record describing one sampled bipartite layer."""

    def __init__(self, n_id, e_id, edge_index, size):
        # Node ids, edge ids, the connectivity tensor and the
        # (rows, cols) size of this bipartite layer.
        self.n_id = n_id
        self.e_id = e_id
        self.edge_index = edge_index
        self.size = size

    def __repr__(self):
        parts = ['{}={}'.format(key, size_repr(value))
                 for key, value in self.__dict__.items()]
        return '{}({})'.format(self.__class__.__name__, ', '.join(parts))
class DataFlow(object):
    """Ordered collection of Block objects describing a sampled message flow.

    Blocks are appended hop-by-hop starting from the root nodes (n_id) and
    are consumed in reverse order, i.e. in message-passing order.
    """
    def __init__(self, n_id, flow='source_to_target'):
        # Root node ids of the mini-batch.
        self.n_id = n_id
        self.flow = flow
        # Node ids of the most recently appended layer (starts at the roots).
        self.__last_n_id__ = n_id
        self.blocks = []
    @property
    def batch_size(self):
        """Number of root nodes in this flow."""
        return self.n_id.size(0)
    def append(self, n_id, e_id, edge_index):
        """Append one bipartite layer linking the previous layer to *n_id*."""
        # Which slot of the size tuple each side occupies depends on flow.
        i, j = (0, 1) if self.flow == 'target_to_source' else (1, 0)
        size = [None, None]
        size[i] = self.__last_n_id__.size(0)
        size[j] = n_id.size(0)
        block = Block(n_id, e_id, edge_index, tuple(size))
        self.blocks.append(block)
        self.__last_n_id__ = n_id
    def __len__(self):
        return len(self.blocks)
    def __getitem__(self, idx):
        # Blocks are stored outside-in but indexed/iterated inside-out.
        return self.blocks[::-1][idx]
    def __iter__(self):
        for block in self.blocks[::-1]:
            yield block
    def to(self, device):
        """Move every block's edge_index to *device* (in place); returns self."""
        for block in self.blocks:
            block.edge_index = block.edge_index.to(device)
        return self
    def __repr__(self):
        n_ids = [self.n_id] + [block.n_id for block in self.blocks]
        sep = '<-' if self.flow == 'source_to_target' else '->'
        info = sep.join([str(n_id.size(0)) for n_id in n_ids])
        return '{}({})'.format(self.__class__.__name__, info)
class NeighborSampler(object):
    r"""The neighbor sampler from the `"Inductive Representation Learning on
    Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper which iterates
    over graph nodes in a mini-batch fashion and constructs sampled subgraphs
    of size :obj:`num_hops`.
    It returns a generator of :obj:`DataFlow` that defines the message
    passing flow to the root nodes via a list of :obj:`num_hops` bipartite
    graph objects :obj:`edge_index` and the initial start nodes :obj:`n_id`.
    Args:
        data (torch_geometric.data.Data): The graph data object.
        size (int or float or [int] or [float]): The number of neighbors to
            sample (for each layer). The value of this parameter can be either
            set to be the same for each neighborhood or percentage-based.
        num_hops (int): The number of layers to sample.
        batch_size (int, optional): How many samples per batch to load.
            (default: :obj:`1`)
        shuffle (bool, optional): If set to :obj:`True`, the data will be
            reshuffled at every epoch. (default: :obj:`False`)
        drop_last (bool, optional): If set to :obj:`True`, will drop the last
            incomplete batch if the number of nodes is not divisible by the
            batch size. If set to :obj:`False` and the size of graph is not
            divisible by the batch size, the last batch will be smaller.
            (default: :obj:`False`)
        add_self_loops (bool, optional): If set to :obj:`True`, will add
            self-loops to each sampled neigborhood. (default: :obj:`False`)
        flow (string, optional): The flow direction of message passing
            (:obj:`"source_to_target"` or :obj:`"target_to_source"`).
            (default: :obj:`"source_to_target"`)
    """
    def __init__(self,
                 data,
                 size,
                 num_hops,
                 batch_size=1,
                 shuffle=False,
                 drop_last=False,
                 add_self_loops=False,
                 flow='source_to_target'):
        self.data = data
        # Broadcast a scalar size to one entry per hop.
        self.size = repeat(size, num_hops)
        self.num_hops = num_hops
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.drop_last = drop_last
        self.add_self_loops = add_self_loops
        self.flow = flow
        assert flow in ['source_to_target', 'target_to_source']
        # i indexes the "root" row of edge_index, j the "neighbor" row.
        self.i, self.j = (0, 1) if flow == 'target_to_source' else (1, 0)
        # Sort edges by root node so each node's incident edges are
        # contiguous; e_assoc maps sorted positions back to original edges.
        self.edge_index_i, self.e_assoc = data.edge_index[self.i].sort()
        self.edge_index_j = data.edge_index[self.j, self.e_assoc]
        # CSR-style offsets: cumdeg[v]..cumdeg[v+1] spans node v's edges.
        deg = degree(self.edge_index_i, data.num_nodes, dtype=torch.long)
        self.cumdeg = torch.cat([deg.new_zeros(1), deg.cumsum(0)])
        # Scratch buffer reused to relabel global node ids to local ids.
        self.tmp = torch.empty(data.num_nodes, dtype=torch.long)
    def __get_batches__(self, subset=None):
        r"""Returns a list of mini-batches from the initial nodes in
        :obj:`subset`."""
        if subset is None and not self.shuffle:
            subset = torch.arange(self.data.num_nodes, dtype=torch.long)
        elif subset is None and self.shuffle:
            subset = torch.randperm(self.data.num_nodes)
        else:
            # Boolean masks (uint8) are converted into index tensors.
            if subset.dtype == torch.uint8:
                subset = subset.nonzero().view(-1)
            if self.shuffle:
                subset = subset[torch.randperm(subset.size(0))]
        subsets = torch.split(subset, self.batch_size)
        if self.drop_last and subsets[-1].size(0) < self.batch_size:
            subsets = subsets[:-1]
        assert len(subsets) > 0
        return subsets
    def __produce__(self, n_id):
        r"""Produces a :obj:`DataFlow` object for a given mini-batch
        :obj:`n_id`."""
        data_flow = DataFlow(n_id, self.flow)
        for l in range(self.num_hops):
            # Sample up to size[l] incident edges per node in n_id.
            e_id = neighbor_sampler(n_id, self.cumdeg, self.size[l])
            new_n_id = self.edge_index_j.index_select(0, e_id)
            if self.add_self_loops:
                new_n_id = torch.cat([new_n_id, n_id], dim=0)
            new_n_id = new_n_id.unique(sorted=False)
            # Map sampled (sorted) edge positions back to original edge ids.
            e_id = self.e_assoc[e_id]
            edges = [None, None]
            edge_index_i = self.data.edge_index[self.i, e_id]
            if self.add_self_loops:
                edge_index_i = torch.cat([edge_index_i, n_id], dim=0)
            # Relabel global ids to local positions via the scratch buffer.
            self.tmp[n_id] = torch.arange(n_id.size(0))
            edges[self.i] = self.tmp[edge_index_i]
            edge_index_j = self.data.edge_index[self.j, e_id]
            if self.add_self_loops:
                edge_index_j = torch.cat([edge_index_j, n_id], dim=0)
            self.tmp[new_n_id] = torch.arange(new_n_id.size(0))
            edges[self.j] = self.tmp[edge_index_j]
            edge_index = torch.stack(edges, dim=0)
            # Remove the edge identifier when adding self-loops to prevent
            # misused behavior.
            e_id = None if self.add_self_loops else e_id
            n_id = new_n_id
            data_flow.append(n_id, e_id, edge_index)
        return data_flow
    def __call__(self, subset=None):
        r"""Returns a generator of :obj:`DataFlow` that iterates over the nodes
        in :obj:`subset` in a mini-batch fashion.
        Args:
            subset (LongTensor or ByteTensor, optional): The initial nodes to
                propagete messages to. If set to :obj:`None`, will iterate over
                all nodes in the graph. (default: :obj:`None`)
        """
        for n_id in self.__get_batches__(subset):
            data_flow = self.__produce__(n_id)
            yield data_flow
|
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from functools import partial
def cube():
    """Draw a unit cube as a single GL line strip."""
    glBegin(GL_LINE_STRIP)
    # The 8 corners: bit k of the index selects coordinate k (x, y, z).
    corners = [((i & 4) >> 2, (i & 2) >> 1, i & 1) for i in range(8)]
    # One continuous walk that covers every edge of the cube.
    for corner in (0, 1, 3, 2, 0, 4, 6, 7, 3, 2, 6, 4, 5, 7, 5, 1):
        glVertex3fv(corners[corner])
    glEnd()
def main():
    """Open an 800x600 GL window and spin/zoom a wireframe cube with arrow keys."""
    pygame.init()
    display = (800, 600)
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
    # Auto-repeat held keys every 20 ms so holding an arrow keeps moving.
    pygame.key.set_repeat(20, 20)
    gluPerspective(90, (display[0] / display[1]), 0.1, 50.0)
    # Pull the camera back so the cube at the origin is in view.
    glTranslatef(0.0, 0.0, -5.0)
    # Key -> GL transform dispatch table (up/down zoom, left/right rotate).
    fns = {
        pygame.K_UP: partial(glTranslatef, 0.0, 0.0, -0.1),
        pygame.K_DOWN: partial(glTranslatef, 0.0, 0.0, 0.1),
        pygame.K_LEFT: partial(glRotatef, 1.0, 1.0, 1.0, 1.0),
        pygame.K_RIGHT: partial(glRotatef, -1.0, 1.0, 1.0, 1.0),
    }
    while True:
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        cube()
        pygame.display.flip()
        # Block until the next event instead of busy-looping.
        event = pygame.event.wait()
        if event.type == pygame.KEYDOWN:
            try:
                fns[event.key]()
            except KeyError:
                # Key with no binding: ignore it.
                continue
        elif event.type == pygame.QUIT:
            pygame.quit()
            quit()
# Run the demo only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# encoding:utf-8
import unittest
import time
from datetime import datetime
from app import create_app, db
from app.models import User, AnonymousUser
__author__ = 'zhangmm'
class UserModelTestCase(unittest.TestCase):
    """Unit tests for the User model: passwords, timestamps and gravatars."""

    def setUp(self):
        # Fresh app context and an empty database for every test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_password_setter(self):
        user = User(password='cat')
        self.assertIsNotNone(user.password_hash)

    def test_no_password_getter(self):
        user = User(password='cat')
        with self.assertRaises(AttributeError):
            user.password

    def test_password_verification(self):
        user = User(password='cat')
        self.assertTrue(user.verify_password('cat'))
        self.assertFalse(user.verify_password('dog'))

    def test_password_salts_are_random(self):
        first = User(password='cat')
        second = User(password='cat')
        self.assertNotEqual(first.password_hash, second.password_hash)

    def test_timestamps(self):
        user = User(password='cat')
        db.session.add(user)
        db.session.commit()
        # Both timestamps should have been set within the last few seconds.
        self.assertLess((datetime.utcnow() - user.member_since).total_seconds(), 3)
        self.assertLess((datetime.utcnow() - user.last_seen).total_seconds(), 3)

    def test_ping(self):
        user = User(password='cat')
        db.session.add(user)
        db.session.commit()
        time.sleep(2)
        before = user.last_seen
        user.ping()
        self.assertGreater(user.last_seen, before)

    def test_gravatar(self):
        user = User(email='zhangmin6105@qq.com', password='cat')
        with self.app.test_request_context('/'):
            gravatar = user.gravatar()
            gravatar_256 = user.gravatar(size=256)
            gravatar_pg = user.gravatar(rating='pg')
            gravatar_retro = user.gravatar(default='retro')
        # HTTPS requests should switch to the secure gravatar host.
        with self.app.test_request_context('/', base_url='https://zhangmm.cn'):
            gravatar_ssl = user.gravatar()
        self.assertIn('http://www.gravatar.com/avatar/7c1d6daaabe72a964b3a42609e3f2d2b', gravatar)
        self.assertIn('s=256', gravatar_256)
        self.assertIn('r=pg', gravatar_pg)
        self.assertIn('d=retro', gravatar_retro)
        self.assertIn('https://secure.gravatar.com/avatar/7c1d6daaabe72a964b3a42609e3f2d2b', gravatar_ssl)
|
from .. import db
import datetime
class ChallengeSettings(db.Model):
    """Per-language execution settings attached to a challenge.

    Each row stores the limits applying to one language of the parent
    challenge referenced by ``challenge_id``.
    """
    __tablename__ = "challenge_settings"
    id = db.Column(db.Integer, primary_key=True)
    # Programming language these limits apply to.
    language_name = db.Column(db.String(20), nullable=False)
    # Execution limits; units defined by the judge (TODO confirm: secs / MB).
    time_limit = db.Column(db.Integer, nullable=False)
    memory_limit = db.Column(db.Integer, nullable=False)
    # Bug fix: pass the callable, not its result. ``datetime.datetime.now()``
    # was evaluated once at import time, freezing the same timestamp into
    # every row; the callable is evaluated per insert instead.
    created_at = db.Column(db.DateTime(timezone=False),
                           nullable=False, default=datetime.datetime.now)
    challenge_id = db.Column(db.Integer, db.ForeignKey(
        'challenges.id'), nullable=False)
|
# Print solutions
import numpy as np
def printBest(data, solution):
    """Print the best solution found by the solver and store it in ``data``.

    Extracts facility decisions, customer choices, demands and subproblem
    values from ``solution`` (an object exposing ``get_objective_value()``
    and ``get_values(name)``, CPLEX-style) and prints a per-alternative
    summary table.

    Args:
        data: model/run dictionary; mutated in place (adds ``best_obj``,
            ``best_facilities``, ``UMax``, ``choice``, ``x``,
            ``best_demand``, ``z_opt`` and ``x_opt``).
        solution: solved model, queried by variable name.
    """
    print()
    # SAVE OPTIMAL SOLUTION
    data['best_obj'] = solution.get_objective_value()
    # NOTE(review): no copy here -- best_facilities aliases data['y'], so the
    # loop below also overwrites data['y'] in place; confirm that is intended.
    data['best_facilities'] = data['y']
    for i in range(data['I_tot_exp']):
        data['best_facilities'][i] = solution.get_values('y[' + str(i) + ']')
    # RETRIEVE MAX UTILITIES OF CUSTOMERS
    data['UMax'] = np.zeros([data['N'], data['R']])
    data['choice'] = np.zeros([data['N'], data['R']], dtype=int)
    data['x'] = np.zeros([data['I_tot_exp'], data['N'], data['R']])
    for n in range(data['N']):
        for r in range(data['R']):
            # Customer n in draw r picks the open facility with max utility
            # (closed facilities are zeroed out by the multiplication).
            data['UMax'][n,r] = np.max(data['U'][:,n,r] * data['best_facilities'][:])
            data['choice'][n,r] = np.argmax(data['U'][:,n,r] * data['best_facilities'][:])
            data['x'][data['choice'][n,r],n,r] = 1.0
    # RETRIEVE DEMAND (population share of each draw that chose facility i)
    data['best_demand'] = np.zeros([data['I_tot_exp']])
    for i in range(data['I_tot_exp']):
        for n in range(data['N']):
            for r in range(data['R']):
                if data['choice'][n,r] == i:
                    data['best_demand'][i] += (data['popN'][n]/data['R'])
    ### PRINT FACILITIES, PRICES, DEMANDS, PROFITS
    print('\nObjective function value : {:12.4f}'.format(data['best_obj']))
    print('\nAlt Name Supplier Facility Price Demand Market share')
    for i in range(data['I_tot_exp']):
        print('{:3d} {:6s} {:2d} {:4.0f} {:7.4f} {:8.3f} {:7.4f}'
              .format(i, data['name_mapping'][data['alt'][i]], data['operator'][data['alt'][i]],
                      data['best_facilities'][i], data['p'][i],
                      data['best_demand'][i], data['best_demand'][i] / data['Pop']))
    ### PRINT SUBPROBLEM CONTRIBUTIONS
    # z is only defined for scenarios not retained in the master problem;
    # retained ones are flagged with -1.
    data['z_opt'] = np.zeros([data['N'], data['R']])
    for r in range(data['R']):
        for n in range(data['R']) if False else range(data['N']):
            if data['PB_RetainedInMaster'][r] == 0:
                data['z_opt'][n,r] = solution.get_values('z[' + str(n) + ']' + '[' + str(r) + ']')
            else:
                data['z_opt'][n,r] = -1
    ### PRINT CHOICES IN RETAINED PROBLEMS (non-retained flagged with -1)
    data['x_opt'] = np.zeros([data['I_tot_exp'], data['N'], data['R']])
    for r in range(data['R']):
        for n in range(data['N']):
            for i in range(data['I_tot_exp']):
                if data['PB_RetainedInMaster'][r] == 1:
                    data['x_opt'][i,n,r] = solution.get_values('x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']')
                else:
                    data['x_opt'][i,n,r] = -1
    print()
def printAllSolutions(data):
    """Print every integer facility vector found in the branch-and-bound tree.

    Args:
        data: dictionary with ``I_tot_exp`` (number of alternatives) and
            ``all_y`` (one facility vector per recorded solution).
    """
    n_alt = data['I_tot_exp']
    print('\n\nALL INTEGER SOLUTIONS FOUND IN BRANCH-AND-BOUND TREE')
    # Header row: one two-character column per alternative index.
    header = ''.join('{:2d}'.format(i) for i in range(n_alt))
    print(' Sol ' + header, end='')
    # One row per recorded solution.
    for sol, y_vec in enumerate(data['all_y']):
        cells = ''.join('{:2.0f}'.format(y_vec[i]) for i in range(n_alt))
        print('\n{:5d} '.format(sol) + cells, end='')
    print()
# Tiny interactive mood check: ask the user and reply with encouragement.
responses = {
    "yes": "Glad to hear that! Keep pressing on!",
    "no": "Oh no! Hope you overcome your problems soon and remember to never give up!",
}
answer = input("Are you feeling happy today?")
if answer in responses:
    print(responses[answer])
|
import calendar
from datetime import datetime
from corehq.apps.products.models import SQLProduct
from corehq.apps.locations.models import get_location, SQLLocation
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumnGroup, DataTablesColumn
from corehq.apps.reports.standard import MonthYearMixin
from corehq.apps.reports.sqlreport import DataFormatter, DictDataFormat
from corehq.util.translation import localize
from custom.intrahealth.sqldata import NombreData, TauxConsommationData
from django.utils.translation import ugettext as _
from memoized import memoized
from dimagi.utils.parsing import json_format_date
def get_localized_months():
    """Return the chronological list of month names, localized to French."""
    with localize('fr'):
        return [_(calendar.month_name[month]).title() for month in range(1, 13)]
def change_id_keys_to_names(domain, dict_with_id_keys):
    """Rebuild a location-id-keyed dict using location names as keys.

    Ids with no matching ``SQLLocation`` in ``domain`` are kept unchanged
    as keys.
    """
    dict_with_name_keys = {}
    for location_id, data in dict_with_id_keys.items():
        try:
            key = SQLLocation.objects.get(domain=domain,
                                          location_id=location_id).name
        except SQLLocation.DoesNotExist:
            key = location_id
        dict_with_name_keys[key] = data
    return dict_with_name_keys
class YeksiNaaMonthYearMixin(MonthYearMixin):
    """Month/year filter values read from the query string, defaulting to now."""

    @property
    def month(self):
        raw = self.request.GET.get('month')
        return int(raw) if raw is not None else datetime.utcnow().month

    @property
    def year(self):
        raw = self.request.GET.get('year')
        return int(raw) if raw is not None else datetime.utcnow().year
class IntraHealthLocationMixin(object):
    """Resolves the report location from the ``location_id`` request param."""

    @property
    @memoized
    def location(self):
        # Returns None implicitly when no (truthy) location_id is supplied.
        location_id = self.request.GET.get('location_id')
        if location_id:
            return get_location(location_id)
class IntraHealthReportConfigMixin(object):
    """Builds the SQL-report config dict shared by the IntraHealth reports."""

    def config_update(self, config):
        # Scope the query to a district or region when a location is selected.
        if self.request.GET.get('location_id', ''):
            if self.location.location_type_name.lower() == 'district':
                key = 'district_id'
            else:
                key = 'region_id'
            config[key] = self.location.location_id

    @property
    def report_config(self):
        config = dict(
            domain=self.domain,
            startdate=self.datespan.startdate.replace(hour=0, minute=0, second=0),
            enddate=self.datespan.enddate.replace(hour=23, minute=59, second=59),
            visit="''",
            strsd=json_format_date(self.datespan.startdate),
            stred=json_format_date(self.datespan.enddate),
            empty_prd_code='__none__',
            zero=0
        )
        self.config_update(config)
        return config
class IntraHealtMixin(IntraHealthLocationMixin, IntraHealthReportConfigMixin):
    """Shared headers/rows plumbing for the IntraHealth tabular reports.

    Subclasses provide ``model`` and ``data_source`` (sqldata instances).
    ``groups`` is populated with the domain's active products while building
    ``headers``, so ``headers`` must be evaluated before ``rows``.
    """
    model = None
    data_source = None
    groups = []
    # Placeholder cell used when a (product, location) pair has no data.
    no_value = {'sort_key': 0, 'html': 0}
    # Maps unaccented product spellings to their accented display names.
    PRODUCT_NAMES = {
        'Preservatif Feminin': 'Préservatif Féminin',
        'Preservatif Masculin': 'Préservatif Masculin',
        'Depo-Provera': 'Dépo-Provera',
    }

    def _safe_get(self, dictionary, element):
        # dict.get-like lookup returning None when the key is absent.
        return dictionary[element] if element in dictionary else None

    @property
    def headers(self):
        """Build the table header: leading column plus one group per product."""
        header = DataTablesHeader()
        columns = self.model.columns
        if self.model.have_groups:
            header.add_column(DataTablesColumnGroup('', columns[0].data_tables_column))
        else:
            header.add_column(columns[0].data_tables_column)
        # Side effect: caches the product queryset later reused by ``rows``.
        self.groups = SQLProduct.objects.filter(domain=self.domain, is_archived=False).order_by('code')
        for group in self.groups:
            if self.model.have_groups:
                header.add_column(DataTablesColumnGroup(group.name,
                                  *[columns[j].data_tables_column for j in range(1, len(columns))]))
            else:
                header.add_column(DataTablesColumn(group.name))
        return header

    @property
    def rows(self):
        """Assemble one row per localization, one cell group per product.

        NOTE(review): assumes the data keys are tuples whose first (or
        second) element is the localization; the exact key layout differs
        per data source -- confirm against the sqldata definitions.
        """
        data = self.model.data
        if isinstance(self.model, (NombreData, TauxConsommationData)):
            localizations = sorted(set(key[0] for key in data))
        else:
            localizations = sorted(set(key[1] for key in data))
        rows = []
        formatter = DataFormatter(DictDataFormat(self.model.columns, no_value=self.no_value))
        if isinstance(self.data_source, (NombreData, TauxConsommationData)):
            # Re-key records as (product, pps, district) or (product, pps).
            result = {}
            ppss = set()
            for k, v in data.items():
                ppss.add(k[-2])
                if 'region_id' in self.data_source.config:
                    helper_tuple = (k[2], k[1], k[0])
                else:
                    helper_tuple = (k[1], k[0])
                result[helper_tuple] = v
            if 'region_id' in self.data_source.config:
                # Aggregate PPS-level values up to (product, district) level.
                result_sum = {}
                for localization in localizations:
                    for pps in ppss:
                        for group in self.groups:
                            if(group.product_id, localization) in result_sum:
                                r = result_sum[(group.product_id, localization)]
                                cols = self.data_source.sum_cols
                                for col in cols:
                                    r[col] += result.get((group.product_id, pps, localization), {col: 0})[col]
                            else:
                                # Seed with zeros so later PPS values add up.
                                helper_dict = {}
                                for col in self.data_source.sum_cols:
                                    helper_dict[col] = 0
                                helper_dict['district_name'] = localization
                                result_sum[(group.product_id, localization)] = result.get(
                                    (group.product_id, pps, localization), helper_dict)
                result = result_sum
            data = dict(formatter.format(result, keys=self.model.keys, group_by=self.model.group_by))
        else:
            data = dict(formatter.format(self.model.data, keys=self.model.keys, group_by=self.model.group_by))
        # Allow lookups under either accented or unaccented product names.
        reversed_map = dict(zip(list(self.PRODUCT_NAMES.values()), list(self.PRODUCT_NAMES.keys())))
        for localization in localizations:
            row = [localization]
            for group in self.groups:
                if (group.product_id, localization) in data:
                    product = data[(group.product_id, localization)]
                    row.extend([product[p] for p in self.model.col_names])
                elif (self._safe_get(reversed_map, group.product_id), localization) in data:
                    product = data[(reversed_map[group.product_id], localization)]
                    row.extend([product[p] for p in self.model.col_names])
                else:
                    row.extend([self.no_value for p in self.model.col_names])
            rows.append(row)
        return rows
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import time
import unittest
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck, live_only)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
@live_only()
class ContainerappIdentityTests(ScenarioTest):
    """Live end-to-end tests for ``az containerapp identity`` commands.

    Each test provisions a managed environment plus a container app, then
    drives the assign/show/remove identity subcommands and verifies the
    resulting identity ``type`` via JMESPath checks.
    """
    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus2")
    def test_containerapp_identity_e2e(self, resource_group):
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        user_identity_name = self.create_random_name(prefix='containerapp', length=24)
        self.cmd('containerapp env create -g {} -n {}'.format(resource_group, env_name))
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
        # Poll until the environment leaves the transient "Waiting" state.
        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)
            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
        self.cmd('containerapp create -g {} -n {} --environment {}'.format(resource_group, ca_name, env_name))
        # Assign system identity first, then layer a user identity on top.
        self.cmd('containerapp identity assign --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])
        self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name))
        self.cmd('containerapp identity assign --user-assigned {} -g {} -n {}'.format(user_identity_name, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])
        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])
        # Remove identities one kind at a time and confirm the type narrows.
        self.cmd('containerapp identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])
        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])
        self.cmd('containerapp identity remove --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])
        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus")
    def test_containerapp_identity_system(self, resource_group):
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        self.cmd('containerapp env create -g {} -n {}'.format(resource_group, env_name))
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
        # Poll until the environment leaves the transient "Waiting" state.
        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)
            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
        # App created with a system identity from the start.
        self.cmd('containerapp create -g {} -n {} --environment {} --system-assigned'.format(resource_group, ca_name, env_name))
        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])
        # Remove / re-assign / remove cycle.
        self.cmd('containerapp identity remove --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])
        self.cmd('containerapp identity assign --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])
        self.cmd('containerapp identity remove --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus2")
    def test_containerapp_identity_user(self, resource_group):
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        user_identity_name1 = self.create_random_name(prefix='containerapp-user1', length=24)
        user_identity_name2 = self.create_random_name(prefix='containerapp-user2', length=24)
        self.cmd('containerapp env create -g {} -n {}'.format(resource_group, env_name))
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
        # Poll until the environment leaves the transient "Waiting" state.
        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)
            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()
        self.cmd('containerapp create -g {} -n {} --environment {}'.format(resource_group, ca_name, env_name))
        self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name1))
        self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name2))
        self.cmd('containerapp identity assign --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])
        # Two user identities can be assigned in one command.
        self.cmd('containerapp identity assign --user-assigned {} {} -g {} -n {}'.format(user_identity_name1, user_identity_name2, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])
        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])
        # Removing only one user identity keeps the UserAssigned component.
        self.cmd('containerapp identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name1, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])
        self.cmd('containerapp identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name2, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])
        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])
        self.cmd('containerapp identity remove --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])
        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])
|
# Generated by Django 2.1.4 on 2019-10-03 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the required ``job`` field (Segment/Recognize) to ``OcrModel``."""

    dependencies = [
        ('core', '0032_auto_20190729_1249'),
    ]

    operations = [
        migrations.AddField(
            model_name='ocrmodel',
            name='job',
            # default=2 ('Recognize') backfills existing rows;
            # preserve_default=False drops the default after the migration.
            field=models.PositiveSmallIntegerField(choices=[(1, 'Segment'), (2, 'Recognize')], default=2),
            preserve_default=False,
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019 Marcel Bollmann <marcel@bollmann.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug]
Creates .bib files for all papers in the Hugo directory.
Options:
--importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]
--exportdir=DIR Directory to write exported files to. [default: {scriptdir}/../build/data-export/]
--debug Output debug-level log messages.
-c, --clean Delete existing files in target directory before generation.
-h, --help Display this helpful text.
"""
from docopt import docopt
from lxml import etree
from tqdm import tqdm
import gzip
import logging as log
import io
import os
from anthology import Anthology
from anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year
from create_hugo_pages import check_directory
from operator import itemgetter
def volume_sorter(volume_tuple):
    """Sort key for (volume_id, volume) pairs: (inferred year, volume_id).

    The year embedded in the anthology ID is deliberately ignored; the
    collection ID alone determines the year via ``infer_year``.
    """
    volume_id = volume_tuple[0]
    # Fix: the previous code unpacked a ``year`` here and then immediately
    # overwrote it -- a dead, misleading assignment. Only the collection ID
    # is actually used.
    collection_id, _, _ = deconstruct_anthology_id(volume_id)
    year = infer_year(collection_id)
    return year, volume_id
def create_bibtex(anthology, trgdir, clean=False):
    """Creates .bib files for all papers.

    Writes one .bib per paper, one per volume (under ``volumes/``) and three
    aggregate files: ``anthology.bib``, ``anthology.bib.gz`` (both concise)
    and ``anthology+abstracts.bib.gz`` (full entries).

    Args:
        anthology: Anthology instance whose volumes/papers are exported.
        trgdir: target directory for all generated files.
        clean: delete pre-existing files in the target directories first.
    """
    if not check_directory("{}/papers".format(trgdir), clean=clean):
        return
    if not check_directory("{}/volumes".format(trgdir), clean=clean):
        return
    log.info("Creating BibTeX files for all papers...")
    with open(
        "{}/anthology.bib".format(trgdir), "wt", encoding="utf-8"
    ) as file_anthology_raw, gzip.open(
        "{}/anthology.bib.gz".format(trgdir), "wt", encoding="utf-8"
    ) as file_anthology, gzip.open(
        "{}/anthology+abstracts.bib.gz".format(trgdir), "wt", encoding="utf-8"
    ) as file_anthology_with_abstracts:
        # Newest volumes first, so recent papers appear early in the
        # aggregate files.
        for volume_id, volume in tqdm(
            sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)
        ):
            # NOTE(review): per-paper files are written directly into trgdir
            # even though a "papers" subdirectory is prepared above --
            # confirm whether volume_dir should be "{}/papers".format(trgdir).
            volume_dir = trgdir
            if not os.path.exists(volume_dir):
                os.makedirs(volume_dir)
            with open("{}/volumes/{}.bib".format(trgdir, volume_id), "w") as file_volume:
                for paper in volume:
                    with open(
                        "{}/{}.bib".format(volume_dir, paper.full_id), "w"
                    ) as file_paper:
                        contents = paper.as_bibtex()
                        print(contents, file=file_paper)
                        print(contents, file=file_anthology_with_abstracts)
                        # Concise entries (no abstract) go to the small files.
                        concise_contents = paper.as_bibtex(concise=True)
                        print(concise_contents, file=file_volume)
                        print(concise_contents, file=file_anthology)
                        print(concise_contents, file=file_anthology_raw)
if __name__ == "__main__":
    args = docopt(__doc__)
    scriptdir = os.path.dirname(os.path.abspath(__file__))
    # Expand the {scriptdir} placeholder in both directory options.
    for option in ("--importdir", "--exportdir"):
        if "{scriptdir}" in args[option]:
            args[option] = os.path.abspath(args[option].format(scriptdir=scriptdir))
    log_level = log.DEBUG if args["--debug"] else log.INFO
    log.basicConfig(format="%(levelname)-8s %(message)s", level=log_level)
    # Track the highest severity logged so we can fail the build on errors.
    tracker = SeverityTracker()
    log.getLogger().addHandler(tracker)
    anthology = Anthology(importdir=args["--importdir"])
    create_bibtex(anthology, args["--exportdir"], clean=args["--clean"])
    if tracker.highest >= log.ERROR:
        exit(1)
|
# -*- coding: utf-8 -*-
u"""
Created on 2015-7-13
@author: cheng.li
"""
from PyFin.tests import api
from PyFin.tests import DateUtilities
from PyFin.tests import Env
from PyFin.tests import Math
from PyFin.tests import PricingEngines
from PyFin.tests import Analysis
__all__ = ["api",
"DateUtilities",
"Env",
"Math",
"PricingEngines",
"Analysis"]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
from tracker.multitracker import JDETracker
from tracking_utils import visualization as vis
from tracking_utils.utils import mkdir_if_missing
# from opts import opts
def write_results(filename, results, data_type):
    """Write tracking results to ``filename`` in MOT or KITTI text format.

    Args:
        filename: output path (overwritten).
        results: iterable of (frame_id, tlwhs, track_ids) per frame.
        data_type: 'mot' or 'kitti'; anything else raises ValueError.
    """
    formats = {
        'mot': '{frame},{id},{x1},{y1},{w},{h},1,0\n',
        'kitti': '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n',
    }
    if data_type not in formats:
        raise ValueError(data_type)
    save_format = formats[data_type]
    with open(filename, 'w') as out:
        for frame_id, tlwhs, track_ids in results:
            # KITTI frame numbering starts at 0.
            if data_type == 'kitti':
                frame_id -= 1
            for (x1, y1, w, h), track_id in zip(tlwhs, track_ids):
                # Negative ids mark invalid/unmatched tracks.
                if track_id < 0:
                    continue
                out.write(save_format.format(frame=frame_id, id=track_id,
                                             x1=x1, y1=y1, x2=x1 + w, y2=y1 + h,
                                             w=w, h=h))
def eval_seq(opt, data_path, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
    """Run the JDE tracker over pre-computed per-frame detection files.

    Reads ``<frame>.txt`` detections (and ``<frame>.jpg`` images when
    visualizing) from ``data_path``, frames numbered from 1, collects
    (frame, boxes, ids) tuples and writes them via ``write_results``.

    NOTE(review): ``frame_rate`` is accepted but never used in this body --
    confirm whether it should be forwarded to JDETracker.
    """
    tracker = JDETracker(opt)
    # timer = Timer()
    results = []
    frame_id = 0
    # Hard-coded to the first 60 frames; the commented-out expression would
    # instead process every frame pair (.txt + .jpg) in the directory.
    frame_nums = 60 #len(os.listdir(data_path))//2
    #np_res = []
    for _ in range(frame_nums):
        frame_id += 1
        dets = np.loadtxt(os.path.join(data_path, str(frame_id) + '.txt'), dtype=np.float32, delimiter=',')
        online_targets = tracker.update(dets)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Keep plausibly-pedestrian boxes: above the minimum area and
            # with aspect ratio (w/h) below 1.6.
            if tlwh[2] * tlwh[3] > opt.min_box_area and tlwh[2] / tlwh[3] < 1.6:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # np_res.append([frame_id,tid,tlwh[0],tlwh[1],tlwh[2],tlwh[3],1,0])
        ## save results
        results.append((frame_id, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            if save_dir:
                mkdir_if_missing(save_dir)
            img = cv2.imread(os.path.join(data_path, str(frame_id) + '.jpg'))
            online_im = vis.plot_tracking(img, online_tlwhs, online_ids, frame_id=frame_id)
            # if show_image:
            #     cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
    # save results
    write_results(result_filename, results, data_type)
    # np.savetxt(result_filename, np.array(np_res), fmt='%d,%d,%0.1f,%0.1f,%0.1f,%0.1f,%0.1f,%d', delimiter=',')
|
# pylint: disable=invalid-name
"""
Module for obtaining quadrature points and weights.
Three types of gaussian quadrature are supported: normal Gaussian,
Radau quadrature, and Lobatto quadrature. The first method does not
include either endpoint of integration, Radau quadrature includes one
endpoint of the integration range, and Lobatto quadrature includes both
endpoints.::
import iad.quadrature
n=8
x, w = iad.quadrature.gauss(n)
print(" i x weight")
for i in range(n):
print("%2d %+.12f %+.12f" % (i, x[i], w[i]))
x, w = iad.quadrature.radau(n)
print(" i x weight")
for i in range(n):
print("%2d %+.12f %+.12f" % (i, x[i], w[i]))
x, w = iad.quadrature.lobatto(n)
print(" i x weight")
for i in range(n):
print("%2d %+.12f %+.12f" % (i, x[i], w[i]))
"""
import functools
import scipy.special
import scipy.optimize
import numpy as np
__all__ = ('gauss',
'radau',
'lobatto',
)
def _gauss_func(n, x):
"""Zeroes of this function are the Gaussian quadrature points."""
return scipy.special.legendre(n)(x)
def _radau_func(n, x):
"""Zeros of this function are the Radau quadrature points."""
return (scipy.special.eval_legendre(n - 1, x) + scipy.special.eval_legendre(n, x)) / (1 + x)
def _lobatto_func(n, x):
"""Zeros of this function are the Lobatto quadrature points."""
return scipy.special.legendre(n - 1).deriv(1)(x)
def gauss(n, a=-1, b=1):
    """
    Return abscissas and weights for Gaussian quadrature on [a, b].

    The quadrature approximation of the definite integral from a to b is
    the sum of w_i * f(x_i). Neither endpoint appears among the abscissas,
    and the rule is exact for polynomials of degree 2n-1 or less. For
    -a == b the abscissas are symmetric about the origin.

    Args:
        n: number of quadrature points
        a: lower limit of integral
        b: upper limit of integral
    Returns:
        x: array of abscissas of length n
        w: array of weights of length n
    """
    nodes, weights, _ = scipy.special.roots_legendre(n, mu=True)
    # Affine map from [-1, 1] onto [a, b]. The (a - b)/2 scale mirrors the
    # nodes; the final flip restores ascending order.
    nodes = nodes * (0.5 * (a - b)) + 0.5 * (a + b)
    weights = weights * (0.5 * (b - a))
    return np.flip(nodes), np.flip(weights)
def radau(n, a=-1, b=1):
    """
    Return abscissas and weights for Radau quadrature on [a, b].

    The quadrature approximation of the definite integral from a to b is
    the sum of w_i * f(x_i). The upper endpoint b is included among the
    abscissas, and the rule is exact for polynomials of degree 2n-2 or
    less.

    Args:
        n: number of quadrature points
        a: lower limit of integral
        b: upper limit of integral
    Returns:
        x: array of abscissas of length n
        w: array of weights of length n
    """
    nodes = np.zeros(n)
    weights = np.zeros(n)
    nodes[0] = -1
    # The roots of P_n bracket the interior Radau points.
    brackets = scipy.special.roots_legendre(n)[0]
    target = functools.partial(_radau_func, n)
    nodes[1:] = [scipy.optimize.brentq(target, lo, hi)
                 for lo, hi in zip(brackets[:-1], brackets[1:])]
    legendre_deriv = scipy.special.legendre(n - 1).deriv(1)
    weights[0] = 2 / n**2
    weights[1:] = 1 / legendre_deriv(nodes[1:])**2 / (1 - nodes[1:])
    # Affine map from [-1, 1] onto [a, b]; the mirroring (a - b)/2 scale is
    # undone by the final flip, which restores ascending order.
    nodes = nodes * (0.5 * (a - b)) + 0.5 * (b + a)
    weights = weights * (0.5 * (b - a))
    return np.flip(nodes), np.flip(weights)
def lobatto(n, a=-1, b=1):
    """
    Return abscissas and weights for Lobatto quadrature on [a, b].

    The quadrature approximation of the definite integral from a to b is
    the sum of w_i * f(x_i). Both endpoints a and b are included among the
    abscissas, and the rule is exact for polynomials of degree 2n-3 or
    less. For -a == b the abscissas are symmetric about the origin.

    Args:
        n: number of quadrature points
        a: lower limit of integral
        b: upper limit of integral
    Returns:
        x: array of abscissas of length n
        w: array of weights of length n
    """
    nodes = np.zeros(n)
    # Endpoint weight 2 / (n (n-1)) is also the seed for interior weights.
    weights = np.full(n, 2 / n / (n - 1))
    nodes[0] = -1
    nodes[-1] = 1
    # The roots of P_{n-1} bracket the roots of its derivative.
    brackets = scipy.special.roots_legendre(n - 1)[0]
    target = functools.partial(_lobatto_func, n)
    nodes[1:-1] = [scipy.optimize.brentq(target, lo, hi)
                   for lo, hi in zip(brackets[:-1], brackets[1:])]
    legendre_vals = scipy.special.legendre(n - 1)(nodes)
    weights[1:-1] /= legendre_vals[1:-1]**2
    # Affine map from [-1, 1] onto [a, b]; the mirroring (a - b)/2 scale is
    # undone by the final flip, which restores ascending order.
    nodes = nodes * (0.5 * (a - b)) + 0.5 * (a + b)
    weights = weights * (0.5 * (b - a))
    return np.flip(nodes), np.flip(weights)
|
from setuptools import setup, find_packages
# Read the long description from the README shipped alongside setup.py.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata for distribution on PyPI.
setup(
    name = 'thelogger',
    packages = ['thelogger', 'thelogger.notify'],
    version = 'v0.2.6',
    # NOTE(review): 'Apache 2.0' here contradicts the 'MIT License'
    # classifier below -- confirm which license applies and align them.
    license='Apache 2.0',
    description = 'Easy logging, timing and email notifications of code execution.',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    author = 'Tom1919',
    author_email = 'py.notify1@gmail.com',
    url = 'https://github.com/tom1919/TheLogger',
    download_url = 'https://github.com/tom1919/TheLogger/archive/refs/tags/v0.2.6.tar.gz',
    # NOTE(review): 'timimg' is presumably a typo for 'timing' (harmless to
    # installs, but it hurts package search).
    keywords = ['log', 'logging', 'logger', 'email', 'timimg', 'notification'],
    install_requires = ['pandas','tabulate'],
    classifiers=[
        'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
import gzip
import glob
import logging
import time
import collections as cs
from functools import partial
from optparse import OptionParser, Values
import multiprocessing as mp
# import multiprocessing.dummy as mt
import threading as mt
import queue as q
from tqdm import tqdm
# protoc --python_out=. ./appsinstalled.proto
# pip install protobuf
import appsinstalled_pb2
# pip install python-memcached
import memcache
# Memcached client tuning.
MEMCACHE_TIMEOUT = 4.  # socket timeout, secs
MEMCACHE_ATTEMPTS = 2  # extra set_multi attempts for failed keys
MEMCACHE_RETRY_TIMEOUT = 0.1  # pause between retries, secs
# Sentinel pushed onto a worker queue to tell the thread to finish up.
WORKER_STOP = 'beer'
# Records buffered per device type before handing a batch to a worker.
WORKER_BUFFER_RECORDS = 128
# Maximum tolerated ratio of failed records per processed file.
NORMAL_ERR_RATE = 0.01
# One parsed log line: device identity, geo coords and installed app ids.
AppsInstalled = cs.namedtuple("AppsInstalled", ["dev_type", "dev_id", "lat", "lon", "apps"])
# if 'line_profiler' not in dir() and 'profile' not in dir():
# def profile(func):
# return func
def dot_rename(path):
    """Mark *path* as processed by prefixing its basename with a dot."""
    directory, basename = os.path.split(path)
    # os.rename is atomic in most cases (same filesystem, POSIX).
    os.rename(path, os.path.join(directory, "." + basename))
def parse_appsinstalled(line: bytes):
    """Parse one tab-separated raw log line into an ``AppsInstalled`` tuple.

    Expected fields: dev_type, dev_id, lat, lon, comma-separated app ids.
    Returns ``None`` for structurally invalid lines (fewer than five fields
    or an empty device type/id). Non-numeric app ids are dropped with a log
    message.
    """
    line_parts = list(map(bytes.strip, line.split(b'\t')))
    if len(line_parts) < 5:
        return
    dev_type, dev_id, lat, lon, raw_apps = line_parts
    if not dev_type or not dev_id:
        return
    try:
        apps = tuple(int(a.strip()) for a in raw_apps.split(b','))
    except ValueError:
        # Bug fix: bytes has no ``isidigit`` method -- the old spelling
        # raised AttributeError on this fallback path. ``isdigit`` keeps
        # only the purely numeric ids.
        apps = tuple(int(a.strip()) for a in raw_apps.split(b',') if a.strip().isdigit())
        logging.info("Not all user apps are digits: `%s`" % line)
    try:
        lat, lon = float(lat), float(lon)
    except ValueError:
        # NOTE(review): on bad coords the raw bytes are kept in the tuple --
        # presumably tolerated downstream; confirm before tightening.
        logging.info("Invalid geo coords: `%s`" % line)
    return AppsInstalled(dev_type, dev_id, lat, lon, apps)
def pack_appsinstalled(appsinstalled):
    """Serialize one record into its memcache (key, protobuf value) pair."""
    user_apps = appsinstalled_pb2.UserApps()
    user_apps.lat = appsinstalled.lat
    user_apps.lon = appsinstalled.lon
    user_apps.apps.extend(appsinstalled.apps)
    # Key format: "<dev_type>:<dev_id>" as bytes.
    key = b'%s:%s' % (appsinstalled.dev_type, appsinstalled.dev_id)
    return key, user_apps.SerializeToString()
#@profile
def memc_worker(memc: memcache.Client, tasks: q.Queue, results: q.Queue, is_dry: bool = False, attempts: int = MEMCACHE_ATTEMPTS, retry_timeout: float = MEMCACHE_RETRY_TIMEOUT):
    """Thread worker: drain batches from ``tasks`` into one memcache server.

    Consumes dicts of {key: serialized value} until the WORKER_STOP sentinel
    arrives, then pushes its (processed, errors) totals onto ``results`` and
    exits. In dry-run mode records are only logged, never written.
    """
    processed = errors = 0
    cp = mp.current_process()
    ct = mt.current_thread()
    logging.debug('-%s:%s- start processing memcache on %s', cp.name, ct.name, memc.servers[0])
    while True:
        try:
            records = tasks.get() # timeout = 0.01
            if records == WORKER_STOP:
                # Report totals before exiting so the producer can aggregate.
                logging.debug('-%s:%s- put results from %s: %d / %d', cp.name, ct.name, memc.servers[0], errors, processed)
                results.put((processed, errors))
                logging.debug('-%s:%s- stop processing memcache on %s: %d / %d', cp.name, ct.name, memc.servers[0], errors, processed)
                break
            if is_dry:
                for key, value in records.items():
                    logging.debug("-%s:%s- %s - %s -> %s" % (cp.name, ct.name, memc.servers[0], key, value[:64].replace(b'\n', b' ')))
                    processed += 1
                continue
            retry = 0
            # set_multi returns the list of keys that failed to store.
            error_keys = memc.set_multi(records, noreply=False)
            # NOTE(review): this loop retries (and sleeps) ``attempts`` times
            # even when error_keys is already empty -- presumably harmless
            # but wasteful; confirm before tightening.
            while retry < attempts:
                error_keys = memc.set_multi({key: records[key] for key in error_keys})
                retry += 1
                time.sleep(retry_timeout)
            errors += len(error_keys)
            processed += len(records)
            logging.debug('-%s:%s- saving into memcache %s: %d / %d', cp.name, ct.name, memc.servers[0], errors, processed)
        except BaseException as err:
            # Keep the worker alive: the producer expects a result message.
            logging.exception('-%s:%s- Unexpected exception [%s] occurred while processing memcache on %s' % (cp.name, ct.name, err, memc.servers[0]))
#@profile
def process_file(options: Values, args: tuple[int, str]) -> tuple[str, int, int]:
    """Stream one gzipped TSV file and fan its records out to per-device-type
    memcache worker threads.

    Args:
        options: parsed CLI options (per-device memcache addresses, dry flag).
        args: (position index for this file's tqdm bar, file path).

    Returns:
        (file name, processed record count, error count); zeros for an empty file.
    """
    idx, fn = args
    cp = mp.current_process()
    logging.info('Processing %s' % fn)
    processed = errors = 0
    # Empty files are skipped entirely: no workers, no progress bar.
    if (f_size := os.path.getsize(fn)):
        # Device type -> memcache address routing table. Keys are bytes
        # because the file is processed without decoding.
        device_memc = {
            b'idfa': options.idfa,
            b'gaid': options.gaid,
            b'adid': options.adid,
            b'dvid': options.dvid,
        }
        tasks = {}
        workers = []
        results = q.Queue()
        # Per-device-type {key: value} buffers, flushed in batches to workers.
        buffers = cs.defaultdict(dict)
        # One worker thread + dedicated task queue per memcache server.
        for dev_type, memc_addr in device_memc.items():
            memc = memcache.Client([memc_addr], socket_timeout=MEMCACHE_TIMEOUT)
            tasks[dev_type] = q.Queue()
            worker = mt.Thread(
                target=memc_worker,
                args=(memc, tasks[dev_type], results, options.dry),
                daemon=False,
            )
            workers.append(worker)
            worker.start()
        # Progress tracks the *compressed* file position (f.tell of the raw
        # file), so `total=f_size` matches the on-disk size.
        with tqdm(desc=f'{fn: <64}', total=f_size, unit='b', unit_scale=True, unit_divisor=1024, position=idx, ) as pbar:
            with open(fn, 'rb') as f, gzip.open(f, 'rb') as gz: #, io.BufferedReader(gz) as bf:
                for line in gz:
                    pbar.n = f.tell()
                    pbar.update(1)
                    line = line.strip() # processing in bytes
                    if not line:
                        continue
                    appsinstalled = parse_appsinstalled(line)
                    if not appsinstalled:
                        logging.error("-%s- Invalid appsinstalled record: %s" % (cp.name, line))
                        errors += 1
                        continue
                    dev_type = appsinstalled.dev_type
                    if dev_type not in device_memc:
                        errors += 1
                        logging.error('-%s- Unknown device type: %s' % (cp.name, dev_type))
                        continue
                    key, value = pack_appsinstalled(appsinstalled)
                    buffers[dev_type][key] = value
                    # Hand a full batch to the worker and start a fresh buffer.
                    if len(buffers[dev_type]) >= WORKER_BUFFER_RECORDS:
                        logging.debug('-%s- put %d records of type %s for memcache on %s', cp.name, len(buffers[dev_type]), dev_type, device_memc[dev_type])
                        tasks[dev_type].put(buffers[dev_type])
                        buffers[dev_type] = {}
            # flush remaining partial buffers
            for dev_type, records in buffers.items():
                if records:
                    tasks[dev_type].put(records)
            # stop workers: one sentinel per queue
            logging.debug('-%s- stopping workers: %d', cp.name, len(tasks))
            for task in tasks.values():
                task.put(WORKER_STOP)
            # join workers
            logging.debug('-%s- joining workers: %d', cp.name, len(workers))
            for worker in workers:
                worker.join()
            # aggregate the (processed, errors) tuples each worker reported
            logging.debug('-%s- starting results processing: %s', cp.name, results.empty())
            while not results.empty():
                result = results.get() # or timeout=0.01
                logging.debug('get result: %s', result)
                processed += result[0]
                errors += result[1]
            pbar.refresh()
    return fn, processed, errors
#@profile
def main(options: Values):
    """Process every file matching options.pattern in a multiprocessing pool
    and report each file's error rate against NORMAL_ERR_RATE.
    """
    logging.info("start processing with options: %s", options)
    # Share tqdm's write lock with the pool workers so concurrent progress
    # bars don't garble each other's output.
    with mp.Pool(
        processes=options.workers,
        initializer=tqdm.set_lock,
        initargs=(tqdm.get_lock(),)
    ) as ps_pool:
        pf = partial(process_file, options)
        # sorted + enumerate gives each file a stable tqdm bar position.
        for fn, processed, errors in ps_pool.imap_unordered(pf, enumerate(sorted(glob.iglob(options.pattern)))):
            logging.debug("%s: %d - ok, %d - errs", fn, processed, errors)
            # A file with zero processed records counts as a full failure.
            err_rate = float(errors) / processed if processed else 1
            if err_rate < NORMAL_ERR_RATE:
                logging.info("Processed (%d) records. Acceptable error rate (%s). Successfull load %s", processed, err_rate, fn)
            else:
                logging.error("Processed (%d) records. High error rate (%s > %s). Failed load %s", processed, err_rate, NORMAL_ERR_RATE, fn)
            # Dot-prefix the file so reruns skip it (marks it as handled).
            dot_rename(fn)
def prototest():
    """Smoke-test the protobuf round trip: parse two sample TSV lines,
    serialize each as a UserApps message, deserialize, and compare."""
    sample = b"idfa\t1rfw452y52g2gq4g\t55.55\t42.42\t1423,43,567,3,7,23\ngaid\t7rfw452y52g2gq4g\t55.55\t42.42\t7423,424"
    for raw in sample.splitlines():
        dev_type, dev_id, raw_lat, raw_lon, raw_apps = raw.strip().split(b"\t")
        app_ids = [int(token) for token in raw_apps.split(b",") if token.isdigit()]
        original = appsinstalled_pb2.UserApps()
        original.lat = float(raw_lat)
        original.lon = float(raw_lon)
        original.apps.extend(app_ids)
        restored = appsinstalled_pb2.UserApps()
        restored.ParseFromString(original.SerializeToString())
        assert original == restored
if __name__ == '__main__':
    # CLI (optparse): test mode, optional log file, pool size, dry run,
    # input glob, and one memcache address per device type.
    op = OptionParser()
    op.add_option("-t", "--test", action="store_true", default=False)
    op.add_option("-l", "--log", action="store", default=None)
    op.add_option("-w", "--workers", type="int", action="store", default=mp.cpu_count()) # dest="workers_count"
    op.add_option("--dry", action="store_true", default=False)
    op.add_option("--pattern", action="store", default="./data/appsinstalled/*.tsv.gz")
    op.add_option("--idfa", action="store", default="127.0.0.1:33013")
    op.add_option("--gaid", action="store", default="127.0.0.1:33014")
    op.add_option("--adid", action="store", default="127.0.0.1:33015")
    op.add_option("--dvid", action="store", default="127.0.0.1:33016")
    (opts, args) = op.parse_args()
    # Dry runs log at DEBUG so every would-be memcache write is visible.
    logging.basicConfig(filename=opts.log, level= logging.INFO if not opts.dry else logging.DEBUG,
                        format='[%(asctime)s] %(levelname).1s %(message)s', datefmt='%Y.%m.%d %H:%M:%S')
    if opts.test:
        # Protobuf round-trip self-test; exits without touching memcache.
        prototest()
        sys.exit(0)
    logging.info("Memc loader started with options: %s" % opts)
    try:
        main(opts)
    except Exception as e:
        logging.exception("Unexpected error: %s" % e)
        sys.exit(1)
|
from setuptools import find_packages, setup

# Packaging for the disk-exporter Twisted plugin; sources live under src/.
setup(
    name="disk-exporter",
    version="1.0",
    # Twisted plugin discovery reads files from disk, so the package must
    # not be installed as a zipped egg.
    zip_safe=False,
    package_dir={"": "src"},
    packages=find_packages(where="src"),
    include_package_data=True,
    # Drop-in module registering the exporter with Twisted's plugin system.
    py_modules=["twisted.plugins.disk_exporter_dropin"],
    install_requires=[
        "twisted[tls]",
        "prometheus_client",
        "psutil",
        "nsenter",
        "attrs",
    ],
)
|
import h5py
import numpy as np
import pandas as pd
from argparse import ArgumentParser
def dataframe_to_deepsurv_ds(df, event_col='Event', time_col='Time'):
    """Convert a survival DataFrame into the dict-of-arrays layout DeepSurv expects.

    Returns {'x': covariates (float32), 'e': event indicators (int32),
    't': event/censoring times (float32)}; 'x' is every column except the
    event and time columns.
    """
    covariates = df.drop([event_col, time_col], axis=1).values.astype(np.float32)
    return {
        'x': covariates,
        'e': df[event_col].values.astype(np.int32),
        't': df[time_col].values.astype(np.float32),
    }
def dataframes_to_hd5(df, ofile, event_col, time_col):
    """Write each DataFrame in `df` (mapping group name -> DataFrame) into
    `ofile` as an HDF5 group with 'x', 'e' and 't' datasets."""
    with h5py.File(ofile, 'w') as h:
        for name in df:
            converted = dataframe_to_deepsurv_ds(df[name], event_col, time_col)
            grp = h.create_group(name)
            for field in ('x', 'e', 't'):
                grp.create_dataset(field, data=converted[field])
if __name__ == '__main__':
    # CLI: input CSV -> HDF5 train/test split for DeepSurv.
    parser = ArgumentParser()
    parser.add_argument('ifile')  # input CSV path
    parser.add_argument('ofile')  # output HDF5 path
    parser.add_argument('-e', '--event_col', default='OSEvent')
    parser.add_argument('-t', '--time_col', default='TTDy')
    parser.add_argument('--txcol', type=str, default='SBRT')  # treatment indicator column
    parser.add_argument('--drop', help='drop columns', nargs='+', type=str)
    parser.add_argument('--droprows', help='drop rows where [cols] have value --droprowsval', nargs='+', type=str)
    parser.add_argument(
        '--droprowsval', help='value at which to drop the rows from --droprows, default 1', type=int, default=1)
    parser.add_argument('--droprows2', help='drop rows where [cols] have value --droprowsval2', nargs='+', type=str)
    parser.add_argument(
        '--droprowsval2', help='value at which to drop the rows from --droprows2, default 0', type=int, default=0)
    args = parser.parse_args()
    print(args)
    df = pd.read_csv(args.ifile)
    # NOTE(review): row dropping only takes effect when --drop is also
    # given — confirm this nesting is intentional.
    if not args.drop is None:
        if not args.droprows is None:
            # Drop rows where ANY of the listed columns equals --droprowsval.
            drop_idx = df.where((df.loc[:, args.droprows] == args.droprowsval).any(axis='columns')).dropna().index
            df.drop(drop_idx, axis='rows', inplace=True)
        if not args.droprows2 is None:
            drop_idx = df.where((df.loc[:, args.droprows2] == args.droprowsval2).any(axis='columns')).dropna().index
            df.drop(drop_idx, axis='rows', inplace=True)
        df.drop(args.drop, axis='columns', inplace=True)
    # print(df)
    # 50/50 train/test split, sampled separately within the treated and
    # untreated strata so both splits keep the treatment balance.
    frac = 0.5
    df_train_treated = df[df[args.txcol] == 1].sample(frac=frac)
    df_train_untreated = df[df[args.txcol] == 0].sample(frac=frac)
    # Test set = complement of the sampled training rows within each stratum.
    df_test_treated = df.loc[df[df[args.txcol] == 1].index.symmetric_difference(df_train_treated.index)]
    df_test_untreated = df.loc[df[df[args.txcol] == 0].index.symmetric_difference(df_train_untreated.index)]
    df_train = pd.concat([df_train_treated, df_train_untreated])
    df_test = pd.concat([df_test_treated, df_test_untreated])
    # dataframes = {'train': df_train, 'test': df_test, 'valid': df_test}
    # dataframes = {'train': df, 'test': df, 'valid': df}
    dataframes = {'train': df_train, 'test': df_test}
    # print(df_train)
    # print(df_test)
    dataframes_to_hd5(dataframes, args.ofile, args.event_col, args.time_col)
|
import cx_Oracle
import math
import pandas as pd
class connection_banner:
    """class to generate connection with databases in Banner.
    """
    # NOTE(review): methods below are written without `self` and are used as
    # namespace-style helpers, e.g. connection_banner.banner_con(...).
    def init_oracle_database(path_oracle_client):
        """Init connection with oracle database with client.
        Args:
            path_oracle_client (str): path where is instant client
        Returns:
            cx_Oracle.init_oracle_client: init oracle instant client
        """
        return cx_Oracle.init_oracle_client(lib_dir=path_oracle_client)
    def banner_con(user, password, service_name, host, port = '1521'):
        """connection with Banner and easy make dns with cx_Oracle.
        Args:
            user (str): user to connect.
            password (str): password to connect.
            service_name (str): service name to connect.
            host (str): host to connect.
            port (str, optional): port to connect. Defaults to '1521'.
        Returns:
            cx_Oracle.connect: object to connect with Banner.
        """
        BANNER = cx_Oracle.connect(user=user, password=password, dsn=cx_Oracle.makedsn(host, port, service_name=service_name))
        return BANNER
    def time_process(toc, tic):
        """return process time. import time; tic = time.process_time(); toc = time.process_time()
        Args:
            toc (float): time in seconds of finshed process
            tic (float): time in seconds of begin process
        """
        # Returns [hours, minutes, seconds-ish] with single digits
        # zero-padded as strings (unpadded values stay ints).
        # NOTE(review): when (toc - tic) < 60, math.floor((toc - tic)/60) is 0
        # and the `%` on the third element raises ZeroDivisionError — confirm
        # the intended input range is >= 60 seconds.
        return(
            [
                ''.join(['0',str(math.floor((toc - tic)/60/60))]) if math.floor((toc - tic)/60/60) < 10 else math.floor((toc - tic)/60/60),
                ''.join(['0',str(math.floor((toc - tic)/60))]) if math.floor((toc - tic)/60) < 10 else math.floor((toc - tic)/60),
                ''.join(['0',str(math.floor((toc - tic)/60 % math.floor((toc - tic)/60) * 60))]) if (toc - tic)/60 % math.floor((toc - tic)/60) * 60 < 10 else math.floor((toc - tic)/60 % math.floor((toc - tic)/60) * 60)
            ]
        )
class banner:
    """Namespace of read-only Banner (Oracle) query helpers.

    NOTE: methods are written without `self` and are called on the class
    itself, e.g. ``banner.capp_runs(conn)``.
    """
    def capp_runs(BANNER):
        """statement go to SMRRQCM and get you CAPP info.
        Args:
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            Data Frame: Total CAPP excecuted or running
        """
        CAPP = pd.read_sql_query("SELECT COUNT (*) FROM SMRRQCM WHERE SMRRQCM_COMPLY_DATE IS NOT NULL", BANNER)
        return CAPP
    def capp_not_runs(BANNER):
        """statement go to SMRRQCM and get you CAPP info.
        Args:
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            Data Frame: Total CAPP NO excecuted or running
        """
        CAPP = pd.read_sql_query("SELECT COUNT (*) FROM SMRRQCM WHERE SMRRQCM_COMPLY_DATE IS NULL", BANNER)
        return CAPP
    def program_capp(comply_date = "Y", BANNER=None):
        """Count PIDMS with CAPP or NOT in selected program.

        BUG FIX: this method previously read a global ``BANNER`` that was
        never defined (NameError) and returned an unbound local for invalid
        input (UnboundLocalError). The connection is now an explicit keyword
        parameter and invalid input returns None.

        Args:
            comply_date (str, optional): Whether 'Y' you can check students with CAPP or not 'N'. Defaults to "Y".
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            Data Frame: counts per program, or None for invalid comply_date.
        """
        PROGRAMS_BANNER_CAPP = None
        if (comply_date == "Y"):
            print("Buscando SMRRQCM_COMPLY_DATE IS NOT NULL","\n")
            PROGRAMS_BANNER_CAPP = pd.read_sql_query("SELECT COUNT (DISTINCT SMRRQCM_PIDM) AS NUMBER_STUDENT, SMRRQCM_PROGRAM AS PROGRAM_STUDENT FROM SMRRQCM WHERE SMRRQCM_COMPLY_DATE IS NOT NULL GROUP BY SMRRQCM_PROGRAM ORDER BY COUNT (DISTINCT SMRRQCM_PIDM)", BANNER)
        elif (comply_date == "N"):
            print("Buscando SMRRQCM_COMPLY_DATE IS NULL","\n")
            PROGRAMS_BANNER_CAPP = pd.read_sql_query("SELECT COUNT (DISTINCT SMRRQCM_PIDM) AS NUMBER_STUDENT, SMRRQCM_PROGRAM AS PROGRAM_STUDENT FROM SMRRQCM WHERE SMRRQCM_COMPLY_DATE IS NULL GROUP BY SMRRQCM_PROGRAM ORDER BY COUNT (DISTINCT SMRRQCM_PIDM)", BANNER)
        else:
            print("we need: 'Y' or 'N' \n")
        return PROGRAMS_BANNER_CAPP
    def count_student(program, period, BANNER):
        """Count students in program from a period.
        Args:
            program (str): Program to be count.
            period (int): Period to be count.
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            Data Frame: Total students there are in selected program.
        """
        # NOTE: query is assembled by string concatenation; safe only for
        # trusted inputs — consider bind parameters for external values.
        Query = ''.join(["SELECT COUNT(DISTINCT SPRIDEN_ID) AS NUMBR_STUDENT, SOVLCUR_PROGRAM AS PROGRAM_STUDENT FROM SOVLCUR , SGBSTDN G1, SORLCUR, SORLFOS, SPRIDEN WHERE SOVLCUR_PROGRAM = '" ,program, "' AND SOVLCUR_LMOD_CODE = 'LEARNER' AND SOVLCUR_CURRENT_IND = 'Y' AND SOVLCUR_ACTIVE_IND = 'Y' AND SORLFOS_PIDM = SORLCUR_PIDM AND SORLCUR_SEQNO = SORLFOS_LCUR_SEQNO AND SOVLCUR_PIDM = SORLCUR_PIDM AND SOVLCUR_PROGRAM = SORLCUR_PROGRAM AND SORLFOS_CSTS_CODE <> 'AWARDED' AND SGBSTDN_PIDM = SOVLCUR_PIDM AND SGBSTDN_STST_CODE <> 'IS' AND SOVLCUR_PIDM = SPRIDEN_PIDM AND G1.SGBSTDN_TERM_CODE_EFF = (SELECT MAX(SGBSTDN_TERM_CODE_EFF) FROM SGBSTDN WHERE SGBSTDN_TERM_CODE_EFF <= ",str(period)," AND SGBSTDN_PIDM = G1.SGBSTDN_PIDM) GROUP BY SOVLCUR_PROGRAM"])
        PROGRAM = pd.read_sql_query(Query, BANNER)
        return PROGRAM
    def student(program, period, BANNER):
        """List of student in programa for a period.
        Args:
            program (str): Program where student are.
            period (int): Period where student are.
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            Data Frame: return list of student in the Program.
        """
        print(''.join(["Esta ejecutando el programa: ", program, " para el periodo ", str(period)]))
        # NOTE: string-concatenated SQL — trusted inputs only.
        Query = ''.join(["SELECT DISTINCT SPRIDEN_ID AS ESTUDIANTES, SOVLCUR_PROGRAM AS PROGRAMA FROM SOVLCUR , SGBSTDN G1, SORLCUR, SORLFOS, SPRIDEN WHERE SOVLCUR_PROGRAM = '" ,program, "' AND SOVLCUR_LMOD_CODE = 'LEARNER' AND SOVLCUR_CURRENT_IND = 'Y' AND SOVLCUR_ACTIVE_IND = 'Y' AND SORLFOS_PIDM = SORLCUR_PIDM AND SORLCUR_SEQNO = SORLFOS_LCUR_SEQNO AND SOVLCUR_PIDM = SORLCUR_PIDM AND SOVLCUR_PROGRAM = SORLCUR_PROGRAM AND SORLFOS_CSTS_CODE <> 'AWARDED' AND SGBSTDN_PIDM = SOVLCUR_PIDM AND SGBSTDN_STST_CODE <> 'IS' AND SOVLCUR_PIDM = SPRIDEN_PIDM AND G1.SGBSTDN_TERM_CODE_EFF = (SELECT MAX(SGBSTDN_TERM_CODE_EFF) FROM SGBSTDN WHERE SGBSTDN_TERM_CODE_EFF <= ",str(period)," AND SGBSTDN_PIDM = G1.SGBSTDN_PIDM)"])
        PROGRAM = pd.read_sql_query(Query, BANNER)
        return PROGRAM
    def studypath_student(ID_STUDENT, BANNER):
        """Give you studypath student with ID student.
        Args:
            ID_STUDENT (int): ID student.
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            DataFrame: return studypath student.
        """
        statement = ''.join(["SELECT DISTINCT SPRIDEN_ID AS ID,SPRIDEN_PIDM AS PIDM,SOVLCUR_KEY_SEQNO AS STUDYPATH,SOVLCUR_PROGRAM AS PROGRAMA,SOVLCUR_CURRENT_IND AS ACTUAL,SOVLCUR_ACTIVE_IND AS ACTIVO, SOVLCUR_TERM_CODE AS PERIODO FROM SOVLCUR,SPRIDEN WHERE SPRIDEN_PIDM = SOVLCUR_PIDM AND SPRIDEN_ID = '", str(ID_STUDENT), "'"])
        STSP = pd.read_sql_query(statement, BANNER)
        return STSP
    def search_PIDM(PIDM, BANNER):
        """Give you general codes from student with PIDM.
        Args:
            PIDM (int): student PIDM
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            DataFrame: return data frame with types codes of student
        """
        print("Buscando:",PIDM,"\t", "\n")
        STATEMENT = ''.join(["SELECT SPRIDEN_ID, SPRIDEN_PIDM, SPBPERS_SSN, SPBPERS_LEGAL_NAME FROM SPRIDEN, SPBPERS WHERE SPRIDEN_PIDM = SPBPERS_PIDM AND SPRIDEN_PIDM = '", str(PIDM), "'"])
        PERSON = pd.read_sql_query(STATEMENT, BANNER)
        return PERSON
    def search_ID(ID, BANNER):
        """Give you general codes from student with ID.
        Args:
            ID (int): ID in BANNER of student.
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            DataFrame: return data frame with types codes of student
        """
        STATEMENT = ''.join(["SELECT SPRIDEN_ID, SPRIDEN_PIDM, SPBPERS_SSN, SPBPERS_LEGAL_NAME FROM SPRIDEN, SPBPERS WHERE SPRIDEN_PIDM = SPBPERS_PIDM AND SPRIDEN_ID = '", str(ID), "'"])
        PERSON = pd.read_sql_query(STATEMENT, BANNER)
        return PERSON
    def search_ssn(SSN, BANNER):
        """Give you general codes from student with SSN.
        Args:
            SSN (int): SSN or CC of student.
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        Returns:
            DataFrame: return data frame with types codes of student
        """
        STATEMENT = ''.join(["SELECT SPRIDEN_ID, SPRIDEN_PIDM, SPBPERS_SSN, SPBPERS_LEGAL_NAME FROM SPRIDEN, SPBPERS WHERE SPRIDEN_PIDM = SPBPERS_PIDM AND SPBPERS_SSN IN ('", str(SSN), "')"])
        PERSON = pd.read_sql_query(STATEMENT, BANNER)
        return PERSON
class capp:
    """class of CAPP in Banner. Remember: CAPP run = 'Y'. CAPP not run = 'N'. Both = 'B'
    """
    def capp_summary(CAPP = "B", BANNER=None):
        """Print CAPP run / not-run counts.

        BUG FIX: the query helpers require a connection argument, but the
        original calls passed none (TypeError on the 'B' branch, and a
        NameError inside banner for 'Y'/'N'); the connection is now forwarded.
        The 'Y'/'N' branches also silently discarded their results — they now
        print them like the 'B' branch does.

        Args:
            CAPP (str, optional): 'Y' run only, 'N' not-run only, 'B' both.
            BANNER (cx_Oracle.Connection): Connection to Oracle Database.
        """
        if (CAPP == "B"):
            print(banner.capp_not_runs(BANNER))
            print(banner.capp_runs(BANNER))
        elif (CAPP == "Y"):
            print(banner.capp_runs(BANNER))
        elif (CAPP == "N"):
            print(banner.capp_not_runs(BANNER))
        else:
            print("Remember: CAPP run = 'Y'. CAPP not run = 'N'. Both = 'B'")
import string
import random
from django import forms
from .models import Tool
from utils.utilities import is_empty
class ToolForm(forms.ModelForm):
    """Create/edit form for Tool with Bootstrap-styled widgets."""
    class Meta:
        model = Tool
        fields = ['name', 'status', 'shared_from', 'category', 'description', 'picture']
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'status': forms.Select(attrs={'class': 'form-control'}),
            'shared_from': forms.Select(attrs={'class': 'form-control'}),
            'category': forms.Select(attrs={'class': 'form-control'}),
            'description': forms.Textarea(attrs={'class': 'form-control resize-text-area-none'}),
            'picture': forms.FileInput(attrs={'class': 'form-control'}),
        }
    def clean_name(self):
        """Reject blank/whitespace-only names."""
        if is_empty(self.cleaned_data["name"]):
            raise forms.ValidationError("Invalid name.")
        return self.cleaned_data["name"]
    def clean_status(self):
        return self.cleaned_data["status"]
    def clean_description(self):
        """Reject blank/whitespace-only descriptions."""
        if is_empty(self.cleaned_data["description"]):
            raise forms.ValidationError("Invalid description.")
        return self.cleaned_data["description"]
    def clean_is_shared_from_home(self):
        # NOTE(review): 'is_shared_from_home' is not in Meta.fields, so Django
        # never invokes this hook — confirm whether it can be removed.
        return self.cleaned_data["is_shared_from_home"]
    def clean_category(self):
        return self.cleaned_data["category"]
    def clean_picture(self):
        """Validate extension and size (<= 3MB) of the uploaded picture."""
        if 'picture' in self.cleaned_data:
            file_name = str(self.cleaned_data['picture']).lower()
            file_parts = file_name.split(".")
            if not file_parts[-1] in ['jpeg', 'png', 'bmp', 'gif', 'jpg']:
                raise forms.ValidationError("Invalid image format.")
            try:
                if self.cleaned_data['picture'].size > 3*1024*1024:
                    raise forms.ValidationError("Image file too large (> 3MB).")
            except AttributeError:
                # An unchanged picture (no fresh upload) has no .size; keep it.
                pass
        return self.cleaned_data["picture"]
    def set_owner(self, owner):
        """Attach the owning user to the instance before saving."""
        self.instance.owner = owner
    def generate_code(self):
        """Assign a unique 10-character share code to the instance.

        BUG FIX: the uniqueness check used a bare ``except:``, so any database
        error (not only a free code) was treated as "code available". Only
        Tool.DoesNotExist now means the candidate code is unused.
        """
        while True:
            new_code = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
            try:
                Tool.objects.get(code=new_code)
            except Tool.DoesNotExist:
                self.instance.code = new_code
                break
|
from wagtail.tests.utils import WagtailPageTests
from wagtail.core.models import Page
from .models import HomePage
class SlugDiacriticsTestCase(WagtailPageTests):
    """Verify that auto-generated slugs lowercase titles and transliterate
    Latin-extended characters (e.g. Turkish dotted I)."""
    def _create_under_home(self, title):
        # Attach a new HomePage beneath the site root and return it.
        home = Page.objects.get(url_path="/home/")
        child = HomePage(title=title)
        home.add_child(instance=child)
        return child
    def test_basic_plane(self):
        page = self._create_under_home("Hello Vienna")
        self.assertEqual(page.slug, "hello-vienna")
    def test_latin_extended(self):
        page = self._create_under_home("Hello İstanbul")
        self.assertEqual(page.slug, "hello-istanbul")
|
import json
import os
from unittest.mock import MagicMock, patch, call
import pytest
from configobj import ConfigObj
from peek.connection import ConnectFunc
from peek.natives import ConnectionFunc, SessionFunc
from peek.peekapp import PeekApp
# Shared mock for peek's SQLite-backed history. MockHistory is patched in as
# the SqLiteHistory class and always hands back this single instance, so
# tests can assert on its save/load calls.
mock_history = MagicMock()
MockHistory = MagicMock(return_value=mock_history)
@pytest.fixture
def peek_app():
    """Build a PeekApp wired to the packaged peekrc defaults, with the prompt
    session and SQLite history replaced by mocks."""
    from peek import __file__ as package_root
    package_root = os.path.dirname(package_root)
    package_config_file = os.path.join(package_root, 'peekrc')
    config_obj = ConfigObj(package_config_file)
    def get_config(_, extra_config):
        # Overlay any extra options onto the packaged defaults.
        config_obj.merge(ConfigObj(extra_config))
        return config_obj
    class MockCliNs:
        # Minimal stand-in for the argparse namespace PeekApp expects.
        def __init__(self):
            self.username = 'foo'
            self.password = 'password'
            self.zero_connection = False
    with patch('peek.peekapp.PromptSession', MagicMock()), \
            patch('peek.peekapp.get_config', get_config), \
            patch('peek.peekapp.SqLiteHistory', MockHistory):
        return PeekApp(extra_config_options=('log_level=None', 'use_keyring=False'), cli_ns=MockCliNs())
@patch.dict(os.environ, {'PEEK_PASSWORD': 'password'})
def test_connection_related_funcs(peek_app):
    """Walk through the connect/connection/session built-ins: creating
    connections, listing/selecting, saving and loading sessions, renaming,
    removing, keeping and moving entries. '*' marks the current connection."""
    connect_f = ConnectFunc()
    # Three extra connections: basic auth over SSL, API key, bearer token.
    assert '* [1] bar @ https://localhost:9200' in connect_f(
        peek_app, username='bar', password='password', use_ssl=True)
    assert '* [2] K-id @ http://example.com:9200' in connect_f(
        peek_app, api_key='id:key', hosts='example.com:9200')
    assert '* [3] token-auth' in connect_f(
        peek_app, token='access_token', name='token-auth')
    connection_f = ConnectionFunc()
    # With no args: list all connections.
    assert connection_f(peek_app) == '''  [0] foo @ http://localhost:9200
  [1] bar @ https://localhost:9200
  [2] K-id @ http://example.com:9200
* [3] token-auth'''
    # A positional int selects the connection at that index.
    assert connection_f(peek_app, 1) == '''  [0] foo @ http://localhost:9200
* [1] bar @ https://localhost:9200
  [2] K-id @ http://example.com:9200
  [3] token-auth'''
    session_f = SessionFunc()
    assert "Session save as: '__default__'" == session_f(peek_app, **{'@': ['save']})
    mock_history.save_session.assert_called_with('__default__', json.dumps(peek_app.es_client_manager.to_dict()))
    # Make the 'load' below return exactly the state just saved.
    mock_history.load_session = MagicMock(return_value=json.dumps(peek_app.es_client_manager.to_dict()))
    assert connection_f(peek_app, rename='local-bar') == '''  [0] foo @ http://localhost:9200
* [1] local-bar
  [2] K-id @ http://example.com:9200
  [3] token-auth'''
    assert connection_f(peek_app, **{'@': ['info']}) == {
        'name': 'local-bar', 'hosts': 'localhost:9200',
        'cloud_id': None, 'auth': 'Username bar', 'use_ssl': True,
        'verify_certs': False, 'ca_certs': None, 'client_cert': None,
        'client_key': None, 'headers': None}
    assert connection_f(peek_app, remove=0) == '''* [0] local-bar
  [1] K-id @ http://example.com:9200
  [2] token-auth'''
    # Selection by connection name.
    assert connection_f(peek_app, 'token-auth') == '''  [0] local-bar
  [1] K-id @ http://example.com:9200
* [2] token-auth'''
    # keep=1 drops everything except the entry at index 1.
    assert connection_f(peek_app, keep=1) == '''* [0] K-id @ http://example.com:9200'''
    # Loading the saved session restores the original four connections.
    assert session_f(peek_app, **{'@': ['load']}) == '''  [0] foo @ http://localhost:9200
* [1] bar @ https://localhost:9200
  [2] K-id @ http://example.com:9200
  [3] token-auth'''
    # move relocates the *current* connection to the given index.
    assert connection_f(peek_app, move=0) == '''* [0] bar @ https://localhost:9200
  [1] foo @ http://localhost:9200
  [2] K-id @ http://example.com:9200
  [3] token-auth'''
    assert connection_f(peek_app, move=3) == '''  [0] foo @ http://localhost:9200
  [1] K-id @ http://example.com:9200
  [2] token-auth
* [3] bar @ https://localhost:9200'''
    # Moving to the index it already occupies is a no-op.
    assert connection_f(peek_app, move=3) == '''  [0] foo @ http://localhost:9200
  [1] K-id @ http://example.com:9200
  [2] token-auth
* [3] bar @ https://localhost:9200'''
def test_connect_with_failed_test_will_not_be_added(peek_app):
    """A connection whose test ping raises must not join the client manager."""
    peek_app.display = MagicMock()
    peek_app.display.error = MagicMock()
    failure = RuntimeError('Should fail')
    # Elasticsearch stand-in whose transport always raises on any request.
    broken_es = MagicMock()
    broken_es.transport.perform_request = MagicMock(side_effect=failure)
    with patch('peek.connection.Elasticsearch', MagicMock(return_value=broken_es)):
        connect_f = ConnectFunc()
        assert connect_f(peek_app, username=None, test=True) is None
    # The failure is surfaced to the user and the manager keeps only the
    # original fixture connection.
    peek_app.display.error.assert_called_with(failure)
    assert str(peek_app.es_client_manager) == '* [0] foo @ http://localhost:9200'
def test_echo(peek_app):
    """echo renders Python values in peek's JSON-ish display format:
    scalars as JSON, callables by repr-in-quotes, multiple args space-joined."""
    echo_f = peek_app.vm.functions['echo']
    assert echo_f(peek_app, 0) == '0'
    assert echo_f(peek_app, 1) == '1'
    assert echo_f(peek_app, True) == 'true'
    assert echo_f(peek_app, False) == 'false'
    assert echo_f(peek_app, None) == 'null'
    assert echo_f(peek_app, 'hello') == '"hello"'
    assert echo_f(peek_app, echo_f) == '"<PeekFunction echo>"'
    # Nested containers are rendered compactly (no spaces after separators).
    assert echo_f(peek_app, {'foo': [True, False, None, 'bar', echo_f]}) == \
        '{"foo":[true,false,null,"bar","<PeekFunction echo>"]}'
    assert echo_f(peek_app, {}, [], 42) == '{} [] 42'
@patch.dict(os.environ, {'PEEK_PASSWORD': 'password'})
def test_reset(peek_app):
    """reset drops extra connections, discards VM state and re-inits the
    completer's API specs."""
    old_vm = peek_app.vm
    peek_app.display.info = MagicMock()
    peek_app.completer.init_api_specs = MagicMock()
    peek_app.process_input('let foo = 42')
    assert peek_app.vm.get_value('foo') == 42
    # Build up extra state: renamed default connection plus two new ones.
    peek_app.process_input('connection rename="c0"')
    peek_app.process_input('connect name="c1"')
    peek_app.process_input('connect name="c2"')
    assert len(peek_app.es_client_manager.clients()) == 3
    peek_app.process_input('reset')
    # Back to the single pristine connection and a brand-new VM.
    assert len(peek_app.es_client_manager.clients()) == 1
    assert str(peek_app.es_client_manager) == '* [0] foo @ http://localhost:9200'
    assert peek_app.vm is not old_vm
    peek_app.completer.init_api_specs.assert_called_once()
def test_randint(peek_app):
    """randint(a, b) yields in [a, b); one-arg form in [0, n); no-arg in [0, 100)."""
    peek_app.display.info = MagicMock()
    peek_app.process_input('let foo = randint(1,2)')
    # Upper bound is exclusive, so randint(1,2) can only ever yield 1.
    assert peek_app.vm.get_value('foo') == 1
    peek_app.process_input('let bar = randint(1,10)')
    assert 1 <= peek_app.vm.get_value('bar') < 10
    peek_app.process_input('let fiz = randint(10)')
    # BUG FIX: these two assertions previously re-checked 'bar' (copy-paste),
    # leaving the one- and zero-argument forms completely untested.
    assert 0 <= peek_app.vm.get_value('fiz') < 10
    peek_app.process_input('let fiz = randint()')
    assert 0 <= peek_app.vm.get_value('fiz') < 100
def test_getenv(peek_app):
    """getenv() delegates to os.getenv with '' as the implicit default."""
    peek_app.display.info = MagicMock()
    fake_getenv = MagicMock()
    with patch('os.getenv', fake_getenv):
        peek_app.process_input('let a = getenv("CODE")')
        peek_app.process_input('let b = getenv("FOO")')
        fake_getenv.assert_has_calls([call("CODE", ''), call("FOO", '')])
def test_version(peek_app):
    """version() reports Peek's own version and the elasticsearch-py client's."""
    peek_app.display.info = MagicMock()
    peek_app.process_input('let v = version()')
    value = peek_app.vm.get_value('v')
    assert 'Peek' in value
    # Imported here to compare against whatever version is installed.
    from peek import __version__
    assert f'v{__version__}' in value
    assert 'elasticsearch-py' in value
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-01 08:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: replaces UserProfile's custom kingadmin
    table permissions (list/view/change/add; labels are in Chinese)."""
    dependencies = [
        ('crm', '0011_auto_20170501_1459'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='userprofile',
            options={'permissions': (('crm_table_list', '可以查看kingadmin每张表里所有的数据'), ('crm_table_list_view', '可以访问kingadmin表里每条数据的修改页'), ('crm_table_list_change', '可以对kingadmin表里的每条数据进行修改'), ('crm_table_obj_add_view', '可以访问kingadmin每张表的数据增加页'), ('crm_table_obj_add', '可以对kingadmin每张表进行数据添加'))},
        ),
    ]
|
import asyncio
import json
import signal
from typing import Callable
import pyppeteer
from pyee import AsyncIOEventEmitter
from pymessages.service import MessageService
class ClientOptions:
    """Launch options for MessagesClient: browser headless mode plus saved
    credentials ({"cookies": [...], "localStorage": {...}})."""
    headless: bool
    credentials: object
    def __init__(self, headless=False, credentials=None):
        # BUG FIX: the previous dict-literal default was a single object
        # shared by every instance created without explicit credentials;
        # build a fresh dict per instance instead.
        self.headless = headless
        if credentials is None:
            credentials = {"cookies": [], "localStorage": {}}
        self.credentials = credentials
class MessagesClient(AsyncIOEventEmitter):
    """Pyppeteer-driven client for Google Messages for Web.

    Emits: 'browser-launched' after the page opens, 'qr-code' with the QR
    image's base64 src whenever it changes, and 'authenticated' with a
    MessageService once login is detected.
    """
    page: pyppeteer.page.Page
    browser: pyppeteer.browser.Browser
    # NOTE(review): class-level mutable attributes are shared across ALL
    # instances — confirm a single client per process is the intended use.
    groups = []
    listeners = {}
    options: ClientOptions
    is_authenticated: bool = False
    def __init__(self, credentials={"cookies":[], "localStorage": {}}, headless=True):
        # NOTE(review): mutable default argument is shared between calls;
        # safe only as long as callers never mutate the credentials dict.
        self.loop = asyncio.get_event_loop()
        super(MessagesClient, self).__init__(loop=self.loop)
        try:
            # Stop cleanly on SIGTERM where the event loop supports it.
            self.loop.add_signal_handler(signal.SIGTERM, self.stop)
        except NotImplementedError:
            # e.g. Windows event loops don't implement add_signal_handler.
            pass
        self.options = ClientOptions(headless, credentials)
    def stop(self):
        """SIGTERM handler: stop the event loop."""
        print('Pymessages stopping.')
        self.loop.stop()
    def launch(self):
        """Synchronously launch the browser and start authentication."""
        self.loop.run_until_complete(self._launch(self.options))
    def idle(self, close=True):
        """Block forever servicing events. `close` is currently unused."""
        self.loop.run_forever()
    @staticmethod
    def loadCredentialFile(path):
        """Load a credentials dict (cookies + localStorage) from a JSON file."""
        with open(path) as f:
            cred = json.load(f)
            return cred
    async def _attachReqTracer(self):
        """Watch outgoing requests; the GetWebEncryptionKey call is the
        signal that QR pairing succeeded."""
        @self.page.on('request')
        def on_request(request):
            url = request.url
            # print("Check ------")
            if "Pairing/GetWebEncryptionKey" in url:
                # print("YESSSSS")
                service = MessageService(self.page)
                if not self.is_authenticated:
                    self.emit('authenticated', service) #TODO: pass credentials as well
                    self.is_authenticated = True
    async def _attachQrReader(self):
        """Emit 'qr-code' for the current QR image and on every refresh,
        via a MutationObserver on the QR element's data attribute."""
        await self.page.waitForSelector("body > mw-app > mw-bootstrap > div > main > mw-authentication-container > div > div.content-container > div > div.qr-code-container > div.qr-code-wrapper > mw-qr-code")
        async def _func_to_expose():
            # Called from the page whenever the QR code mutates.
            img = await self.page.J('body > mw-app > mw-bootstrap > div > main > mw-authentication-container > div > div.content-container > div > div.qr-code-container > div.qr-code-wrapper > mw-qr-code > img')
            if img:
                src = await img.getProperty('src')
                if src:
                    self.emit('qr-code', await src.jsonValue()) # qrData = base64 qr image
        await self.page.exposeFunction('onQrChange', _func_to_expose)
        await self.page.evaluate(""" () => {
            const observer = new MutationObserver((mutations) => {
                for (const mutation of mutations) {
                    if (mutation.attributeName === 'data-qr-code') {
                        // @ts-ignore
                        window.onQrChange(mutation)
                    }
                }
            })
            const img = document.querySelector("body > mw-app > mw-bootstrap > div > main > mw-authentication-container > div > div.content-container > div > div.qr-code-container > div.qr-code-wrapper > mw-qr-code")
            if (img) {
                observer.observe(img, { attributes: true, childList: true, characterData: true })
            }
            return observer
        }
        """)
        # Also emit the initial QR code once the image itself is rendered.
        await self.page.waitForSelector('body > mw-app > mw-bootstrap > div > main > mw-authentication-container > div > div.content-container > div > div.qr-code-container > div.qr-code-wrapper > mw-qr-code > img')
        img = await self.page.J('body > mw-app > mw-bootstrap > div > main > mw-authentication-container > div > div.content-container > div > div.qr-code-container > div.qr-code-wrapper > mw-qr-code > img')
        if img:
            src = await img.getProperty("src")
            if src:
                self.emit('qr-code', await src.jsonValue())
    async def _launch(self, options: ClientOptions):
        """Open the browser, enable "remember me", then either start QR
        pairing (no saved localStorage) or restore saved credentials."""
        browser = await pyppeteer.launch({"headless": options.headless})
        self.browser = browser
        page = await browser.newPage()
        self.page = page
        await self.page.goto('https://messages.android.com', { "waitUntil": 'load' })
        await self.page.waitForSelector('#mat-slide-toggle-1-input')
        await self.page.evaluate("""() => {
            const checkbox = document.querySelector('#mat-slide-toggle-1-input')
            checkbox.click()
        }
        """) #remember me button
        self.emit('browser-launched')
        if len(options.credentials["localStorage"]) == 0:
            # Fresh login: surface QR codes and wait for pairing.
            await self._attachQrReader()
            await self._attachReqTracer()
            return
        else:
            # Restore a previous session from cookies + localStorage.
            await self.setCredentials(options.credentials)
            service = MessageService(self.page)
            self.emit('authenticated', service)
            self.is_authenticated = True
            try:
                # Dismiss the "remember this computer" banner if it appears.
                await self.page.waitForSelector('#mat-checkbox-1')
                dontshowCheckBox = await self.page.J('#mat-checkbox-1')
                await dontshowCheckBox.click()
                dontShowBtn = await self.page.J('body > mw-app > mw-bootstrap > div > main > mw-main-container > div > mw-main-nav > div > mw-banner > div > mw-remember-this-computer-banner > div > div.button-align > button.action-button.confirm.mat-focus-indicator.mat-button.mat-button-base')
                await dontShowBtn.click()
            except:
                # Banner absent or already dismissed; best-effort only.
                pass
    async def getCredentials(self):
        """Snapshot the authenticated session (cookies + localStorage) so it
        can be persisted and restored via setCredentials."""
        await self.page.waitForFunction('!!localStorage.getItem("pr_backend_type")')
        localStorageData = await self.page.evaluate("""() => {
            let data = {}
            Object.assign(data, window.localStorage)
            return data
        }
        """)
        cookiz = await self.page.cookies()
        creds = {
            "cookies": cookiz,
            "localStorage": localStorageData
        }
        return creds
    async def setCredentials(self, credentials: dict):
        """Inject saved cookies and localStorage, then reload the page."""
        await self.page.setCookie(*credentials["cookies"])
        await self.page.evaluate("""(localStorageData) => {
            try {
                localStorageData = JSON.parse(localStorageData)
            } catch (err) {}
            for (const key of Object.keys(localStorageData)) {
                localStorage.setItem(key, localStorageData[key])
            }
        }""", json.dumps(credentials["localStorage"]))
        await self.page.reload()
        return
    async def quit(self):
        """Close the underlying browser."""
        await self.browser.close()
    def __del__(self):
        # NOTE(review): running a coroutine from __del__ fails if the loop is
        # already running or closed — confirm explicit quit() is preferred.
        self.loop.run_until_complete(self.quit())
|
import numpy as np
import pandas as pd
import tensorflow as tf
tf.enable_eager_execution()
from sklearn.preprocessing import StandardScaler
import pickle
import sys
sys.path.append('../../rumm')
import lang
import nets
import bayesian
# constants
BATCH_SZ = 1024   # training batch size (dataset drops the last partial batch)
vocab_size = 36   # number of SMILES tokens, incl. the 'G'/'E' start/end markers
'''
# load the dataset
zinc_df = pd.read_csv('../../../6_prop.xls', sep='\t')
# shuffle it, and conduct training-test split
zinc_df = zinc_df.sample(zinc_df.shape[0])
n_samples = zinc_df.shape[0]
n_tr = int(0.8 * n_samples)
y_tr = np.array(zinc_df.values[:n_tr, 1:-1], dtype=np.float32)
x_tr = zinc_df.values[:n_tr, -1]
y_te = np.array(zinc_df.values[n_tr:, 1:-1], dtype=np.float32)
x_te = zinc_df.values[n_tr:, -1]
x_tr = np.apply_along_axis(lambda x: 'G' + x + 'E', 0, x_tr)
x_te = np.apply_along_axis(lambda x: 'G' + x + 'E', 0, x_te)
# calculate the std of y_tr for loss function
scaler = StandardScaler(copy=False)
y_tr = scaler.fit_transform(y_tr)
y_te = scaler.transform(y_te)
f_handle = open('scaler.p', 'wb')
pickle.dump(scaler, f_handle)
f_handle.close()
# save the dataset for later use
np.save('y_tr', y_tr)
np.save('x_tr', x_tr)
np.save('y_te', y_te)
np.save('x_te', x_te)
'''
# Load the cached arrays produced by the (commented-out) preprocessing above.
y_tr = np.load('y_tr.npy')
x_tr = np.load('x_tr.npy')
fp_tr = np.load('fp_tr.npy')
f_handle = open('lang_obj.p', 'rb')
lang_obj = pickle.load(f_handle)
f_handle.close()
# define models
# Bidirectional pair of GRU encoders (forward/backward over the sequence).
enc_f = nets.GRUEncoder(vocab_size=vocab_size, batch_sz = BATCH_SZ, reverse=False,
    enc_units = 128)
enc_b = nets.GRUEncoder(vocab_size=vocab_size, batch_sz = BATCH_SZ, reverse=True,
    enc_units = 128)
conv_encoder = nets.ConvEncoder(
    conv_units=[256, 512, 512],
    # pool_sizes=[8, 8, 8, 8],
    conv_kernel_sizes=[8, 12, 16],
    fcs=[128, 0.2, 'elu',
        512, 0.2, 'elu',
        512])
fcuk = nets.FullyConnectedUnits([512, 'leaky_relu', 0.25, 512])
# VAE heads: 32-dim latent mean / log-variance.
d_mean = nets.FullyConnectedUnits([32])
d_log_var = nets.FullyConnectedUnits([32])
fcuk_props = nets.FullyConnectedUnits([9])             # 9 property regressions
fcuk_fp = nets.FullyConnectedUnits([167, 'sigmoid'])   # 167-bit fingerprint head
decoder = nets.OneHotDecoder(vocab_size=vocab_size, dec_units = 256)
bypass_v_f = nets.FullyConnectedUnits([1])
simple_decoder = nets.SimpleDecoder(vocab_size=vocab_size, dec_units=1024,
    batch_sz = BATCH_SZ)
# initialize
# One dummy forward pass so every layer builds its variables before
# load_weights() below (Keras builds lazily on first call).
xs = tf.zeros([BATCH_SZ, 64], dtype=tf.int64)
eo_f, h_f = enc_f(xs)
eo_b, h_b = enc_b(xs)
x_attention = tf.concat([h_f, h_b], axis=-1)
x_attention = fcuk(x_attention)
x_conv = conv_encoder(tf.one_hot(xs, 33))
x = tf.concat([x_attention, x_conv], axis=-1)
mean = d_mean(x)
log_var = d_log_var(x)
z_noise = tf.clip_by_norm(tf.random_normal(mean.shape), 1e5) * tf.exp(log_var * .5)
z = z_noise + mean
ys_hat = fcuk_props(mean)
fp_hat = fcuk_fp(mean)
xs_bar = decoder(z)
# load weights
enc_f.load_weights('weights/enc_f.h5')
enc_b.load_weights('weights/enc_b.h5')
conv_encoder.load_weights('weights/conv_encoder.h5')
fcuk.load_weights('weights/fcuk.h5')
d_mean.load_weights('weights/d_mean.h5')
d_log_var.load_weights('weights/d_log_var.h5')
fcuk_props.load_weights('weights/fcuk_props.h5')
fcuk_fp.load_weights('weights/fcuk_fp.h5')
# bypass_v_f.load_weights('weights/bypass_v_f.h5')
decoder.load_weights('weights/decoder.h5')
# convert to tensor
x_tr = tf.convert_to_tensor(x_tr)
y_tr = tf.convert_to_tensor(y_tr)
fp_tr = tf.convert_to_tensor(fp_tr)
# make them into a dataset object
ds = tf.data.Dataset.from_tensor_slices((x_tr, y_tr, fp_tr))
ds = ds.apply(tf.contrib.data.batch_and_drop_remainder(BATCH_SZ))
# Drop references to the big arrays so they can be garbage-collected.
x_tr = None
y_tr = None
fp_tr = None
x_te = None
y_te = None
fp_te = None
optimizer = tf.train.AdamOptimizer(1e-5)
# KL annealing horizon (in samples seen); kl weight ramps 0 -> 1 via sin.
anneal_step = tf.constant(100000000.0, dtype=tf.float32)
for epoch in range(10000):
    # loop through the batches
    for (batch, (xs, ys, fps)) in enumerate(ds):
        # TODO
        # one training batch
        # NOTE(review): n_samples is only defined inside the commented-out
        # preprocessing block above — this line raises NameError as written;
        # confirm where n_samples should come from.
        n_iter = tf.constant(epoch * int(n_samples) + batch * BATCH_SZ, dtype=tf.float32)
        kl_anneal = tf.cond(n_iter < anneal_step,
            lambda: tf.math.sin(tf.div(n_iter, anneal_step) * 0.5 * tf.constant(np.pi, dtype=tf.float32)),
            lambda: tf.constant(1.0, dtype=tf.float32))
        with tf.GradientTape() as tape: # for descent
            # training
            eo_f, h_f = enc_f(xs)
            eo_b, h_b = enc_b(xs)
            x_attention = tf.concat([h_f, h_b], axis=-1)
            x_attention = fcuk(x_attention)
            x_conv = conv_encoder(tf.one_hot(xs, 33))
            x = tf.concat([x_attention, x_conv], axis=-1)
            mean = d_mean(x)
            log_var = d_log_var(x)
            # reparameterization trick: z = mean + eps * exp(log_var / 2)
            z_noise = tf.clip_by_norm(tf.random_normal(mean.shape), 1e5) * tf.exp(log_var * .5)
            z = z_noise + mean
            ys_hat = fcuk_props(mean)
            fp_hat = fcuk_fp(mean)
            # loss0: property regression; loss1: fingerprint reconstruction;
            # loss2: sequence reconstruction; loss3: annealed KL divergence.
            loss0 = tf.clip_by_value(tf.losses.mean_squared_error(ys, ys_hat), 0.0, 1e5)
            loss1 = tf.clip_by_value(tf.losses.log_loss(fps, fp_hat), 0.0, 1e5)
            xs_bar = decoder(z)
            loss2 = tf.clip_by_value(
                tf.reduce_mean(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(labels = xs, logits = xs_bar)), 0.0, 1e20)
            loss3 = tf.clip_by_value(kl_anneal * tf.reduce_mean(-0.5 * tf.reduce_mean(1 + log_var - tf.square(mean) - tf.exp(log_var), axis=[1])), 1.0, 1e5)
            lt = loss0 + loss1 + loss2 + loss3
        # start grad norm
        variables = conv_encoder.variables +\
            d_mean.variables + decoder.variables + d_log_var.variables +\
            enc_f.variables + enc_b.variables + fcuk.variables +\
            fcuk_props.variables + fcuk_fp.variables
        gradients = tape.gradient(lt, variables)
        optimizer.apply_gradients(zip(gradients, variables), tf.train.get_or_create_global_step())
        if batch % 100 == 0:
            print(loss0.numpy(), loss1.numpy(), loss2.numpy(), loss3.numpy())
        # Checkpoint every 1000 batches, but only if the loss is finite.
        if (batch % 1000 == 0) and ( np.isnan(lt.numpy()) == False):
            fcuk.save_weights('./fcuk.h5')
            enc_f.save_weights('./enc_f.h5')
            enc_b.save_weights('./enc_b.h5')
            fcuk_props.save_weights('./fcuk_props.h5')
            fcuk_fp.save_weights('./fcuk_fp.h5')
            d_mean.save_weights('./d_mean.h5')
            d_log_var.save_weights('./d_log_var.h5')
            decoder.save_weights('./decoder.h5')
            bypass_v_f.save_weights('./bypass_v_f.h5')
            simple_decoder.save_weights('./simple_decoder.h5')
|
import os
import sys
from subprocess import Popen, run, PIPE
from datetime import datetime
import logging
from configparser import ExtendedInterpolation
import aiofiles
import reusables
from sanic import Sanic
from sanic.response import json
from box import Box, ConfigBox
class PCError(Exception):
    """Base exception for Pymote Control errors."""
# Load settings from the first of pymote.ini / config.ini that exists;
# ExtendedInterpolation enables ${section:key} references inside the file.
config = reusables.config_namespace(["pymote.ini", "config.ini"],
    interpolation=ExtendedInterpolation())
if not config:
    # No config file found: fall back to built-in defaults.
    # NOTE(review): host 0.0.0.0 binds every interface with only a shared
    # token for auth — confirm this is intended for the deployment.
    config = ConfigBox({'Pymote': {
        "io_dir": "io",                    # directory for per-process stdio files
        "data_file": "data.json",          # persisted process-record store
        "log_level": 10,                   # logging.DEBUG
        "cleanup_on_start": True,
        "auth_type": "headers",
        "auth_tokens": "pass,password",    # comma-separated accepted tokens
        "port": 6666,
        "host": "0.0.0.0",
        "log_file": "pymote_control.log"
    }})
log = reusables.setup_logger("pymote",
    level=config.Pymote.int('log_level'),
    file_path=os.path.expanduser(
        config.Pymote.log_file))
app = Sanic("pymote")
os.makedirs(config.Pymote.io_dir, exist_ok=True)
# Reload previously tracked process records, if any.
try:
    data = Box.from_json(filename=config.Pymote.data_file)
except FileNotFoundError:
    data = Box()
# Live Popen handles for processes started in this server session only.
processes = Box()
@app.middleware('request')
async def check_auth(request):
    """Reject any request lacking a valid 'auth' header token.

    Only active when the configured auth_type is "headers"; other modes
    pass every request through untouched.
    """
    if config.Pymote.auth_type != "headers":
        return
    supplied = request.headers.get('auth')
    if supplied not in config.Pymote.list('auth_tokens'):
        return json({'error': 'Not Authorized'}, status=403)
def still_running(pid):
    """Return True while the tracked process appears to be alive.

    Side effect: marks data[pid] finished (and records the return code
    when known) as soon as the process is seen to be gone.
    """
    if data[pid].finished:
        return False
    if pid in processes:
        # We own the Popen handle: poll it directly.
        code = processes[pid].poll()
        if code is not None:
            data[pid].finished = True
            data[pid].return_code = code
            return False
    elif sys.platform.startswith("linux"):
        # Process from a previous server session: probe it with kill -0.
        resp = run(f"kill -0 {pid}", shell=True, stdout=PIPE, stderr=PIPE)
        if b"No such process" in resp.stderr:
            data[pid].finished = True
            return False
    return True
def cleanup_on_start():
    """Purge records (and their stdio files) of processes that have exited."""
    log.info("Cleaning up old data")
    stale = [pid for pid, info in data.items() if not still_running(pid)]
    for pid in stale:
        base = data[pid].base
        try:
            os.unlink(f"{base}_stdout")
            os.unlink(f"{base}_stderr")
            os.unlink(f"{base}_stdin")
        except OSError:
            log.exception(f"Could not clean up all {pid} files")
        del data[pid]
    data.to_json(filename=config.Pymote.data_file)
async def start_program(command, **kwargs):
    """Launch `command` in a shell, wiring its stdio to per-process files.

    Creates <io_dir>/<timestamp>_{stdin,stdout,stderr}, records the new
    process in `data` (persisted) and `processes`, and returns its pid
    as a string.
    """
    file_base = f"{config.Pymote.io_dir}{os.sep}{datetime.utcnow().isoformat()}"
    reusables.touch(f"{file_base}_stdin")
    stdout_f = open(f"{file_base}_stdout", "w")
    stderr_f = open(f"{file_base}_stderr", "w")
    stdin_f = open(f"{file_base}_stdin")
    try:
        p = Popen(command, shell=True,
                  stdout=stdout_f,
                  stderr=stderr_f,
                  stdin=stdin_f,
                  # detach from our process group so signals to the server
                  # don't hit the child (POSIX-only)
                  preexec_fn=os.setpgrp,
                  **kwargs)
    finally:
        # FIX: the original leaked these three file objects. Popen dup()s
        # the descriptors, so closing our copies is safe once (and whether
        # or not) the child has started.
        stdout_f.close()
        stderr_f.close()
        stdin_f.close()
    data[str(p.pid)] = {"base": file_base,
                        "finished": False,
                        "log_pos": {"stdout": 0, "stderr": 0},
                        "return_code": None}
    data.to_json(filename=config.Pymote.data_file)
    processes[str(p.pid)] = p
    return str(p.pid)
async def read_file(pid, file, full=False):
    """Read a process's stdout/stderr file.

    Unless `full` is truthy, resumes from the last recorded offset so
    repeated calls stream only new output; the offset is updated either way.
    """
    path = f"{data[pid].base}_{file}"
    async with aiofiles.open(path) as handle:
        if not full:
            await handle.seek(data[pid].log_pos[file])
        contents = await handle.read()
        data[pid].log_pos[file] = await handle.tell()
    return contents
@app.route("/v1/program/<pid>", methods=["GET"])
async def get_logs(request, pid):
    """Return incremental (or full, with ?full=...) stdout/stderr for `pid`."""
    if pid not in data:
        return json({'error': f'PID {pid} does not exist'}, status=400)
    full = request.args.get('full', False)
    still_running(pid)  # refresh finished/return_code before reporting
    out = await read_file(pid, "stdout", full=full)
    err = await read_file(pid, "stderr", full=full)
    data.to_json(filename=config.Pymote.data_file)
    return json({
        'stdout': out,
        'stderr': err,
        'finished': data[pid].finished,
        'return_code': data[pid].return_code,
    })
@app.route("/v1/program/", methods=["POST"])
async def new_program(request):
    # Launch the requested shell command; the pid string is the client's
    # handle for all later log/stop/delete calls.
    pid = await start_program(request.json['command'])
    return json({'pid': pid})
@app.route("/v1/program/<pid>/stop", methods=["POST"])
async def stop_program(request, pid):
    """Terminate a tracked process and report its return code."""
    if pid not in data:
        return json({'error': f'PID {pid} does not exist'}, status=400)
    if still_running(pid):
        if pid in processes:
            proc = processes[pid]
            # BUG FIX: the original tested `poll() is not None`, which is
            # only true once the process has ALREADY exited — so live
            # processes were never terminated. A live process is the one
            # with poll() is None.
            if proc.poll() is None:
                proc.terminate()
                proc.wait()  # reap so returncode is populated
            data[pid].return_code = proc.returncode
            data[pid].finished = True
        elif sys.platform.startswith("linux"):
            # BUG FIX: run() with a plain command string needs shell=True
            # (otherwise it tries to exec a program literally named
            # "kill -9 <pid>"), and stdout/stderr must be captured for the
            # log message below to contain anything.
            resp = run(f"kill -9 {pid}", shell=True, stdout=PIPE, stderr=PIPE)
            data[pid].finished = True
            log.info(f"Manually killing {pid} resulted "
                     f"in message {resp.stdout} {resp.stderr}")
    data.to_json(filename=config.Pymote.data_file)
    return json({'return_code': data[pid].return_code})
@app.route("/v1/program/<pid>", methods=["DELETE"])
async def stop_and_delete_logs(request, pid):
    """Stop a tracked process if needed, then remove its stdio files and record."""
    if pid not in data:
        return json({'error': f'PID {pid} does not exist'}, status=400)
    if still_running(pid):
        if pid in processes:
            proc = processes[pid]
            # BUG FIX: was `poll() is not None`, which only "terminated"
            # processes that had already exited; a live process has
            # poll() is None.
            if proc.poll() is None:
                proc.terminate()
                proc.wait()  # reap so returncode is populated
            data[pid].finished = True
        elif sys.platform.startswith("linux"):
            # BUG FIX: command strings need shell=True, and PIPE capture is
            # required for resp.stdout/resp.stderr to be non-None below.
            resp = run(f"kill -9 {pid}", shell=True, stdout=PIPE, stderr=PIPE)
            data[pid].finished = True
            log.info(f"Manually killing {pid} resulted "
                     f"in message {resp.stdout} {resp.stderr}")
    data.to_json(filename=config.Pymote.data_file)
    try:
        os.unlink(f"{data[pid].base}_stdout")
        os.unlink(f"{data[pid].base}_stderr")
        os.unlink(f"{data[pid].base}_stdin")
    except OSError:
        log.exception(f"Could not clean up all {pid} files")
    del data[pid]
    # Persist the record removal too (the save above predates the delete).
    data.to_json(filename=config.Pymote.data_file)
    if pid in processes:
        return_code = processes[pid].returncode
        del processes[pid]
        return json({'return_code': return_code})
    return json({})
if __name__ == '__main__':
    # Remove the stupid logo
    sanic_log = logging.getLogger('sanic')
    sanic_log.setLevel(logging.INFO)
    log.info("Starting Pymote Control Center")
    # NOTE(review): expects ascii_logo.txt next to this script and raises
    # if it is missing — confirm the file ships with the package.
    log.debug(open(f"{os.path.dirname(os.path.realpath(__file__))}{os.sep}"
        f"ascii_logo.txt").read())
    if config.Pymote.bool('cleanup_on_start'):
        cleanup_on_start()
    try:
        app.run(host=config.Pymote.host, port=config.Pymote.int('port'))
    finally:
        # Hard exit: skips atexit/thread teardown so the server dies
        # immediately once Sanic returns.
        os._exit(0)
|
from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox, column
from bokeh.models import ColumnDataSource, Range1d
from bokeh.models.widgets import Slider, Button, RadioGroup, Dropdown, RadioButtonGroup
from bokeh.plotting import figure
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
from bokeh.models import ColumnDataSource, CustomJS
from os.path import dirname, join
import numpy as np
import pandas as pd
# Precomputed mean/std waveforms keyed by quantity ("Q"/"P"/"u"),
# artery name, and age group.
df = pd.read_hdf("data/mean_stdev.h5")
# The modelled arterial segments; the numeric prefix is the segment id
# (segment 20 is split into 20a/20b, which shifts later list indices by one).
arteries = ["1-aortic_arch_I", "2-brachiocephalic_trunk", "3-subclavian_R_I", "4-subclavian_R_II",
    "5-radial_R", "6-ulnar_R_I", "7-ulnar_R_II", "8-common_interosseous_R", "9-vertebral_R",
    "10-common_carotid_R", "11-external_carotid_R", "12-internal_carotid_R", "13-aortic_arch_II",
    "14-common_carotid_L", "15-internal_carotid_L", "16-external_carotid_L", "17-aortic_arch_III",
    "18-subclavian_L_I", "19-vertebral_L", "20a-subclavian_L_II", "20b-axillary_L", "21-radial_L",
    "22-ulnar_L_I", "23-ulnar_L_II", "24-common_interosseous_L", "25-aortic_arch_IV",
    "26-posterior_intercostal_T6_R", "27-thoracic_aorta_II", "28-posterior_intercostal_T6_L",
    "29-thoracic_aorta_III", "30-posterior_intercostal_T7_R", "31-thoracic_aorta_IV",
    "32-posterior_intercostal_T7_L", "33-thoracic_aorta_V", "34-celiac_trunk", "35-common_hepatic",
    "36-splenic_I", "37-splenic_II", "38-left_gastric", "39-abdominal_aorta_I", "40-superior_mesenteric",
    "41-abdominal_aorta_II", "42-renal_L", "43-abdominal_aorta_III", "44-renal_R",
    "45-abdominal_aorta_IV", "46-inferior_mesenteric", "47-abdominal_aorta_V", "48-common_iliac_R",
    "49-internal_iliac_R", "50-external_iliac_R", "51-profunda_femoris_R", "52-femoral_R_II",
    "53-popliteal_R_II", "54-anterior_tibial_R", "55-common_iliac_L", "56-internal_iliac_L",
    "57-external_iliac_L", "58-profunda_femoris_L", "59-femoral_L_II", "60-popliteal_L_II",
    "61-anterior_tibial_L", "62-basilar", "63-posterior_cerebral_P1_L", "64-posterior_cerebral_P2_L",
    "65-posterior_communicating_L", "66-internal_carotid_II_L", "67-middle_cerebral_L",
    "68-anterior_cerebral_I_L", "69-anterior_cerebral_II_L", "70-anterior_communicating",
    "71-anterior_cerebral_II_R", "72-posterior_cerebral_P1_R", "73-posterior_cerebral_P2_R",
    "74-posterior_communicating_R", "75-internal_carotid_II_R", "76-middle_cerebral_R",
    "77-anterior_cerebral_I_R"]
# NOTE(review): arteries_labels is never populated anywhere — looks
# vestigial; confirm before removing.
arteries_labels = []
arteries_menu = []
# Build dropdown entries: ("20a Subclavian_l_ii ", "20a-subclavian_L_II").
for a in arteries:
    aa = a.split("-")
    aa[1] = aa[1].capitalize()
    lbl = ''
    for b in aa:
        lbl += b+' '
    arteries_menu.append((lbl, a))
# Set up data
a = 0
c = 0
dropdown = Dropdown(label="Select artery",
    button_type="warning", menu=arteries_menu)
def change_dropdown_label(attr, old, new):
    """On selection, show the chosen artery's label on the dropdown button.

    Maps the numeric prefix of the selected value to its index in
    arteries_menu (the 20a/20b split shifts indices from segment 21 on).
    """
    prefix = dropdown.value.split("-")[0]
    if prefix == "20a":
        idx = 19
    elif prefix == "20b":
        idx = 20
    elif int(prefix) <= 19:
        idx = int(prefix) - 1
    else:
        idx = int(prefix)
    dropdown.label = arteries_menu[idx][0]
    dropdown.button_type = "default"
dropdown.on_change('value', change_dropdown_label)
x = np.linspace(0,1,100)
y = np.linspace(0,1,100)
# Placeholder curves (white, i.e. invisible) until the user hits Plot.
source = ColumnDataSource(data=dict(xs=[x, x, x], ys=[y, y+2, y-2],
    colors=["white", "white", "white"]))
# Set up plot
plot = figure(plot_height=500, plot_width=500, title=" ",
    tools="crosshair, pan, reset, save, wheel_zoom, box_zoom, hover",
    x_range=[0, 1])
plot.multi_line(xs='xs', ys='ys', source=source, color='colors')
plot.xaxis.axis_label = "time (s)"
plot.yaxis.axis_label = " "
# Table / CSV-export source: mirrors whatever waveform is currently plotted.
table_source = ColumnDataSource(data=dict(time=[0], iavg=[0], istd=[0]))
download_button = Button(label="Download waveform (.csv)", button_type="default")
# Client-side CSV export; the JS reads table_source directly in the browser.
download_button.callback = CustomJS(args=dict(source=table_source),
    code=open(join(dirname(__file__), "./download.js")).read())
table_columns = [
    TableColumn(field="time", title="Time (s)"),
    TableColumn(field="iavg", title="Mean"),
    TableColumn(field="istd", title="SD")
]
data_table = DataTable(source=table_source, columns=table_columns, width=800)
# Age-decade buttons plus unit-system and quantity selectors.
ages = ["20", "30", "40", "50", "60", "70"]
ages_lbl = ["20+", "30+", "40+", "50+", "60+", "70+"]
radio_group_age = RadioButtonGroup(labels=ages_lbl, active=0)
radio_group = RadioButtonGroup(labels=["SI units", "Clinical units"], active=1)
radio_group_q = RadioButtonGroup(labels=["Flow", "Pressure", "Velocity"], active=1)
def plot_wave():
    """Plot mean ± SD waveform for the selected artery / age / quantity / units.

    Reads the current widget states, pulls the matching rows from `df`
    (values are stored in clinical units: ml/s, mmHg, m/s), converts for
    the chosen unit system, and updates the plot and the export table.
    """
    # Get the current widget values
    artery = dropdown.value
    ci = radio_group_age.active
    age = int(ages[ci])
    units = radio_group.active          # 0 = SI, 1 = clinical
    q = radio_group_q.active            # 0 = flow, 1 = pressure, 2 = velocity
    # Select the waveform rows once instead of repeating the filter per case.
    qty = ("Q", "P", "u")[q]
    sel = df[(df["q"] == qty) & (df["Artery"] == artery) & (df["Age"] == age)]
    iavg = sel["inlet_mean"].values
    istd = sel["inlet_std"].values
    if units == 1:
        # clinical units
        if q == 0:
            plot.yaxis.axis_label = "Flow Q (ml/s)"
        elif q == 1:
            plot.yaxis.axis_label = "Pressure P (mmHg)"
        elif q == 2:
            iavg *= 100  # m/s -> cm/s
            istd *= 100
            # BUG FIX: label said "Velocity P" — the symbol for velocity is u.
            plot.yaxis.axis_label = "Velocity u (cm/s)"
    else:
        # SI units
        if q == 0:
            iavg *= 1e-6  # ml/s -> m^3/s
            istd *= 1e-6
            plot.yaxis.axis_label = "Flow Q (m^3/s)"
        elif q == 1:
            iavg *= 133.332  # mmHg -> Pa
            istd *= 133.332
            # BUG FIX: label said kPa, but the factor above converts to Pa.
            plot.yaxis.axis_label = "Pressure P (Pa)"
        elif q == 2:
            # BUG FIX: label said "Pressure P (m/s)" for the velocity branch.
            plot.yaxis.axis_label = "Velocity u (m/s)"
    x = np.linspace(0, 1, len(iavg))
    source.data = dict(xs=[x, x, x], ys=[iavg-istd, iavg+istd, iavg],
                       colors=["silver", "silver", "black"])
    qs = ["Volumetric flow rate", "Transmural pressure", "Blood velocity"]
    # Same prefix-to-index mapping as change_dropdown_label (20a/20b split).
    prefix = artery.split("-")[0]
    if prefix == "20a":
        idx = 19
    elif prefix == "20b":
        idx = 20
    elif int(prefix) <= 19:
        idx = int(prefix) - 1
    else:
        idx = int(prefix)
    plot.title.text = "{0} - {1}".format(arteries_menu[idx][0], qs[q])
    table_source.data = dict(time=list(x), iavg=list(iavg), istd=list(istd))
# Plot button triggers the (server-side) redraw.
button_plot = Button(label="Plot", button_type="success")
button_plot.on_click(plot_wave)
# Assemble controls column + plot, with the export table below.
inputs = widgetbox(dropdown, radio_group_age, radio_group, radio_group_q, button_plot,
    download_button)
curdoc().add_root(column(row(inputs, plot, width=800), data_table))
curdoc().title = "openBF-db"
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fqtrim(MakefilePackage):
    """fqtrim is a versatile stand-alone utility that can be used to trim
    adapters, poly-A tails, terminal unknown bases (Ns) and low quality 3'
    regions in reads from high-throughput next-generation sequencing
    machines."""
    homepage = "https://ccb.jhu.edu/software/fqtrim/"
    url = "http://ccb.jhu.edu/software/fqtrim/dl/fqtrim-0.9.7.tar.gz"
    version('0.9.7', sha256='4951538f69dde14a23fc4841ff020434d26eb9622c4e06b43c068c702aa3d0d6')
    def build(self, spec, prefix):
        # Build the optimized binary via the 'release' target — presumably
        # defined by the upstream Makefile; TODO confirm on version bumps.
        make('release')
    def install(self, spec, prefix):
        # The Makefile provides no install target: create bin/ and copy
        # the built binary in ourselves.
        mkdirp(prefix.bin)
        install('fqtrim', prefix.bin)
|
import numpy as np
def data_upsampling(data, size):
    """Upsample `data` by repeating each entry, via a Kronecker product
    with a block of ones.

    `size` may be an int (repeat along the last axis) or a shape tuple.
    Note the result is float, because np.ones produces floats.
    """
    return np.kron(data, np.ones(size))
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 11:22:57 2015
@author: Wasit
"""
import numpy as np
from matplotlib import pyplot as plt
import pickle
import os
def gen_data():
    """Generate N pickled 2-D spiral datasets under ./train/.

    Each dataset has `clmax` classes with `spc` samples per class laid out
    as interleaved spiral arms; every file stores the tuple
    (clmax, theta_dim, theta_range, n_samples, labels, features, None).
    """
    clmax = 5            # number of classes
    # FIX: was 1e3 (a float) — array sizes and slice bounds must be ints;
    # np.zeros(float) raises TypeError on Python 3 / modern NumPy.
    spc = 1000           # samples per class
    theta_range = 2      # feature dimensionality (x, y)
    samples = np.zeros(spc*clmax, dtype=np.uint32)
    I = np.zeros((spc*clmax, theta_range), dtype=np.float32)
    # Kept for the commented-out plotting below.
    mark_sym = ['*', '+', '.', '|', 'x', '^', 's', 'o']
    mark_colr = ['r', 'g', 'b', 'k']
    N = 8  # number of datasets being generated
    #path="%.1g/"%spc
    path = "train/"
    if not os.path.exists(path):
        os.makedirs(path)
    # FIX: xrange is Python-2-only; range behaves identically here.
    for n in range(N):
        for cl in range(clmax):
            xo = cl*spc
            # define label
            samples[xo:xo+spc] = cl
            # Spiral arm: angle offset per class plus Gaussian jitter,
            # radius growing from 0.1 to 1.
            phi = np.linspace(0, 2*np.pi, spc) + \
                np.random.randn(spc)*0.4*np.pi/clmax + \
                2*np.pi*cl/clmax
            r = np.linspace(0.1, 1, spc)
            I[xo:xo+spc, :] = np.transpose(np.array([r*np.cos(phi), r*np.sin(phi)]))
            #mark=mark_colr[cl%len(mark_colr)]+mark_sym[(cl//len(mark_colr))%len(mark_sym)]
            #plt.plot(I[xo:xo+spc,0],I[xo:xo+spc,1],mark)
        with open(path+'dataset%02d.pic' % (n), 'wb') as pickleFile:
            # write label and feature vector
            theta_dim = 1
            pickle.dump((clmax, theta_dim, theta_range, len(samples), samples, I, None),
                        pickleFile, pickle.HIGHEST_PROTOCOL)
def train():
    """Train a pforest on the generated datasets and pickle the root node."""
    from pforest.master import master
    m = master()
    m.reset()
    m.train()
    # Context manager guarantees the file is closed even if dump() raises
    # (the original used open/close with no protection).
    with open('out_tree.pic', 'wb') as pickle_file:
        pickle.dump(m.root, pickle_file, pickle.HIGHEST_PROTOCOL)
def show_result():
    """Load the trained tree, report its recall on the training data, and
    visualize the decision regions over [-1, 1] x [-1, 1]."""
    import pickle
    from matplotlib import pyplot as plt
    from pforest.dataset import dataset
    from pforest.tree import tree
    # FIX: context manager instead of bare open/close.
    with open('out_tree.pic', 'rb') as pickle_file:
        root = pickle.load(pickle_file)
    # init the test tree
    t = tree()
    t.settree(root)
    t.show()
    # compute recall rate
    dset = dataset()
    correct = 0
    # FIX: xrange is Python-2-only (NameError on Python 3); range is
    # equivalent here.
    for x in range(dset.size):
        L = t.getL(np.array([x]), dset)
        if dset.getL(x) == L:
            correct = correct + 1
        dset.setL(x, L)
    print("recall rate: {}%".format(correct/float(dset.size)*100))
    # setup the new test-set: a regular grid covering the feature space
    d = 0.01
    y, x = np.mgrid[slice(-1, 1+d, d), slice(-1, 1+d, d)]
    # create dataset
    dset2 = dataset()
    # start labeling
    L = np.zeros(x.shape, dtype=int)
    for r in range(x.shape[0]):
        for c in range(x.shape[1]):
            Prob = t.classify((x[r, c], y[r, c]))
            L[r, c] = np.argmax(Prob)
    # plot the label output
    plt.close('all')
    plt.axis([-1, 1, -1, 1])
    plt.pcolor(x, y, L)
    plt.show()
    # overlaying new input data
    # FIX: plt.hold() was removed in Matplotlib 3.x; hold-on is the default
    # behaviour, so simply keep plotting onto the same axes.
    plt.set_cmap('jet')
    marker = ['bo', 'co', 'go', 'ro', 'mo', 'yo', 'ko',
              'bs', 'cs', 'gs', 'rs', 'ms', 'ys', 'ks']
    z = np.random.randint(0, dset.size, 1000)
    for i in z:
        plt.plot(dset2.I[i, 0], dset2.I[i, 1], marker[dset2.samples[i]])
if __name__ == '__main__':
    # Full pipeline: synthesize spiral data, train the forest, visualize.
    gen_data()
    train()
    show_result()
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from .utils import get_xmodule
# Resolve the compiled extension module once at import time and expose its
# custom GEMM op.
xmodule = get_xmodule()
xgemm = xmodule.xgemm
@ops.RegisterGradient("XGEMM")
def _xgemm_grad(op, grad):
    """Backward pass for the custom XGEMM matmul op.

    For C = A @ B the chain rule gives dA = dC @ B^T and dB = A^T @ dC.

    :param op: XGEMM operation that is differentiated
    :param grad: gradient with respect to the output of XGEMM
    :return: gradients with respect to the two input matrices
    """
    lhs, rhs = op.inputs[0], op.inputs[1]
    return (
        math_ops.matmul(grad, rhs, transpose_b=True),
        math_ops.matmul(lhs, grad, transpose_a=True),
    )
|
"""A TaskRecord backend using sqlite3
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import json
import os
import cPickle as pickle
from datetime import datetime
try:
import sqlite3
except ImportError:
sqlite3 = None
from zmq.eventloop import ioloop
from IPython.utils.traitlets import Unicode, Instance, List, Dict
from .dictdb import BaseDB
from IPython.utils.jsonutil import date_default, extract_dates, squash_dates
#-----------------------------------------------------------------------------
# SQLite operators, adapters, and converters
#-----------------------------------------------------------------------------
# Python 2/3 shim: py3 has no builtin `buffer`; memoryview plays the same
# role for binary blobs here.
try:
    buffer
except NameError:
    # py3k
    buffer = memoryview
# Mapping from mongodb-style query operators to SQL fragments.
# Tuple values are (comparison, joiner) pairs for the list-valued
# $in/$nin tests, expanded in _render_expression.
operators = {
 '$lt' : "<",
 '$gt' : ">",
 # null is handled weird with ==,!=
 '$eq' : "=",
 '$ne' : "!=",
 '$lte': "<=",
 '$gte': ">=",
 '$in' : ('=', ' OR '),
 '$nin': ('!=', ' AND '),
# '$all': None,
# '$mod': None,
# '$exists' : None
}
# SQL NULL cannot be compared with =/!=; substitute IS (NOT) NULL.
null_operators = {
'=' : "IS NULL",
'!=' : "IS NOT NULL",
}
def _adapt_dict(d):
    # sqlite adapter: store dicts as JSON text (datetimes via date_default).
    return json.dumps(d, default=date_default)
def _convert_dict(ds):
    """sqlite converter: JSON text (or utf8 bytes) back to a dict, with
    ISO date strings revived to datetimes. None passes through."""
    if ds is None:
        return None
    if isinstance(ds, bytes):
        # If I understand the sqlite doc correctly, this will always be utf8
        ds = ds.decode('utf8')
    return extract_dates(json.loads(ds))
def _adapt_bufs(bufs):
# this is *horrible*
# copy buffers into single list and pickle it:
if bufs and isinstance(bufs[0], (bytes, buffer)):
return sqlite3.Binary(pickle.dumps(map(bytes, bufs),-1))
elif bufs:
return bufs
else:
return None
def _convert_bufs(bs):
if bs is None:
return []
else:
return pickle.loads(bytes(bs))
#-----------------------------------------------------------------------------
# SQLiteDB class
#-----------------------------------------------------------------------------
class SQLiteDB(BaseDB):
    """SQLite3 TaskRecord backend.

    Stores one row per task message; dict-valued columns are serialized to
    JSON and buffer columns to pickled blobs via the module-level sqlite
    adapters/converters.
    """
    filename = Unicode('tasks.db', config=True,
        help="""The filename of the sqlite task database. [default: 'tasks.db']""")
    location = Unicode('', config=True,
        help="""The directory containing the sqlite task database. The default
        is to use the cluster_dir location.""")
    table = Unicode("", config=True,
        help="""The SQLite Table to use for storing tasks for this session. If unspecified,
        a new table will be created with the Hub's IDENT. Specifying the table will result
        in tasks from previous sessions being available via Clients' db_query and
        get_result methods.""")
    if sqlite3 is not None:
        _db = Instance('sqlite3.Connection')
    else:
        _db = None
    # the ordered list of column names
    _keys = List(['msg_id' ,
        'header' ,
        'content',
        'buffers',
        'submitted',
        'client_uuid' ,
        'engine_uuid' ,
        'started',
        'completed',
        'resubmitted',
        'result_header' ,
        'result_content' ,
        'result_buffers' ,
        'queue' ,
        'pyin' ,
        'pyout',
        'pyerr',
        'stdout',
        'stderr',
    ])
    # sqlite datatypes for checking that db is current format
    _types = Dict({'msg_id' : 'text' ,
        'header' : 'dict text',
        'content' : 'dict text',
        'buffers' : 'bufs blob',
        'submitted' : 'timestamp',
        'client_uuid' : 'text',
        'engine_uuid' : 'text',
        'started' : 'timestamp',
        'completed' : 'timestamp',
        'resubmitted' : 'timestamp',
        'result_header' : 'dict text',
        'result_content' : 'dict text',
        'result_buffers' : 'bufs blob',
        'queue' : 'text',
        'pyin' : 'text',
        'pyout' : 'text',
        'pyerr' : 'text',
        'stdout' : 'text',
        'stderr' : 'text',
    })
    def __init__(self, **kwargs):
        super(SQLiteDB, self).__init__(**kwargs)
        if sqlite3 is None:
            raise ImportError("SQLiteDB requires sqlite3")
        if not self.table:
            # use session, and prefix _, since starting with # is illegal
            self.table = '_'+self.session.replace('-','_')
        if not self.location:
            # get current profile
            from IPython.core.application import BaseIPythonApplication
            if BaseIPythonApplication.initialized():
                app = BaseIPythonApplication.instance()
                if app.profile_dir is not None:
                    self.location = app.profile_dir.location
                else:
                    self.location = u'.'
            else:
                self.location = u'.'
        self._init_db()
        # register db commit as 2s periodic callback
        # to prevent clogging pipes
        # assumes we are being run in a zmq ioloop app
        loop = ioloop.IOLoop.instance()
        pc = ioloop.PeriodicCallback(self._db.commit, 2000, loop)
        pc.start()
    def _defaults(self, keys=None):
        """create an empty record"""
        d = {}
        keys = self._keys if keys is None else keys
        for key in keys:
            d[key] = None
        return d
    def _check_table(self):
        """Ensure that an incorrect table doesn't exist
        If a bad (old) table does exist, return False
        """
        cursor = self._db.execute("PRAGMA table_info(%s)"%self.table)
        lines = cursor.fetchall()
        if not lines:
            # table does not exist
            return True
        types = {}
        keys = []
        for line in lines:
            keys.append(line[1])
            types[line[1]] = line[2]
        if self._keys != keys:
            # key mismatch
            self.log.warn('keys mismatch')
            return False
        for key in self._keys:
            if types[key] != self._types[key]:
                self.log.warn(
                    'type mismatch: %s: %s != %s'%(key,types[key],self._types[key])
                )
                return False
        return True
    def _init_db(self):
        """Connect to the database and get new session number."""
        # register adapters
        sqlite3.register_adapter(dict, _adapt_dict)
        sqlite3.register_converter('dict', _convert_dict)
        sqlite3.register_adapter(list, _adapt_bufs)
        sqlite3.register_converter('bufs', _convert_bufs)
        # connect to the db
        dbfile = os.path.join(self.location, self.filename)
        self._db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES,
            # isolation_level = None)#,
            cached_statements=64)
        # print dir(self._db)
        first_table = self.table
        i=0
        # If an old-format table occupies our name, move to a fresh suffix.
        while not self._check_table():
            i+=1
            self.table = first_table+'_%i'%i
            self.log.warn(
                "Table %s exists and doesn't match db format, trying %s"%
                (first_table,self.table)
            )
        self._db.execute("""CREATE TABLE IF NOT EXISTS %s
                (msg_id text PRIMARY KEY,
                header dict text,
                content dict text,
                buffers bufs blob,
                submitted timestamp,
                client_uuid text,
                engine_uuid text,
                started timestamp,
                completed timestamp,
                resubmitted timestamp,
                result_header dict text,
                result_content dict text,
                result_buffers bufs blob,
                queue text,
                pyin text,
                pyout text,
                pyerr text,
                stdout text,
                stderr text)
                """%self.table)
        self._db.commit()
    def _dict_to_list(self, d):
        """turn a mongodb-style record dict into a list."""
        return [ d[key] for key in self._keys ]
    def _list_to_dict(self, line, keys=None):
        """Inverse of dict_to_list"""
        keys = self._keys if keys is None else keys
        d = self._defaults(keys)
        for key,value in zip(keys, line):
            d[key] = value
        return d
    def _render_expression(self, check):
        """Turn a mongodb-style search dict into an SQL query."""
        expressions = []
        args = []
        skeys = set(check.keys())
        skeys.difference_update(set(self._keys))
        skeys.difference_update(set(['buffers', 'result_buffers']))
        if skeys:
            raise KeyError("Illegal testing key(s): %s"%skeys)
        # FIX: was check.iteritems() / sub_check.iteritems(), which is
        # Python-2-only; .items() behaves identically on both 2 and 3.
        for name,sub_check in check.items():
            if isinstance(sub_check, dict):
                for test,value in sub_check.items():
                    try:
                        op = operators[test]
                    except KeyError:
                        raise KeyError("Unsupported operator: %r"%test)
                    if isinstance(op, tuple):
                        op, join = op
                    if value is None and op in null_operators:
                        expr = "%s %s" % (name, null_operators[op])
                    else:
                        expr = "%s %s ?"%(name, op)
                        if isinstance(value, (tuple,list)):
                            if op in null_operators and any([v is None for v in value]):
                                # equality tests don't work with NULL
                                raise ValueError("Cannot use %r test with NULL values on SQLite backend"%test)
                            expr = '( %s )'%( join.join([expr]*len(value)) )
                            args.extend(value)
                        else:
                            args.append(value)
                    expressions.append(expr)
            else:
                # it's an equality check
                if sub_check is None:
                    expressions.append("%s IS NULL" % name)
                else:
                    expressions.append("%s = ?"%name)
                    args.append(sub_check)
        expr = " AND ".join(expressions)
        return expr, args
    def add_record(self, msg_id, rec):
        """Add a new Task Record, by msg_id."""
        d = self._defaults()
        d.update(rec)
        d['msg_id'] = msg_id
        line = self._dict_to_list(d)
        tups = '(%s)'%(','.join(['?']*len(line)))
        self._db.execute("INSERT INTO %s VALUES %s"%(self.table, tups), line)
        # self._db.commit()
    def get_record(self, msg_id):
        """Get a specific Task Record, by msg_id."""
        cursor = self._db.execute("""SELECT * FROM %s WHERE msg_id==?"""%self.table, (msg_id,))
        line = cursor.fetchone()
        if line is None:
            raise KeyError("No such msg: %r"%msg_id)
        return self._list_to_dict(line)
    def update_record(self, msg_id, rec):
        """Update the data in an existing record."""
        query = "UPDATE %s SET "%self.table
        sets = []
        keys = sorted(rec.keys())
        values = []
        for key in keys:
            sets.append('%s = ?'%key)
            values.append(rec[key])
        query += ', '.join(sets)
        query += ' WHERE msg_id == ?'
        values.append(msg_id)
        self._db.execute(query, values)
        # self._db.commit()
    def drop_record(self, msg_id):
        """Remove a record from the DB."""
        self._db.execute("""DELETE FROM %s WHERE msg_id==?"""%self.table, (msg_id,))
        # self._db.commit()
    def drop_matching_records(self, check):
        """Remove a record from the DB."""
        expr,args = self._render_expression(check)
        query = "DELETE FROM %s WHERE %s"%(self.table, expr)
        self._db.execute(query,args)
        # self._db.commit()
    def find_records(self, check, keys=None):
        """Find records matching a query dict, optionally extracting subset of keys.
        Returns list of matching records.
        Parameters
        ----------
        check: dict
            mongodb-style query argument
        keys: list of strs [optional]
            if specified, the subset of keys to extract. msg_id will *always* be
            included.
        """
        if keys:
            bad_keys = [ key for key in keys if key not in self._keys ]
            if bad_keys:
                raise KeyError("Bad record key(s): %s"%bad_keys)
        if keys:
            # ensure msg_id is present and first:
            if 'msg_id' in keys:
                keys.remove('msg_id')
            keys.insert(0, 'msg_id')
            req = ', '.join(keys)
        else:
            req = '*'
        expr,args = self._render_expression(check)
        query = """SELECT %s FROM %s WHERE %s"""%(req, self.table, expr)
        cursor = self._db.execute(query, args)
        matches = cursor.fetchall()
        records = []
        for line in matches:
            rec = self._list_to_dict(line, keys)
            records.append(rec)
        return records
    def get_history(self):
        """get all msg_ids, ordered by time submitted."""
        query = """SELECT msg_id FROM %s ORDER by submitted ASC"""%self.table
        cursor = self._db.execute(query)
        # will be a list of length 1 tuples
        return [ tup[0] for tup in cursor.fetchall()]
__all__ = ['SQLiteDB'] |
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.AIMObject import AIMObject
from OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument
from OTLMOW.OTLModel.Datatypes.DtcExterneReferentie import DtcExterneReferentie
from OTLMOW.OTLModel.Datatypes.KlVerkeersbordCategorie import KlVerkeersbordCategorie
from OTLMOW.OTLModel.Datatypes.KlVerkeersbordCode import KlVerkeersbordCode
from OTLMOW.OTLModel.Datatypes.KlVerkeersbordconceptStatus import KlVerkeersbordconceptStatus
from OTLMOW.OTLModel.Datatypes.StringField import StringField
from OTLMOW.GeometrieArtefact.GeenGeometrie import GeenGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class VerkeersbordConcept(AIMObject, GeenGeometrie):
    """Definition of the meaning of a traffic sign as included in the road code.

    Generated OTL model class: each model attribute is declared as an
    OTLAttribuut in __init__ and exposed through a property pair below.
    The Dutch ``definition``/``usagenote`` strings are part of the generated
    model data and must stay unchanged.
    """
    # Canonical OTL type URI identifying this class in the ontology.
    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/installatie#VerkeersbordConcept'
    """The URI of the object according to https://www.w3.org/2001/XMLSchema#anyURI."""

    def __init__(self):
        # Initialize both bases explicitly (multiple inheritance).
        AIMObject.__init__(self)
        GeenGeometrie.__init__(self)

        # Image(s) of the traffic sign concept; kardinaliteit_max='*' allows many.
        self._afbeelding = OTLAttribuut(field=DtcDocument,
                                        naam='afbeelding',
                                        label='afbeelding',
                                        objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#VerkeersbordConcept.afbeelding',
                                        kardinaliteit_max='*',
                                        definition='De afbeelding van het verkeersbordconcept.',
                                        owner=self)

        # Free-text meaning of the sign according to the road code.
        self._betekenis = OTLAttribuut(field=StringField,
                                       naam='betekenis',
                                       label='betekenis',
                                       objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#VerkeersbordConcept.betekenis',
                                       definition='Betekenis die gegeven wordt aan dit soort verkeersbord volgens de wegcode.',
                                       owner=self)

        # External reference to the legal basis (typically a road-code article).
        self._rechtsgrondOnderdeel = OTLAttribuut(field=DtcExterneReferentie,
                                                  naam='rechtsgrondOnderdeel',
                                                  label='rechtsgrondonderdeel',
                                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#VerkeersbordConcept.rechtsgrondOnderdeel',
                                                  usagenote='Verwijst meestal naar een artikel in de wegcode die informatie over dit verkeersbordconcept bevat. Bijvoorbeeld: artikel 68.3 voor verbodsborden.',
                                                  definition='Verwijst naar een rechtsgrondonderdeel over dit verkeersbordconcept.',
                                                  owner=self)

        # Lifecycle status of the concept (e.g. stable, unstable, abolished).
        self._status = OTLAttribuut(field=KlVerkeersbordconceptStatus,
                                    naam='status',
                                    label='status',
                                    objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#VerkeersbordConcept.status',
                                    usagenote='Bijvoorbeeld: stabiel, onstabiel, afgeschaft. Een bord met snelheidslimiet van 60 km/u is bijvoorbeeld afgeschaft.',
                                    definition='Duidt of het verkeersbordconcept nog gebruikt wordt.',
                                    owner=self)

        # Category the concept belongs to (controlled keuzelijst).
        self._verkeersbordCategorie = OTLAttribuut(field=KlVerkeersbordCategorie,
                                                   naam='verkeersbordCategorie',
                                                   label='verkeersbord categorie',
                                                   objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#VerkeersbordConcept.verkeersbordCategorie',
                                                   definition='Categorie van het verkeersbordconcept. .',
                                                   owner=self)

        # Code assigned to the sign within the road code (controlled keuzelijst).
        self._verkeersbordCode = OTLAttribuut(field=KlVerkeersbordCode,
                                              naam='verkeersbordCode',
                                              label='verkeersbordcode',
                                              objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#VerkeersbordConcept.verkeersbordCode',
                                              definition='Code die aan dit soort bord gegeven wordt binnen de wegcode.',
                                              owner=self)

    @property
    def afbeelding(self):
        """The image of the traffic sign concept."""
        return self._afbeelding.get_waarde()

    @afbeelding.setter
    def afbeelding(self, value):
        self._afbeelding.set_waarde(value, owner=self)

    @property
    def betekenis(self):
        """The meaning given to this type of traffic sign according to the road code."""
        return self._betekenis.get_waarde()

    @betekenis.setter
    def betekenis(self, value):
        self._betekenis.set_waarde(value, owner=self)

    @property
    def rechtsgrondOnderdeel(self):
        """Reference to a legal-basis component about this traffic sign concept."""
        return self._rechtsgrondOnderdeel.get_waarde()

    @rechtsgrondOnderdeel.setter
    def rechtsgrondOnderdeel(self, value):
        self._rechtsgrondOnderdeel.set_waarde(value, owner=self)

    @property
    def status(self):
        """Indicates whether the traffic sign concept is still in use."""
        return self._status.get_waarde()

    @status.setter
    def status(self, value):
        self._status.set_waarde(value, owner=self)

    @property
    def verkeersbordCategorie(self):
        """Category of the traffic sign concept."""
        return self._verkeersbordCategorie.get_waarde()

    @verkeersbordCategorie.setter
    def verkeersbordCategorie(self, value):
        self._verkeersbordCategorie.set_waarde(value, owner=self)

    @property
    def verkeersbordCode(self):
        """Code given to this type of sign within the road code."""
        return self._verkeersbordCode.get_waarde()

    @verkeersbordCode.setter
    def verkeersbordCode(self, value):
        self._verkeersbordCode.set_waarde(value, owner=self)
|
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.db import models
from django.contrib.auth.models import User
from django.db.models.base import Model
# Create your models here.
class Car(models.Model):
    """A car owned by an investor (a Django auth ``User``)."""
    car_brand = models.CharField(max_length=150, help_text='Car Brand')
    car_model = models.CharField(max_length=150, help_text='Car Model')
    # Deleting the owning User cascades and removes their cars.
    investor = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        # e.g. "Toyota - Corolla alice"
        return f'{self.car_brand} - {self.car_model} {self.investor.username}'
class Revenue(models.Model):
    """A revenue event recorded against a car."""
    # Amount earned; units are not stated here -- presumably a currency amount,
    # TODO confirm against the views/templates that render it.
    revenue = models.IntegerField(blank=True, null=True)
    car = models.ForeignKey(Car, on_delete=models.CASCADE)
    date = models.DateField()
    # Discriminator used when mixing event types (revenue/accident/maintenance)
    # in a single feed.
    event_type = models.CharField( default='revenue' ,max_length=150 ,blank=True, null=True)
class Accident(models.Model):
    """An accident event recorded against a car."""
    # Choices for who was at fault.
    FAULT=[
        ('me',"me"),
        ("thirdparty", "thirdparty")
    ]
    car = models.ForeignKey(Car, on_delete=models.CASCADE)
    accident = models.CharField(help_text="Where accident occur?",max_length=150 ,blank=True, null=True)
    fault = models.CharField(help_text="Who's at fault?", max_length=150, choices=FAULT, blank=True, null=True)
    # NOTE(review): stored as free text -- a BooleanField may fit better; confirm
    # no existing rows hold non-boolean strings before changing.
    totaled = models.CharField(help_text="Was car totaled?", max_length=150,blank=True, null=True)
    photo = models.ImageField(upload_to='cars', help_text='Upload all images and documents related to accident',blank=True, null=True)
    date = models.DateField(blank=True, null=True)
    # Discriminator used when mixing event types in a single feed.
    event_type = models.CharField( default='accident' ,max_length=150 ,blank=True, null=True)
class Maintenance(models.Model):
    """A maintenance event recorded against a car."""
    car = models.ForeignKey(Car, on_delete=models.CASCADE)
    # NOTE(review): field name shadows the builtin `type`; harmless as a model
    # field but renaming would require a migration.
    type = models.CharField(help_text="what type of maintance?", max_length=150, blank=True, null=True)
    photo = models.ImageField(upload_to='cars', help_text='Upload all images and documents related to accident',blank=True, null=True)
    # `value` vs `cost`: semantics unclear from here -- verify against the forms
    # that populate them.
    value = models.IntegerField(help_text="How much?",blank=True, null=True)
    cost = models.IntegerField(help_text="Cost?",blank=True, null=True)
    approve = models.BooleanField(help_text="Approved?", blank=True, null=True )
    # Discriminator used when mixing event types in a single feed.
    event_type = models.CharField( default='maintenance' ,max_length=150 ,blank=True, null=True)
    date = models.DateField(blank=True, null=True)
|
""" Include a dump of snapshot data from the live system"""
# Fixture constant: the shape (snapshot/shards/state/version fields) matches an
# Elasticsearch snapshot-status API response -- verify against the capture
# source. Nine consecutive daily snapshots of the "doaj_v1" index; all SUCCESS
# except 2018-05-13, which is PARTIAL because two shard copies failed with
# "node shutdown".
SNAPSHOTS_LIST = {
    "snapshots": [
        {
            "duration_in_millis": 54877,
            "end_time": "2018-05-06T05:00:54.937Z",
            "end_time_in_millis": 1525582854937,
            "failures": [],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 0,
                "successful": 6,
                "total": 6
            },
            "snapshot": "snapshot_2018-05-06_0600",
            "start_time": "2018-05-06T05:00:00.060Z",
            "start_time_in_millis": 1525582800060,
            "state": "SUCCESS",
            "version": "1.7.5",
            "version_id": 1070599
        },
        {
            "duration_in_millis": 53051,
            "end_time": "2018-05-07T05:00:53.176Z",
            "end_time_in_millis": 1525669253176,
            "failures": [],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 0,
                "successful": 6,
                "total": 6
            },
            "snapshot": "snapshot_2018-05-07_0600",
            "start_time": "2018-05-07T05:00:00.125Z",
            "start_time_in_millis": 1525669200125,
            "state": "SUCCESS",
            "version": "1.7.5",
            "version_id": 1070599
        },
        {
            "duration_in_millis": 45674,
            "end_time": "2018-05-08T05:00:45.990Z",
            "end_time_in_millis": 1525755645990,
            "failures": [],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 0,
                "successful": 6,
                "total": 6
            },
            "snapshot": "snapshot_2018-05-08_0600",
            "start_time": "2018-05-08T05:00:00.316Z",
            "start_time_in_millis": 1525755600316,
            "state": "SUCCESS",
            "version": "1.7.5",
            "version_id": 1070599
        },
        {
            "duration_in_millis": 52140,
            "end_time": "2018-05-09T05:00:51.611Z",
            "end_time_in_millis": 1525842051611,
            "failures": [],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 0,
                "successful": 6,
                "total": 6
            },
            "snapshot": "snapshot_2018-05-09_0600",
            "start_time": "2018-05-09T04:59:59.471Z",
            "start_time_in_millis": 1525841999471,
            "state": "SUCCESS",
            "version": "1.7.5",
            "version_id": 1070599
        },
        {
            "duration_in_millis": 43258,
            "end_time": "2018-05-10T05:00:42.828Z",
            "end_time_in_millis": 1525928442828,
            "failures": [],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 0,
                "successful": 6,
                "total": 6
            },
            "snapshot": "snapshot_2018-05-10_0600",
            "start_time": "2018-05-10T04:59:59.570Z",
            "start_time_in_millis": 1525928399570,
            "state": "SUCCESS",
            "version": "1.7.5",
            "version_id": 1070599
        },
        {
            "duration_in_millis": 49055,
            "end_time": "2018-05-11T05:00:48.678Z",
            "end_time_in_millis": 1526014848678,
            "failures": [],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 0,
                "successful": 6,
                "total": 6
            },
            "snapshot": "snapshot_2018-05-11_0600",
            "start_time": "2018-05-11T04:59:59.623Z",
            "start_time_in_millis": 1526014799623,
            "state": "SUCCESS",
            "version": "1.7.5",
            "version_id": 1070599
        },
        {
            "duration_in_millis": 42105,
            "end_time": "2018-05-12T05:00:42.006Z",
            "end_time_in_millis": 1526101242006,
            "failures": [],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 0,
                "successful": 6,
                "total": 6
            },
            "snapshot": "snapshot_2018-05-12_0600",
            "start_time": "2018-05-12T04:59:59.901Z",
            "start_time_in_millis": 1526101199901,
            "state": "SUCCESS",
            "version": "1.7.5",
            "version_id": 1070599
        },
        # The one PARTIAL snapshot: two shards failed during a node shutdown.
        {
            "duration_in_millis": 36901,
            "end_time": "2018-05-13T05:00:36.604Z",
            "end_time_in_millis": 1526187636604,
            "failures": [
                {
                    "index": "doaj_v1",
                    "node_id": "RTVV05UoToubyPr9ErBlKg",
                    "reason": "node shutdown",
                    "shard_id": 1,
                    "status": "INTERNAL_SERVER_ERROR"
                },
                {
                    "index": "doaj_v1",
                    "node_id": "RTVV05UoToubyPr9ErBlKg",
                    "reason": "node shutdown",
                    "shard_id": 2,
                    "status": "INTERNAL_SERVER_ERROR"
                }
            ],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 2,
                "successful": 0,
                "total": 2
            },
            "snapshot": "snapshot_2018-05-13_0600",
            "start_time": "2018-05-13T04:59:59.703Z",
            "start_time_in_millis": 1526187599703,
            "state": "PARTIAL",
            "version": "1.7.5",
            "version_id": 1070599
        },
        {
            "duration_in_millis": 40825,
            "end_time": "2018-05-14T05:00:40.714Z",
            "end_time_in_millis": 1526274040714,
            "failures": [],
            "indices": [
                "doaj_v1"
            ],
            "shards": {
                "failed": 0,
                "successful": 6,
                "total": 6
            },
            "snapshot": "snapshot_2018-05-14_0600",
            "start_time": "2018-05-14T04:59:59.889Z",
            "start_time_in_millis": 1526273999889,
            "state": "SUCCESS",
            "version": "1.7.5",
            "version_id": 1070599
        }
    ]
}
|
# -*- coding: utf-8 -*-
"""Tests for `.db` module."""
import pytest
from psycopg2 import OperationalError, sql
from tile_processor import db
class TestDB:
    """Testing config.db"""

    def test_failed_connection(self, bag3d_db):
        """Each invalid connection parameter raises OperationalError."""
        good = dict(
            dbname=bag3d_db.dbname,
            host=bag3d_db.host,
            port=bag3d_db.port,
            user=bag3d_db.user,
        )
        # (override, expected error-message fragment); None skips the
        # message check (the invalid-user message is server-dependent).
        cases = [
            ({"dbname": "invalid"}, 'database "invalid" does not exist'),
            ({"host": "invalid"},
             'could not translate host name "invalid" to address'),
            ({"port": 1}, "TCP/IP connections on port 1?"),
            ({"user": "invalid"}, None),
        ]
        for override, expected in cases:
            with pytest.raises(OperationalError) as excinfo:
                db.Db(**dict(good, **override))
            if expected is not None:
                assert expected in str(excinfo.value)
class TestSchema:
    """Tests for db.Schema identifier handling."""

    @pytest.fixture(scope="class")
    def relations(self):
        """A minimal relations mapping for building a Schema."""
        yield {
            "schema": "tile_index",
            "table": "bag_index_test",
            "fields": {
                "geometry": "geom",
                "primary_key": "id",
                "unit_name": "bladnr",
            },
        }

    def test_init(self, relations):
        """Schema exposes both the raw string and the SQL identifier."""
        schema = db.Schema(relations).schema
        assert schema.string == "tile_index"
        assert schema.sqlid == sql.Identifier("tile_index")

    def test_concatenate(self, relations):
        """Adding schema and table yields a qualified identifier."""
        index = db.Schema(relations)
        combined = index.schema + index.table
        assert combined == sql.Identifier("tile_index", "bag_index_test")
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This is the test suite for a vtgate_client implementation.
"""
import struct
from google.protobuf import text_format
from vtdb import dbexceptions
from vtdb import keyrange
from vtdb import keyrange_constants
from vtdb import vtgate_client
from vtdb import vtgate_cursor
from vtproto import query_pb2
from vtproto import topodata_pb2
class TestPythonClientBase(object):
  """Base class for Python client tests."""

  # conn must be opened as a vtgate_client connection.
  # This is not done by this class.
  conn = None

  # A packed keyspace_id from the middle of the full keyrange.
  KEYSPACE_ID_0X80 = struct.Struct('!Q').pack(0x80 << 56)

  def _cursor(self, keyspace, streaming=False, **extra):
    """Open a master-tablet cursor; streaming selects StreamVTGateCursor."""
    if streaming:
      extra['cursorclass'] = vtgate_cursor.StreamVTGateCursor
    return self.conn.cursor(
        tablet_type='master', keyspace=keyspace, **extra)

  def _open_v3_cursor(self):
    return self._cursor(None)

  def _open_shards_cursor(self):
    return self._cursor('keyspace', shards=['-80'])

  def _open_keyspace_ids_cursor(self):
    return self._cursor('keyspace', keyspace_ids=[self.KEYSPACE_ID_0X80])

  def _open_keyranges_cursor(self):
    kr = keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)
    return self._cursor('keyspace', keyranges=[kr])

  def _open_batch_cursor(self):
    return self._cursor(None)

  def _open_stream_v3_cursor(self):
    return self._cursor(None, streaming=True)

  def _open_stream_shards_cursor(self):
    return self._cursor('keyspace', streaming=True, shards=['-80'])

  def _open_stream_keyspace_ids_cursor(self):
    return self._cursor(
        'keyspace', streaming=True, keyspace_ids=[self.KEYSPACE_ID_0X80])

  def _open_stream_keyranges_cursor(self):
    kr = keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)
    return self._cursor('keyspace', streaming=True, keyranges=[kr])
class TestErrors(TestPythonClientBase):
  """Test cases to verify that the Python client can handle errors correctly.

  NOTE: assertRaises/assertRaisesRegexp come from unittest.TestCase, which is
  presumably mixed in by the concrete test classes -- confirm in the runner.
  """

  def _verify_exception_for_execute(self, query, exception):
    """Verify that we raise a specific exception for all Execute calls.

    Args:
      query: query string to use for execute calls.
      exception: exception class that we expect the execute call to raise.
    """
    # Execute test
    cursor = self._open_v3_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()
    # ExecuteShards test
    cursor = self._open_shards_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()
    # ExecuteKeyspaceIds test
    cursor = self._open_keyspace_ids_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()
    # ExecuteKeyRanges test
    cursor = self._open_keyranges_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()
    # ExecuteEntityIds test
    cursor = self.conn.cursor(tablet_type='master', keyspace='keyspace')
    with self.assertRaises(exception):
      cursor.execute(
          query, {},
          entity_keyspace_id_map={1: self.KEYSPACE_ID_0X80},
          entity_column_name='user_id')
    cursor.close()
    # ExecuteBatchKeyspaceIds test
    cursor = self._open_batch_cursor()
    with self.assertRaises(exception):
      cursor.executemany(
          sql=None,
          params_list=[
              dict(
                  sql=query,
                  bind_variables={},
                  keyspace='keyspace',
                  keyspace_ids=[self.KEYSPACE_ID_0X80])])
    cursor.close()
    # ExecuteBatchShards test
    cursor = self._open_batch_cursor()
    with self.assertRaises(exception):
      cursor.executemany(
          sql=None,
          params_list=[
              dict(
                  sql=query,
                  bind_variables={},
                  keyspace='keyspace',
                  shards=['0'])])
    cursor.close()

  def _verify_exception_for_stream_execute(self, query, exception):
    """Verify that we raise a specific exception for all StreamExecute calls.

    Args:
      query: query string to use for StreamExecute calls.
      exception: exception class that we expect StreamExecute to raise.
    """
    # StreamExecute test
    cursor = self._open_stream_v3_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()
    # StreamExecuteShards test
    cursor = self._open_stream_shards_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()
    # StreamExecuteKeyspaceIds test
    cursor = self._open_stream_keyspace_ids_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()
    # StreamExecuteKeyRanges test
    cursor = self._open_stream_keyranges_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()
    # UpdateStream test -- the special query is smuggled in as the shard name.
    with self.assertRaises(exception):
      for _, _ in self.conn.update_stream(
          'test_keyspace', topodata_pb2.MASTER,
          shard=query):
        pass

  def test_partial_integrity_errors(self):
    """Raise an IntegrityError when Execute returns a partial error."""
    # Special query that makes vtgateclienttest return a partial error.
    self._verify_exception_for_execute(
        'partialerror://integrity error',
        dbexceptions.IntegrityError)

  def _verify_exception_for_all_execute_methods(self, query, exception):
    # Runs both the plain-Execute and the StreamExecute checks.
    self._verify_exception_for_execute(query, exception)
    self._verify_exception_for_stream_execute(query, exception)

  def test_integrity_error(self):
    """Test we raise dbexceptions.IntegrityError."""
    self._verify_exception_for_all_execute_methods(
        'error://integrity error',
        dbexceptions.IntegrityError)

  def test_transient_error(self):
    """Test we raise dbexceptions.TransientError for Execute calls."""
    # Special query that makes vtgateclienttest return a TransientError.
    self._verify_exception_for_all_execute_methods(
        'error://transient error',
        dbexceptions.TransientError)

  def test_throttled_error(self):
    """Test we raise dbexceptions.ThrottledError."""
    # Special query that makes vtgateclienttest return a ThrottledError.
    self._verify_exception_for_all_execute_methods(
        'error://throttled error',
        dbexceptions.ThrottledError)

  def test_query_not_served_error(self):
    """Test we raise dbexceptions.QueryNotServed."""
    # Special query that makes vtgateclienttest return QueryNotServed.
    self._verify_exception_for_all_execute_methods(
        'error://query not served',
        dbexceptions.QueryNotServed)

  def test_programming_error(self):
    """Test we raise dbexceptions.ProgrammingError."""
    # Special query that makes vtgateclienttest return a ProgrammingError.
    self._verify_exception_for_all_execute_methods(
        'error://bad input',
        dbexceptions.ProgrammingError)

  def test_error(self):
    """Test a regular server error raises the right exception."""
    error_request = 'error://unknown error'
    error_caller_id = vtgate_client.CallerID(principal=error_request)
    # Begin test
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'forced error'):
      self.conn.begin(error_caller_id)
    # Commit test
    # NOTE(review): this and the Rollback block below call begin() again --
    # looks like a copy-paste; should these call commit()/rollback()? Verify
    # against vtgateclienttest before changing.
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'forced error'):
      self.conn.begin(error_caller_id)
    # Rollback test
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'forced error'):
      self.conn.begin(error_caller_id)
    # GetSrvKeyspace test
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'forced error'):
      self.conn.get_srv_keyspace(error_request)
class TestTransactionFlags(TestPythonClientBase):
  """Test transaction flags."""

  def test_begin(self):
    """begin(single_db=True) and commit(twopc=True) are rejected."""
    connection = self.conn
    # A plain begin must succeed before the flagged variant is tried.
    connection.begin()
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'single db'):
      connection.begin(single_db=True)
    # Likewise a plain commit, then the flagged variant.
    connection.commit()
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'twopc'):
      connection.commit(twopc=True)
class TestSuccess(TestPythonClientBase):
  """Success test cases for the Python client."""

  def test_success_get_srv_keyspace(self):
    """Test we get the right results from get_srv_keyspace.

    We only test the successful cases.
    """
    # big has one big shard
    big = self.conn.get_srv_keyspace('big')
    self.assertEquals(big.name, 'big')
    self.assertEquals(big.sharding_col_name, 'sharding_column_name')
    self.assertEquals(big.sharding_col_type, keyrange_constants.KIT_UINT64)
    self.assertEquals(big.served_from, {'master': 'other_keyspace'})
    # Python 2 module: these are byte strings covering keyrange [0x40, 0x80).
    self.assertEquals(big.get_shards('replica'),
                      [{'Name': 'shard0',
                        'KeyRange': {
                            'Start': '\x40\x00\x00\x00\x00\x00\x00\x00',
                            'End': '\x80\x00\x00\x00\x00\x00\x00\x00',
                        }}])
    self.assertEquals(big.get_shard_count('replica'), 1)
    self.assertEquals(big.get_shard_count('rdonly'), 0)
    self.assertEquals(big.get_shard_names('replica'), ['shard0'])
    # 0x60... falls inside shard0's range; 0x20... falls outside -> ValueError.
    self.assertEquals(big.keyspace_id_to_shard_name_for_db_type(
        0x6000000000000000, 'replica'), 'shard0')
    with self.assertRaises(ValueError):
      big.keyspace_id_to_shard_name_for_db_type(0x2000000000000000, 'replica')
    # small has no shards
    small = self.conn.get_srv_keyspace('small')
    self.assertEquals(small.name, 'small')
    self.assertEquals(small.sharding_col_name, '')
    self.assertEquals(small.sharding_col_type, keyrange_constants.KIT_UNSET)
    self.assertEquals(small.served_from, {})
    self.assertEquals(small.get_shards('replica'), [])
    self.assertEquals(small.get_shard_count('replica'), 0)
    with self.assertRaises(ValueError):
      small.keyspace_id_to_shard_name_for_db_type(0x6000000000000000, 'replica')
class TestCallerId(TestPythonClientBase):
  """Caller ID test cases for the Python client."""

  def test_effective_caller_id(self):
    """Test that the passed in effective_caller_id is parsed correctly.

    Pass a special sql query that sends the expected
    effective_caller_id through different vtgate interfaces. Make sure
    the good_effective_caller_id works, and the
    bad_effective_caller_id raises a DatabaseError.
    """
    # Special query that makes vtgateclienttest match effective_caller_id.
    effective_caller_id_test_query = (
        'callerid://{"principal":"pr", "component":"co", "subcomponent":"su"}')
    good_effective_caller_id = vtgate_client.CallerID(
        principal='pr', component='co', subcomponent='su')
    bad_effective_caller_id = vtgate_client.CallerID(
        principal='pr_wrong', component='co_wrong', subcomponent='su_wrong')

    def check_good_and_bad_effective_caller_ids(cursor, cursor_execute_method):
      # Both calls raise DatabaseError; only the matching caller id yields a
      # message containing 'SUCCESS:' (the test server's match marker).
      cursor.set_effective_caller_id(good_effective_caller_id)
      with self.assertRaises(dbexceptions.DatabaseError) as cm:
        cursor_execute_method(cursor)
      self.assertIn('SUCCESS:', str(cm.exception))
      cursor.set_effective_caller_id(bad_effective_caller_id)
      with self.assertRaises(dbexceptions.DatabaseError) as cm:
        cursor_execute_method(cursor)
      self.assertNotIn('SUCCESS:', str(cm.exception))

    # test Execute
    def cursor_execute_method(cursor):
      cursor.execute(effective_caller_id_test_query, {})
    check_good_and_bad_effective_caller_ids(
        self._open_v3_cursor(), cursor_execute_method)

    # test ExecuteShards
    def cursor_execute_shards_method(cursor):
      cursor.execute(effective_caller_id_test_query, {})
    check_good_and_bad_effective_caller_ids(
        self._open_shards_cursor(), cursor_execute_shards_method)

    # test ExecuteKeyspaceIds
    def cursor_execute_keyspace_ids_method(cursor):
      cursor.execute(effective_caller_id_test_query, {})
    check_good_and_bad_effective_caller_ids(
        self._open_keyspace_ids_cursor(), cursor_execute_keyspace_ids_method)

    # test ExecuteKeyRanges
    def cursor_execute_key_ranges_method(cursor):
      cursor.execute(effective_caller_id_test_query, {})
    check_good_and_bad_effective_caller_ids(
        self._open_keyranges_cursor(), cursor_execute_key_ranges_method)

    # test ExecuteEntityIds
    def cursor_execute_entity_ids_method(cursor):
      cursor.execute(
          effective_caller_id_test_query, {},
          entity_keyspace_id_map={1: self.KEYSPACE_ID_0X80},
          entity_column_name='user_id')
    check_good_and_bad_effective_caller_ids(
        self.conn.cursor(tablet_type='master', keyspace='keyspace'),
        cursor_execute_entity_ids_method)

    # test ExecuteBatchKeyspaceIds
    def cursor_execute_batch_keyspace_ids_method(cursor):
      cursor.executemany(
          sql=None,
          params_list=[dict(
              sql=effective_caller_id_test_query, bind_variables={},
              keyspace='keyspace',
              keyspace_ids=[self.KEYSPACE_ID_0X80])])
    check_good_and_bad_effective_caller_ids(
        self._open_batch_cursor(), cursor_execute_batch_keyspace_ids_method)

    # test ExecuteBatchShards
    def cursor_execute_batch_shard_method(cursor):
      cursor.executemany(
          sql=None,
          params_list=[dict(
              sql=effective_caller_id_test_query, bind_variables={},
              keyspace='keyspace',
              shards=['0'])])
    check_good_and_bad_effective_caller_ids(
        self._open_batch_cursor(), cursor_execute_batch_shard_method)

    # test StreamExecute
    def cursor_stream_execute_v3_method(cursor):
      cursor.execute(sql=effective_caller_id_test_query, bind_variables={})
    check_good_and_bad_effective_caller_ids(
        self._open_stream_v3_cursor(),
        cursor_stream_execute_v3_method)

    # test StreamExecuteShards
    def cursor_stream_execute_shards_method(cursor):
      cursor.execute(sql=effective_caller_id_test_query, bind_variables={})
    check_good_and_bad_effective_caller_ids(
        self._open_stream_shards_cursor(),
        cursor_stream_execute_shards_method)

    # test StreamExecuteKeyspaceIds
    def cursor_stream_execute_keyspace_ids_method(cursor):
      cursor.execute(sql=effective_caller_id_test_query, bind_variables={})
    check_good_and_bad_effective_caller_ids(
        self._open_stream_keyspace_ids_cursor(),
        cursor_stream_execute_keyspace_ids_method)

    # test StreamExecuteKeyRanges
    def cursor_stream_execute_keyranges_method(cursor):
      cursor.execute(sql=effective_caller_id_test_query, bind_variables={})
    check_good_and_bad_effective_caller_ids(
        self._open_stream_keyranges_cursor(),
        cursor_stream_execute_keyranges_method)
class TestEcho(TestPythonClientBase):
  """Send queries to the server, check the returned result matches.

  Python 2 module: the expected echo strings below are byte strings, and
  _check_echo relies on dict.iteritems().
  """

  echo_prefix = 'echo://'
  # Query text, UTF-8 encoded (exercises non-ASCII handling end to end).
  query = (
      u'test query with bind variables: :int :float :bytes, unicode: '
      u'\u6211\u80fd\u541e\u4e0b\u73bb\u7483\u800c\u4e0d\u50b7\u8eab\u9ad4'
  ).encode('utf-8')
  query_echo = (
      u'test query with bind variables: :int :float :bytes, unicode: '
      u'\u6211\u80fd\u541e\u4e0b\u73bb\u7483\u800c\u4e0d\u50b7\u8eab\u9ad4'
  ).encode('utf-8')
  keyspace = 'test_keyspace'

  shards = ['-80', '80-']
  shards_echo = '[-80 80-]'

  keyspace_ids = ['\x01\x02\x03\x04', '\x05\x06\x07\x08']
  keyspace_ids_echo = '[[1 2 3 4] [5 6 7 8]]'

  # FIXME(alainjobart) using a map for the entities makes it impossible to
  # guarantee the order of the entities in the query. It is really an API
  # problem here? For this test, however, I'll just use a single value for now
  entity_keyspace_ids = {
      123: '\x01\x02\x03',
      # 2.0: '\x04\x05\x06',
      # '\x01\x02\x03': '\x07\x08\x09',
  }
  # entity_keyspace_ids_echo = ('[type:INT64 value:"123" '
  #                             'keyspace_id:"\\001\\002\\003" '
  #                             'type:FLOAT64 value:"2" '
  #                             'keyspace_id:"\\004\\005\\006" '
  #                             'type:VARBINARY value:"\\001\\002\\003" '
  #                             'keyspace_id:"\\007\\010\\t" ]')
  entity_keyspace_ids_echo = ('[type:INT64 value:"123" '
                              'keyspace_id:"\\001\\002\\003" ]')

  key_ranges = [keyrange.KeyRange('01020304-05060708')]
  key_ranges_echo = '[start:"\\001\\002\\003\\004" end:"\\005\\006\\007\\010" ]'

  tablet_type = 'replica'
  tablet_type_echo = 'REPLICA'

  bind_variables = {
      'int': 123,
      'float': 2.1,
      'bytes': '\x01\x02\x03',
      'bool': True,
  }
  bind_variables_echo = ('map[bool:type:INT64 value:"1" '
                         'bytes:type:VARBINARY value:"\\001\\002\\003" '
                         'float:type:FLOAT64 value:"2.1" '
                         'int:type:INT64 value:"123" ]')

  caller_id = vtgate_client.CallerID(
      principal='test_principal',
      component='test_component',
      subcomponent='test_subcomponent')
  caller_id_echo = ('principal:"test_principal" component:"test_component"'
                    ' subcomponent:"test_subcomponent" ')

  event_token = query_pb2.EventToken(timestamp=123,
                                     shard=shards[0],
                                     position='test_pos')
  options_echo = ('include_event_token:true compare_event_token:'
                  '<timestamp:123 shard:"-80" position:"test_pos" > ')

  def test_echo_execute(self):
    """This test calls the echo method.

    Each section opens a cursor for one vtgate API, runs the echo query,
    and checks the server echoed back exactly what was sent.
    """
    # Execute
    cursor = self.conn.cursor(tablet_type=self.tablet_type, keyspace=None)
    cursor.set_effective_caller_id(self.caller_id)
    cursor.execute(self.echo_prefix+self.query, self.bind_variables,
                   include_event_token=True,
                   compare_event_token=self.event_token)
    self._check_echo(cursor, {
        'callerId': self.caller_id_echo,
        # FIXME(alainjobart) change this to query_echo once v3 understand binds
        'query': self.echo_prefix+self.query,
        'bindVars': self.bind_variables_echo,
        'tabletType': self.tablet_type_echo,
        'options': self.options_echo,
        'fresher': True,
        'eventToken': self.event_token,
    })
    cursor.close()

    # ExecuteShards
    cursor = self.conn.cursor(
        tablet_type=self.tablet_type, keyspace=self.keyspace,
        shards=self.shards)
    cursor.set_effective_caller_id(self.caller_id)
    cursor.execute(self.echo_prefix+self.query, self.bind_variables,
                   include_event_token=True,
                   compare_event_token=self.event_token)
    self._check_echo(cursor, {
        'callerId': self.caller_id_echo,
        'query': self.echo_prefix+self.query_echo,
        'keyspace': self.keyspace,
        'shards': self.shards_echo,
        'bindVars': self.bind_variables_echo,
        'tabletType': self.tablet_type_echo,
        'options': self.options_echo,
        'fresher': True,
        'eventToken': self.event_token,
    })
    cursor.close()

    # ExecuteKeyspaceIds
    cursor = self.conn.cursor(
        tablet_type=self.tablet_type, keyspace=self.keyspace,
        keyspace_ids=self.keyspace_ids)
    cursor.set_effective_caller_id(self.caller_id)
    cursor.execute(self.echo_prefix+self.query, self.bind_variables,
                   include_event_token=True,
                   compare_event_token=self.event_token)
    self._check_echo(cursor, {
        'callerId': self.caller_id_echo,
        'query': self.echo_prefix+self.query_echo,
        'keyspace': self.keyspace,
        'keyspaceIds': self.keyspace_ids_echo,
        'bindVars': self.bind_variables_echo,
        'tabletType': self.tablet_type_echo,
        'options': self.options_echo,
        'fresher': True,
        'eventToken': self.event_token,
    })
    cursor.close()

    # ExecuteKeyRanges (no event-token options passed here)
    cursor = self.conn.cursor(
        tablet_type=self.tablet_type, keyspace=self.keyspace,
        keyranges=self.key_ranges)
    cursor.set_effective_caller_id(self.caller_id)
    cursor.execute(self.echo_prefix+self.query, self.bind_variables,
                   include_event_token=True,
                   compare_event_token=self.event_token)
    self._check_echo(cursor, {
        'callerId': self.caller_id_echo,
        'query': self.echo_prefix+self.query_echo,
        'keyspace': self.keyspace,
        'keyRanges': self.key_ranges_echo,
        'bindVars': self.bind_variables_echo,
        'tabletType': self.tablet_type_echo,
    })
    cursor.close()

    # ExecuteEntityIds
    cursor = self.conn.cursor(
        tablet_type=self.tablet_type, keyspace=self.keyspace)
    cursor.set_effective_caller_id(self.caller_id)
    cursor.execute(self.echo_prefix+self.query, self.bind_variables,
                   entity_keyspace_id_map=self.entity_keyspace_ids,
                   entity_column_name='column1',
                   include_event_token=True,
                   compare_event_token=self.event_token)
    self._check_echo(cursor, {
        'callerId': self.caller_id_echo,
        'query': self.echo_prefix+self.query_echo,
        'keyspace': self.keyspace,
        'entityColumnName': 'column1',
        'entityIds': self.entity_keyspace_ids_echo,
        'bindVars': self.bind_variables_echo,
        'tabletType': self.tablet_type_echo,
        'options': self.options_echo,
        'fresher': True,
        'eventToken': self.event_token,
    })
    cursor.close()

    # ExecuteBatchShards
    cursor = self.conn.cursor(
        tablet_type=self.tablet_type, keyspace=None,
        as_transaction=True)
    cursor.set_effective_caller_id(self.caller_id)
    cursor.executemany(sql=None,
                       params_list=[
                           dict(
                               sql=self.echo_prefix+self.query,
                               bind_variables=self.bind_variables,
                               keyspace=self.keyspace,
                               shards=self.shards)])
    self._check_echo(cursor, {
        'callerId': self.caller_id_echo,
        'query': self.echo_prefix+self.query_echo,
        'keyspace': self.keyspace,
        'shards': self.shards_echo,
        'bindVars': self.bind_variables_echo,
        'tabletType': self.tablet_type_echo,
        'asTransaction': 'true',
    })
    cursor.close()

    # ExecuteBatchKeyspaceIds
    cursor = self.conn.cursor(
        tablet_type=self.tablet_type, keyspace=None,
        as_transaction=True)
    cursor.set_effective_caller_id(self.caller_id)
    cursor.executemany(sql=None,
                       params_list=[
                           dict(
                               sql=self.echo_prefix+self.query,
                               bind_variables=self.bind_variables,
                               keyspace=self.keyspace,
                               keyspace_ids=self.keyspace_ids)])
    self._check_echo(cursor, {
        'callerId': self.caller_id_echo,
        'query': self.echo_prefix+self.query_echo,
        'keyspace': self.keyspace,
        'keyspaceIds': self.keyspace_ids_echo,
        'bindVars': self.bind_variables_echo,
        'tabletType': self.tablet_type_echo,
        'asTransaction': 'true',
    })
    cursor.close()

  def _get_echo(self, cursor):
    """Fold the single echoed result row into a column-name -> value dict."""
    result = {}
    data = cursor.fetchall()
    for i, (n, _) in enumerate(cursor.description):
      result[n] = data[0][i]
    return result

  def _check_echo(self, cursor, values):
    """_check_echo makes sure the echo result is correct."""
    got = self._get_echo(cursor)
    for k, v in values.iteritems():
      # 'fresher' and 'eventToken' are surfaced on the connection, not as
      # echoed columns, so they are checked separately.
      if k == 'fresher':
        self.assertTrue(self.conn.fresher)
      elif k == 'eventToken':
        self.assertEqual(text_format.MessageToString(self.conn.event_token),
                         text_format.MessageToString(v))
      else:
        self.assertEqual(got[k], v, 'item %s is different in result: got %s'
                         ' expected %s' % (k, got[k], v))
    # Check NULL and empty string.
    self.assertEqual(got['null'], None)
    self.assertEqual(got['emptyString'], '')
|
# TODO: Test replay_events().
|
from paddle_prompt.templates.base_template import Template
class ManualTemplate(Template):
    """Template subclass for manual mode.

    The abstract base ``Template`` already behaves as the manual mode
    requires, so this subclass adds no overrides.
    """
    pass
|
from .aux import create_graph_from_string, convert_graph_to_string
from .node_matcher import StringNodeMatcher
from .graph_builder import GraphBuilder
from .match import MatchException
from .code_container import CodeContainerFactory
def convert_special_characters_to_spaces(line):
    """Return *line* with every tab and newline replaced by a single space."""
    return line.replace('\t', ' ').replace('\n', ' ')
class GraphDatabase:
    """Interprets textual graph commands and applies them to a graph.

    Commands (MATCH/CREATE/DELETE/SET/WHERE/RETURN) are parsed out of a
    query string and translated into operations on the wrapped graph by
    delegating to GraphBuilder.
    """

    # NOTE(review): the default matcher and factory instances below are
    # created once at class-definition time and shared by every
    # GraphDatabase constructed with the defaults. Kept as-is for
    # compatibility; confirm the shared-instance behaviour is intended.
    def __init__(self, g, node_matcher=StringNodeMatcher(), code_container_factory=CodeContainerFactory()):
        """
        This class interprets the commands translates them into operations on a graph by calling GraphBuilder().
        It accepts a graph as an argument and performs operations onto it.
        :param g: The graph to perform operations onto
        :param node_matcher: The class that decides if two nodes match
        :param code_container_factory: the class that creates the object that executes the LISP code
        """
        self.g = g
        self.node_matcher = node_matcher
        # Trailing spaces stop e.g. 'MATCH ' from matching inside longer
        # words; 'RETURN' has no trailing space because it may appear alone.
        self.action_list = ['MATCH ', 'CREATE ', 'DELETE ', 'RETURN', 'SET ', 'WHERE ']
        self.action_dict = {'MATCH': self.__match,
                            'CREATE': self.__create,
                            'DELETE': self.__delete,
                            'SET': self.__set,
                            'WHERE': self.__where,
                            }
        self.code_container_factory = code_container_factory

    def query(self, string, repeat_n_times=None):
        """
        This method performs the operations onto self.g
        :param string: The list of operations to perform. The sequences of commands should be separated by a semicolon
                       An example might be
                       CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2),
                              {'tag': 'PLACE', 'text': 'London'}(v2)
                       MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b)
                       WHERE (= (get _a "text") "joseph")
                       RETURN _a,_b;
        :param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of
                               the return list. If None then the value is set by the function
                               self.__determine_how_many_times_to_repeat_query(string)
        :return: If the RETURN command is called with a list of variables names, a list of JSON with
                 the corresponding properties is returned. If the RETURN command is used alone, a list with the entire
                 graph is returned. Otherwise it returns an empty list
        """
        if not repeat_n_times:
            repeat_n_times = self.__determine_how_many_times_to_repeat_query(string)
        lines = self.__get_command_lines(string)
        return_list = []
        for line in lines:
            lst = self.__query_n_times(line, repeat_n_times)
            # Keep the results of the last command line whose first run
            # produced something.
            if lst and lst[0]:
                return_list = lst
        return return_list

    def get_graph(self):
        """Return the underlying graph object."""
        return self.g

    # Private

    def __query_n_times(self, line, n):
        # Re-run the same query with increasing match_index so successive
        # matches in the graph are visited; stop once a run yields nothing
        # or the matcher reports there is no i-th match.
        rows = []
        for i in range(n):
            try:
                builder = GraphBuilder(self.g, self.node_matcher, self.code_container_factory, match_index=i)
                results = self.__query_with_builder(line, builder)
                rows.append(results)
                if not results:
                    break
            except MatchException:
                break
        return rows

    def __query_with_builder(self, string, builder):
        """
        Uses the builder in the argument to modify the graph, according to the commands in the string
        :param string: The single query to the database
        :return: The result of the RETURN operation
        """
        action_graph_pairs = self.__get_action_graph_pairs_from_query(string)
        for action, graph_str in action_graph_pairs:
            # RETURN (or a trailing empty action) terminates the command
            # sequence and produces the result.
            if action == 'RETURN' or action == '':
                return self.__return(graph_str, builder)
            try:
                self.action_dict[action](graph_str, builder)
            except MatchException:
                # A failed match aborts the rest of this command sequence.
                break
        return {}

    def __get_action_graph_pairs_from_query(self, query):
        """
        Splits the query into command/argument pairs, for example [("MATCH","{}(_a))", ("RETURN","_a")]
        :param query: The string with the list of commands
        :return: the command/argument pairs
        """
        import re
        query = convert_special_characters_to_spaces(query)
        graph_list = re.split('|'.join(self.action_list), query)
        query_list_positions = [query.find(graph) for graph in graph_list]
        # (removed two dead `query_list_positions = query_list_positions`
        # self-assignments that had no effect)
        action_list = [query[query_list_positions[i] + len(graph_list[i]):query_list_positions[i + 1]].strip()
                       for i in range(len(graph_list) - 1)]
        graph_list = graph_list[1:]
        return zip(action_list, graph_list)

    def __match(self, graph_str, builder):
        # Parse the textual sub-graph and ask the builder to match it.
        graph = create_graph_from_string(graph_str)
        builder.match_graph(graph)

    def __create(self, graph_str, builder):
        # Parse the textual sub-graph and add it to the graph.
        graph = create_graph_from_string(graph_str)
        builder.add_graph(graph)

    def __delete(self, graph_str, builder):
        # DELETE takes a comma-separated list of variable names.
        variables = [v for v in graph_str.strip().replace(' ', '').split(',') if v]
        builder.delete_list(variables)

    def __return(self, graph_str, builder):
        variables = [v for v in graph_str.strip().replace(' ', '').split(',') if v]
        if not variables:
            # Bare RETURN: serialize the whole graph.
            return {'GRAPH': convert_graph_to_string(builder.build())}
        return builder.build_variables(variables)

    def __set(self, graph_str, builder):
        builder.set(graph_str)
        return True

    def __where(self, graph_str, builder):
        builder.where(graph_str)
        return True

    def __get_command_lines(self, string):
        # Drop blank lines and '#' comment lines, then split the remaining
        # text into command sequences on ';'.
        lines = []
        for line in string.split('\n'):
            if not line.strip() or line.strip()[0] == '#':
                continue
            lines.append(line)
        lines = '\n'.join(lines).split(';')
        return lines

    def __determine_how_many_times_to_repeat_query(self, query_string):
        # Default: at most one repetition per vertex; a CREATE query only
        # needs to run once.
        repeat_n_times = len(self.g.vs)
        if query_string.find('CREATE') != -1:
            repeat_n_times = 1
        return repeat_n_times
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import tensorflow as tf
import util
# Data pipeline parameters
SHUFFLE_BUFFER = 10000  # rows held in the shuffle buffer during training
class MLPData:
    """Tabular data source for the MLP model.

    Loads a CSV (or generates synthetic placeholder data when no path is
    given), log-transforms the large-valued columns, and exposes the rows
    through a batched, prefetched tf.data pipeline.
    """

    def __init__(self, opts, data_path):
        # Define data constants - these must be hardcoded.
        # Warning: data requires categorical columns in order, then continuous columns in order, then sales column
        self.CATEGORICAL_COLS = [
            'Store', 'DayOfWeek', 'Promo', 'StateHoliday', 'SchoolHoliday', 'Year', 'Month', 'Day', 'Week', 'StoreType', 'Assortment',
            'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval'
        ]
        self.CONTINUOUS_COLS = [
            'CompetitionDistance'
        ]
        self.SYNTHETIC_NUM_ROWS = 807691
        # Cardinality of each categorical column, in CATEGORICAL_COLS order.
        self.VOCAB_LENS = [1115, 7, 2, 4, 2, 3, 12, 31, 52, 4, 3, 12, 23, 2, 24, 8, 4]
        self.NUM_CATEGORICAL = len(self.CATEGORICAL_COLS)
        self.NUM_CONTINUOUS = len(self.CONTINUOUS_COLS)
        self.NUM_COLS = self.NUM_CATEGORICAL + self.NUM_CONTINUOUS + 1  # Adding sales column
        # dtype name comes from the CLI options, e.g. 'float16' / 'float32'.
        dtype = getattr(np, opts.dtypes[0])
        self.data_path = data_path
        # If using synthetic data, data_path = None. Generate placeholder data instead.
        if not data_path:
            print(" Using synthetic data")
            self._size = self.SYNTHETIC_NUM_ROWS
            self._data = np.ones([self._size, self.NUM_COLS], dtype=dtype)
        else:
            # Load CSV into a numpy array.
            # First load in as FP32, as some columns may contain values > max(FP16). Then log such columns, then cast to 16.
            self._data = np.genfromtxt(data_path, delimiter=',', dtype=np.float32, skip_header=1)
            # Log the 'Sales' column. Sales column is at the end
            self._data[:, -1] = np.log(self._data[:, -1])
            # Log the 'CompetitionDistance' column otherwise moving_variance is too large for 16.16 to represent in BNorm update parameters
            # Add 1 to avoid log(0) and map 0 -> log(0+1) -> 0
            self._data[:, -2] = np.log(self._data[:, -2] + 1)
            # Cast to dtype
            self._data = self._data.astype(dtype)
            # Store dataset size
            self._size = self._data.shape[0]
        # Determine log max Sales number
        self._log_max_sales = np.amax(self._data[:, -1])

    @property
    def size(self):
        # Number of rows in the loaded (or synthetic) dataset.
        return self._size

    def get_dataset(self, opts, mode):
        ''' Create a tf Dataset with buffering, stats, etc. '''
        # Returns (dataset, placeholders); placeholders holds the
        # learning-rate placeholder when training, else stays empty.
        dtype = getattr(np, opts.dtypes[0])
        batch_size = opts.micro_batch_size if mode == util.Modes.TRAIN else opts.validation_batch_size
        # Create tf Dataset from the numpy array
        dataset = tf.data.Dataset.from_tensor_slices((self._data))
        # Repeat, and shuffle if we're training
        if mode == util.Modes.TRAIN:
            # NOTE(review): tf.data.experimental.shuffle_and_repeat is a
            # TF1-era API, consistent with the rest of this file.
            dataset = dataset.apply(
                tf.data.experimental.shuffle_and_repeat(SHUFFLE_BUFFER))
        else:
            dataset = dataset.repeat()
        # Batch the data
        dataset = dataset.batch(batch_size, drop_remainder=True)
        # Prefetch
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        # Pipeline stats
        stats_aggregator = tf.data.experimental.StatsAggregator()
        dataset = dataset.apply(
            tf.data.experimental.latency_stats("latency_stats"))
        # Expose the aggregator's summary so latency stats appear in
        # TensorBoard alongside the other summaries.
        tf.add_to_collection(
            tf.GraphKeys.SUMMARIES,
            stats_aggregator.get_summary())
        options = tf.data.Options()
        options.experimental_stats.aggregator = stats_aggregator
        dataset = dataset.with_options(options)
        placeholders = {}
        if mode == util.Modes.TRAIN:
            placeholders['learning_rate'] = tf.placeholder(dtype, shape=[])
        return dataset, placeholders
|
#
# PySNMP MIB module ENTERASYS-TACACS-CLIENT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ENTERASYS-TACACS-CLIENT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:50:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Symbol imports resolved through the MIB builder. `mibBuilder` is not
# defined in this file — it is supplied by the environment that loads and
# executes this pysmi-generated module.
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
etsysModules, = mibBuilder.importSymbols("ENTERASYS-MIB-NAMES", "etsysModules")
InetAddress, InetAddressType, InetPortNumber = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType", "InetPortNumber")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Counter64, Gauge32, Bits, IpAddress, NotificationType, MibIdentifier, Counter32, Integer32, Unsigned32, ObjectIdentity, iso = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Counter64", "Gauge32", "Bits", "IpAddress", "NotificationType", "MibIdentifier", "Counter32", "Integer32", "Unsigned32", "ObjectIdentity", "iso")
TruthValue, TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "RowStatus", "DisplayString")
# Module identity and revision history.
etsysTacacsClientMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58))
etsysTacacsClientMIB.setRevisions(('2010-02-01 17:02', '2005-02-10 17:57',))
if mibBuilder.loadTexts: etsysTacacsClientMIB.setLastUpdated('201002011702Z')
if mibBuilder.loadTexts: etsysTacacsClientMIB.setOrganization('Enterasys Networks, Inc')
# Object subtree roots.
etsysTacacsClientObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1))
etsysTacacsClientControl = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 1))
etsysTacacsClientSesnAuth = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 2))
etsysTacacsClientServer = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3))
# Read-write control scalars (all default to 'disabled').
etsysTacacsClientSesnAuthEnabled = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 1, 1), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientSesnAuthEnabled.setStatus('current')
etsysTacacsClientSesnAcctEnabled = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 1, 2), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientSesnAcctEnabled.setStatus('current')
etsysTacacsClientCmdAuthEnabled = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 1, 3), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientCmdAuthEnabled.setStatus('current')
etsysTacacsClientCmdAcctEnabled = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 1, 4), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientCmdAcctEnabled.setStatus('current')
etsysTacacsClientSingleConnection = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 1, 5), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientSingleConnection.setStatus('current')
# Session authorization objects.
etsysTacacsClientSesnAuthService = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 2, 1), SnmpAdminString().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientSesnAuthService.setStatus('current')
etsysTacacsClientSesnAuthTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 2, 2), )
if mibBuilder.loadTexts: etsysTacacsClientSesnAuthTable.setStatus('current')
etsysTacacsClientSesnAuthEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 2, 2, 1), ).setIndexNames((0, "ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSesnAuthLevel"))
if mibBuilder.loadTexts: etsysTacacsClientSesnAuthEntry.setStatus('current')
etsysTacacsClientSesnAuthLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("readonly", 1), ("readwrite", 2), ("superuser", 3), ("debug", 4))))
if mibBuilder.loadTexts: etsysTacacsClientSesnAuthLevel.setStatus('current')
etsysTacacsClientSesnAuthAttribute = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 2, 2, 1, 2), SnmpAdminString().clone('priv-lvl')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientSesnAuthAttribute.setStatus('current')
etsysTacacsClientSesnAuthValue = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 2, 2, 1, 3), SnmpAdminString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientSesnAuthValue.setStatus('current')
# TACACS+ server table (one row per configured server).
etsysTacacsClientServerTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1), )
if mibBuilder.loadTexts: etsysTacacsClientServerTable.setStatus('current')
etsysTacacsClientServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1), ).setIndexNames((0, "ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientServerIndex"))
if mibBuilder.loadTexts: etsysTacacsClientServerEntry.setStatus('current')
etsysTacacsClientServerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: etsysTacacsClientServerIndex.setStatus('current')
etsysTacacsClientServerAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1, 2), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysTacacsClientServerAddressType.setStatus('current')
etsysTacacsClientServerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1, 3), InetAddress().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysTacacsClientServerAddress.setStatus('current')
etsysTacacsClientServerPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1, 4), InetPortNumber().clone(49)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysTacacsClientServerPortNumber.setStatus('current')
etsysTacacsClientServerTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 180)).clone(10)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysTacacsClientServerTimeout.setStatus('current')
etsysTacacsClientServerSecret = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysTacacsClientServerSecret.setStatus('current')
etsysTacacsClientServerSecretEntered = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysTacacsClientServerSecretEntered.setStatus('current')
etsysTacacsClientServerStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 1, 3, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: etsysTacacsClientServerStatus.setStatus('current')
# Conformance: groups and compliance statements.
etsysTacacsClientConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 2))
etsysTacacsClientCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 2, 1))
etsysTacacsClientGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 2, 2))
etsysTacacsClientSessionGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 2, 2, 1)).setObjects(("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSesnAuthEnabled"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSesnAcctEnabled"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSingleConnection"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientServerAddressType"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientServerAddress"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientServerPortNumber"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientServerTimeout"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientServerSecret"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientServerSecretEntered"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientServerStatus"))
# setStatus on groups/compliances is only available on newer mibBuilder versions.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    etsysTacacsClientSessionGroup = etsysTacacsClientSessionGroup.setStatus('current')
etsysTacacsClientCmdAuthGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 2, 2, 2)).setObjects(("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientCmdAuthEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    etsysTacacsClientCmdAuthGroup = etsysTacacsClientCmdAuthGroup.setStatus('current')
etsysTacacsClientCmdAcctGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 2, 2, 3)).setObjects(("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientCmdAcctEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    etsysTacacsClientCmdAcctGroup = etsysTacacsClientCmdAcctGroup.setStatus('current')
etsysTacacsClientSesnAuthGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 2, 2, 4)).setObjects(("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSesnAuthService"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSesnAuthAttribute"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSesnAuthValue"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    etsysTacacsClientSesnAuthGroup = etsysTacacsClientSesnAuthGroup.setStatus('current')
etsysTacacsClientCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5624, 1, 2, 58, 2, 1, 1)).setObjects(("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSessionGroup"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientCmdAuthGroup"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientCmdAcctGroup"), ("ENTERASYS-TACACS-CLIENT-MIB", "etsysTacacsClientSesnAuthGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    etsysTacacsClientCompliance = etsysTacacsClientCompliance.setStatus('current')
# Re-export every defined symbol so other MIB modules can import them.
mibBuilder.exportSymbols("ENTERASYS-TACACS-CLIENT-MIB", etsysTacacsClientSesnAcctEnabled=etsysTacacsClientSesnAcctEnabled, etsysTacacsClientSesnAuthEntry=etsysTacacsClientSesnAuthEntry, etsysTacacsClientServerTable=etsysTacacsClientServerTable, etsysTacacsClientCmdAuthGroup=etsysTacacsClientCmdAuthGroup, etsysTacacsClientGroups=etsysTacacsClientGroups, etsysTacacsClientObjects=etsysTacacsClientObjects, PYSNMP_MODULE_ID=etsysTacacsClientMIB, etsysTacacsClientServerSecret=etsysTacacsClientServerSecret, etsysTacacsClientServerTimeout=etsysTacacsClientServerTimeout, etsysTacacsClientServerAddress=etsysTacacsClientServerAddress, etsysTacacsClientCmdAuthEnabled=etsysTacacsClientCmdAuthEnabled, etsysTacacsClientServerStatus=etsysTacacsClientServerStatus, etsysTacacsClientCmdAcctGroup=etsysTacacsClientCmdAcctGroup, etsysTacacsClientConformance=etsysTacacsClientConformance, etsysTacacsClientSesnAuthTable=etsysTacacsClientSesnAuthTable, etsysTacacsClientCompliances=etsysTacacsClientCompliances, etsysTacacsClientMIB=etsysTacacsClientMIB, etsysTacacsClientServer=etsysTacacsClientServer, etsysTacacsClientCmdAcctEnabled=etsysTacacsClientCmdAcctEnabled, etsysTacacsClientSesnAuthGroup=etsysTacacsClientSesnAuthGroup, etsysTacacsClientSesnAuthEnabled=etsysTacacsClientSesnAuthEnabled, etsysTacacsClientServerSecretEntered=etsysTacacsClientServerSecretEntered, etsysTacacsClientServerEntry=etsysTacacsClientServerEntry, etsysTacacsClientSesnAuthService=etsysTacacsClientSesnAuthService, etsysTacacsClientServerAddressType=etsysTacacsClientServerAddressType, etsysTacacsClientSesnAuthValue=etsysTacacsClientSesnAuthValue, etsysTacacsClientSesnAuthLevel=etsysTacacsClientSesnAuthLevel, etsysTacacsClientSesnAuthAttribute=etsysTacacsClientSesnAuthAttribute, etsysTacacsClientServerIndex=etsysTacacsClientServerIndex, etsysTacacsClientControl=etsysTacacsClientControl, etsysTacacsClientSingleConnection=etsysTacacsClientSingleConnection, 
etsysTacacsClientServerPortNumber=etsysTacacsClientServerPortNumber, etsysTacacsClientSesnAuth=etsysTacacsClientSesnAuth, etsysTacacsClientCompliance=etsysTacacsClientCompliance, etsysTacacsClientSessionGroup=etsysTacacsClientSessionGroup)
|
import json
import requests
from datetime import datetime, timedelta
from src.celery.celery import app
from src.file.models import FileInfo, FilesProxy
from django.conf import settings
@app.task
def check_file(id):
    """Validate a stored file and notify the file proxies of the outcome.

    Loads the ``FileInfo`` row, attempts to access its file (the actual
    integrity checks belong where the placeholder comment sits), records
    the resulting status and check timestamp, then pushes the file link to
    every registered proxy.

    :param id: primary key of the ``FileInfo`` row to check
    """
    file_obj = FileInfo.objects.get(id=id)
    status = FileInfo.STATUS_OK
    try:
        # Accessing the attribute forces the storage backend to resolve the
        # file; a failure here marks the file as broken.
        file = file_obj.file
        # do some check stuff
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` keeps the intended best-effort scope.
        status = FileInfo.STATUS_ERROR
    file_obj.status = status
    # NOTE(review): naive local time — confirm against the project's
    # timezone convention (USE_TZ?).
    file_obj.dttm_end_check = datetime.now()
    file_obj.save()
    send_file_link_to_proxy(file_obj.id, status=status)
@app.task
def send_file_link_to_proxy(id, status='P'):
    """POST the file's REST link and status to every registered file proxy."""
    file_url = f'{settings.EXTERNAL_HOST.rstrip("/")}/rest_api/file/{id}/'
    for file_proxy in FilesProxy.objects.all():
        payload = {'url': file_url, 'status': status}
        requests.post(
            file_proxy.proxy_url,
            json=payload,
            headers=json.loads(file_proxy.headers),
        )
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/74_callback.azureml.ipynb (unless otherwise specified).
__all__ = ['AzureMLCallback']  # public API of this auto-generated module
# Cell
import tempfile
from ..basics import *
from ..learner import Callback
# Cell
from azureml.core.run import Run
# Cell
class AzureMLCallback(Callback):
    "Log losses, metrics, model architecture summary to AzureML"
    # Run after the Recorder so recorder.metric_names/log are populated.
    order = Recorder.order+1

    def before_fit(self):
        """Grab the AzureML run context and log run-level metadata."""
        self.run = Run.get_context()
        self.run.log("n_epoch", self.learn.n_epoch)
        self.run.log("model_class", str(type(self.learn.model)))
        try:
            # AzureML automatically uploads files written under "outputs".
            summary_file = Path("outputs") / 'model_summary.txt'
            with summary_file.open("w") as f:
                f.write(repr(self.learn.model))
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; summary logging stays best-effort.
            print('Did not log model summary. Check if your model is PyTorch model.')

    def after_batch(self):
        # log loss and opt.hypers
        if self.learn.training:
            # self.run.log('batch__smooth_loss', self.learn.smooth_loss)
            self.run.log('batch__loss', self.learn.loss)
            self.run.log('batch__train_iter', self.learn.train_iter)
            for i, h in enumerate(self.learn.opt.hypers):
                for k, v in h.items():
                    self.run.log(f'batch__opt.hypers.{k}', v)

    def after_epoch(self):
        # log metrics; 'epoch' is skipped, 'time' is logged as a string.
        for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
            if n not in ['epoch', 'time']:
                self.run.log(f'epoch__{n}', v)
            if n == 'time':
                self.run.log(f'epoch__{n}', str(v))
#!/usr/bin/env python
# coding: utf-8
# # AIChE 2019 pMuTT Workshop
#
# Instructions and materials for the Computational Catalysis workshop can be found on webpage.
#
# # Table of Contents
#
# | **1\. [Introduction](#section_1)**
#
# |-- **1.1. [Some of pMuTT's Capabilities](#section_1_1)**
#
# | **2\. [Useful Links](#section_2)**
#
# | **3\. [Creating statistical mechanical objects using StatMech](#section_3)**
#
# |-- **3.1. [Supported StatMech models](#section_3_1)**
#
# |--|-- **3.1.1. [Translations](#section_3_1_1)**
#
# |--|-- **3.1.2. [Vibrations](#section_3_1_2)**
#
# |--|-- **3.1.3. [Rotations](#section_3_1_3)**
#
# |--|-- **3.1.4. [Electronic](#section_3_1_4)**
#
# |--|-- **3.1.5. [Miscellaneous](#section_3_1_5)**
#
# |-- **3.2. [Initializing StatMech modes individually](#section_3_2)**
#
# |-- **3.3. [Initializing StatMech modes using presets](#section_3_3)**
#
# | **4\. [Creating empirical objects](#section_4)**
#
# |-- **4.1. [Inputting a NASA polynomial directly](#section_4_1)**
#
# |-- **4.2. [Fitting an empirical object to a StatMech object](#section_4_2)**
#
# | **5\. [Input/Output](#section_5)**
#
# |-- **5.1. [Input via Excel](#section_5_1)**
#
# | **6\. [Reactions](#section_6)**
#
# |-- **6.1. [Initializing Reaction objects using from_string](#section_6_1)**
# <a id='section_1'></a>
# # 1. Introduction
#
# <img src="images/pmutt_logo.png" width=400>
#
# - Estimates thermochemical and kinetic parameters using statistical mechanics, transition state theory
# - Writes input files for kinetic models and eases thermodynamic analysis
# - Implemented in Python
# - Easy to learn
# - Heavily used in scientific community
# - Object-oriented approach is a natural analogy to chemical phenomena
# - Library approach allows users to define the starting point and end point
#
# <img src="images/workflow.png" width=600>
# <a id='section_1_1'></a>
# ## 1.1 Some of pMuTT's Capabilities
# ### Reaction Coordinate Diagrams
#
# See the thermodynamic and kinetic feasibility of reaction mechanisms.
#
# <img src="images/rxn_coordinate_diagram.svg" width=700>
#
# ### Ab-Initio Phase Diagrams
#
# Predict the most stable configuration with respect to temperature and pressure.
#
# **Configurations**
# <img src="images/configurations.png" width=800>
# Typically we would consider more configurations than this.
#
# **1D Phase Diagram**
# <img src="images/Heatmap_1d.svg" width=400>
#
# **2D Phase Diagram**
# <img src="images/Heatmap_2d.png" width=400>
# <a id='section_2'></a>
# # 2. Useful Links
#
# - [Documentation](https://vlachosgroup.github.io/pMuTT/): find the most updated documentation
# - [Issues](https://github.com/VlachosGroup/pmutt/issues): report bugs, request features, receive help
# - [Examples](https://vlachosgroup.github.io/pMuTT/examples.html): see examples
# <a id='section_3'></a>
# # 3. Creating statistical mechanical objects using StatMech
#
# Molecules show translational, vibrational, rotational, electronic, and nuclear modes.
#
# <img src="images/statmech_modes.png" width=800>
# <a id='section_3_1'></a>
# ## 3.1. Supported StatMech modes
#
# <img src="images/StatMech_smartart.png" width=300>
#
# The StatMech object allows us to specify translational, vibrational, rotational, electronic and nuclear modes independently, which gives flexibility in what behavior you would like. Below are the available modes.
# <a id='section_3_1_1'></a>
# ### 3.1.1. Translations
# - [``FreeTrans``](https://vlachosgroup.github.io/pMuTT/statmech.html#freetrans) - Translations assuming no intermolecular interactions. Can be adjusted for 1, 2, or 3 degrees of translation.
# <a id='section_3_1_2'></a>
# ### 3.1.2. Vibrations
# - [``HarmonicVib``](https://vlachosgroup.github.io/pMuTT/statmech.html#harmonicvib) - Harmonic vibrations
# - [``QRRHOVib``](https://vlachosgroup.github.io/pMuTT/statmech.html#qrrhovib) - Quasi rigid rotor harmonic oscillator. Low frequency modes are treated as rigid rotations.
# - [``EinsteinVib``](https://vlachosgroup.github.io/pMuTT/statmech.html#einsteinvib) - Each atom in the crystal vibrates as independent 3D harmonic oscillators
# - [``DebyeVib``](https://vlachosgroup.github.io/pMuTT/statmech.html#debyevib) - Improves upon ``EinsteinVib`` by considering simultaneous vibrations. Improves accuracy at lower temperatures.
# <a id='section_3_1_3'></a>
# ### 3.1.3. Rotations
# - [``RigidRotor``](https://vlachosgroup.github.io/pMuTT/statmech.html#rigidrotor) - Molecule can be rotated with no change in bond properties
# <a id='section_3_1_4'></a>
# ### 3.1.4. Electronic
# - [``GroundStateElec``](https://vlachosgroup.github.io/pMuTT/statmech.html#groundstateelec) - Electronic ground state of the system
# - [``LSR``](https://vlachosgroup.github.io/pMuTT/statmech.html#linear-scaling-relationships-lsrs) - Linear Scaling Relationship to estimate binding energies using reference adsorbate
# <a id='section_3_1_5'></a>
# ### 3.1.5. Miscellaneous
# - [``EmptyMode``](https://vlachosgroup.github.io/pMuTT/statmech.html#empty-mode) - Default mode if not specified. Does not contribute to any properties
# - [``ConstantMode``](https://vlachosgroup.github.io/pMuTT/statmech.html#constant-mode) - Specify arbitrary values to thermodynamic quantities
#
# Using a ``StatMech`` mode gives you access to all the common thermodynamic properties.
#
# <img src="images/StatMech_obj.png" width=400>
#
# For this example, we will use a hydrogen molecule as an ideal gas:
# - translations with no interaction between molecules
# - harmonic vibrations
# - rigid rotor rotations
# - ground state electronic structure
# - no contribution from nuclear modes.
#
# <img src="images/H2_1.jpg" width=200>
# <a id='section_3_2'></a>
# ## 3.2. Initializing StatMech modes individually
# First, we will create an ASE Atoms object of H2. This will make it easier to initialize translations and rotations.
# In[1]:
# Build an ASE Atoms object for H2; the StatMech translational and
# rotational modes below read mass/geometry information from it.
from ase.build import molecule
from ase.visualize import view
H2_atoms = molecule('H2')
view(H2_atoms)  # side effect only: opens an external molecule viewer
# Now we will initialize each mode separately
# In[2]:
# Initialize each StatMech mode of the H2 ideal-gas model individually,
# then combine them and print H and S at 298 K on a per-mole basis.
from pmutt.statmech import StatMech, trans, vib, rot, elec
'''Translational'''
H2_trans = trans.FreeTrans(n_degrees=3, atoms=H2_atoms)
'''Vibrational'''
H2_vib = vib.HarmonicVib(vib_wavenumbers=[4342.])  # vib_wavenumbers in cm-1
'''Rotational'''
H2_rot = rot.RigidRotor(symmetrynumber=2, atoms=H2_atoms)  # H2 is symmetric: sigma = 2
'''Electronic'''
H2_elec = elec.GroundStateElec(potentialenergy=-6.77,spin=0) # potentialenergy in eV
'''StatMech Initialization'''
H2_statmech = StatMech(name='H2',
                       trans_model=H2_trans,
                       vib_model=H2_vib,
                       rot_model=H2_rot,
                       elec_model=H2_elec)
'''Calculate thermodynamic properties per mole basis'''
H_statmech = H2_statmech.get_H(T=298., units='kJ/mol')
S_statmech = H2_statmech.get_S(T=298., units='J/mol/K')
print('H_H2(T=298 K) = {:.1f} kJ/mol'.format(H_statmech))
print('S_H2(T=298 K) = {:.2f} J/mol/K'.format(S_statmech))
# If you specify the composition of your species, you can calculate per mass quantities too.
# In[3]:
# Supplying the elemental composition lets pMuTT convert per-mole
# quantities into per-mass quantities.
'''Input composition'''
H2_statmech.elements = {'H': 2}
'''Calculate thermodynamic properties per mass basis'''
H_statmech = H2_statmech.get_H(T=298., units='kJ/g')
S_statmech = H2_statmech.get_S(T=298., units='J/g/K')
print('H_H2(T=298 K) = {:.1f} kJ/g'.format(H_statmech))
print('S_H2(T=298 K) = {:.2f} J/g/K'.format(S_statmech))
# <a id='section_3_3'></a>
# ## 3.3. Initializing StatMech modes using presets
#
# Commonly used models can be accessed via [``presets``](https://vlachosgroup.github.io/pMuTT/statmech.html#presets). The currently supported models are:
#
# - [``idealgas``](https://vlachosgroup.github.io/pMuTT/statmech.html#ideal-gas-idealgas) - Ideal gases
# - [``harmonic``](https://vlachosgroup.github.io/pMuTT/statmech.html#harmonic-approximation-harmonic) - Typical for surface species
# - [``electronic``](https://vlachosgroup.github.io/pMuTT/statmech.html#electronic-electronic) - Only has electronic modes
# - [``placeholder``](https://vlachosgroup.github.io/pMuTT/statmech.html#placeholder-placeholder) - No contribution to any property
# - [``constant``](https://vlachosgroup.github.io/pMuTT/statmech.html#constant-constant) - Use arbitrary constants to thermodynamic properties
#
# In[4]:
# Same H2 ideal-gas model as above, but the modes are wired up in one call
# via the 'idealgas' preset instead of being constructed individually.
from ase.build import molecule
from pmutt.statmech import StatMech, presets
H2_statmech = StatMech(atoms=molecule('H2'),
                       vib_wavenumbers=[4342.],  # cm-1
                       symmetrynumber=2,
                       potentialenergy=-6.77,  # eV
                       spin=0.,
                       **presets['idealgas'])
'''Calculate thermodynamic properties'''
H_statmech = H2_statmech.get_H(T=298., units='kJ/mol')
S_statmech = H2_statmech.get_S(T=298., units='J/mol/K')
print('H_H2(T=298 K) = {:.1f} kJ/mol'.format(H_statmech))
print('S_H2(T=298 K) = {:.2f} J/mol/K'.format(S_statmech))
# <a id='section_4'></a>
# # 4. Creating empirical objects
# Currently, pMuTT supports
#
# - [NASA polynomials](https://vlachosgroup.github.io/pMuTT/empirical.html#nasa)
# - [NASA9 polynomials](https://vlachosgroup.github.io/pMuTT/empirical.html#nasa9)
# - [Shomate polynomials](https://vlachosgroup.github.io/pMuTT/empirical.html#shomate).
#
# They can be initialized in three ways:
# - inputting the polynomials directly
# - from another model (e.g. ``StatMech``, ``Shomate``) using (``from_model``)
# - from heat capacity, enthalpy and entropy data using (``from_data``)
#
# <img src="images/nasa_func1.png" width=400>
# <a id='section_4_1'></a>
# ## 4.1. Inputting a NASA polynomial directly
#
# The H2 NASA polynomial from the [Burcat database](http://combustion.berkeley.edu/gri_mech/version30/files30/thermo30.dat) is represented as:
#
# ```
# H2 TPIS78H 2 G 200.000 3500.000 1000.000 1
# 3.33727920E+00-4.94024731E-05 4.99456778E-07-1.79566394E-10 2.00255376E-14 2
# -9.50158922E+02-3.20502331E+00 2.34433112E+00 7.98052075E-03-1.94781510E-05 3
# 2.01572094E-08-7.37611761E-12-9.17935173E+02 6.83010238E-01 4
# ```
#
# This can be translated to pMuTT syntax using:
# In[5]:
import numpy as np
from matplotlib import pyplot as plt
from pmutt import plot_1D
from pmutt.empirical.nasa import Nasa
# Initialize NASA polynomial
H2_nasa = Nasa(name='H2',
elements={'H': 2},
phase='G',
T_low=200., T_mid=1000., T_high=3500.,
a_low=[2.34433112E+00, 7.98052075E-03, -1.94781510E-05,
2.01572094E-08, -7.37611761E-12, -9.17935173E+02,
6.83010238E-01],
a_high=[3.33727920E+00, -4.94024731E-05, 4.99456778E-07,
-1.79566394E-10, 2.00255376E-14, -9.50158922E+02,
-3.20502331E+00])
# Calculate thermodynamic quantities using the same syntax as StatMech
H_H2 = H2_nasa.get_H(units='kcal/mol', T=298.)
print('H_H2(T=298 K) = {} kcal/mol'.format(H_H2))
# Show thermodynamic quantities vs. T
T = np.linspace(200., 3500.)
f2, ax2 = plot_1D(H2_nasa,
x_name='T', x_values=T,
methods=('get_H', 'get_S', 'get_G'),
get_H_kwargs={'units': 'kcal/mol'},
get_S_kwargs={'units': 'cal/mol/K'},
get_G_kwargs={'units': 'kcal/mol'})
# Modifying figure
ax2[0].set_ylabel('H (kcal/mol)')
ax2[1].set_ylabel('S (cal/mol/K)')
ax2[2].set_ylabel('G (kcal/mol)')
ax2[2].set_xlabel('T (K)')
f2.set_size_inches(5, 5)
f2.set_dpi(200)
plt.show()
# <a id='section_4_2'></a>
# ## 4.2. Fitting an empirical object to a StatMech object
# Empirical objects can be made directly any species objects using the ``from_model`` method.
# In[6]:
H2_nasa = Nasa.from_model(name='H2',
T_low=200.,
T_high=3500.,
model=H2_statmech)
# Compare the statistical mechanical model to the empirical model
f3, ax3 = H2_nasa.plot_statmech_and_empirical(Cp_units='J/mol/K',
H_units='kJ/mol',
S_units='J/mol/K',
G_units='kJ/mol')
f3.set_size_inches(6, 8)
f3.set_dpi(150)
plt.show()
# In[7]:
from pmutt.empirical.shomate import Shomate
H2_shomate = Shomate.from_model(model=H2_nasa)
# Compare the statistical mechanical model to the empirical model
f3, ax3 = H2_shomate.plot_statmech_and_empirical(Cp_units='J/mol/K',
H_units='kJ/mol',
S_units='J/mol/K',
G_units='kJ/mol')
f3.set_size_inches(6, 8)
f3.set_dpi(150)
plt.show()
# The ``Shomate`` is a simpler polynomial than the ``Nasa`` polynomial so it does not capture the features as well for the large T range. It is always a good idea to check your fit.
# <a id='section_5'></a>
# # 5. Input/Output
# pMuTT has more IO functionality than below. See this page for [supported IO functions](https://vlachosgroup.github.io/pMuTT/io.html).
# <a id='section_5_1'></a>
# ## 5.1. Input via Excel
#
# Encoding each object in Python can be tedious. You can read several species from Excel spreadsheets using [``pmutt.io.excel.read_excel``](https://vlachosgroup.github.io/pmutt/io.html?highlight=read_excel#pmutt.io.excel.read_excel). Note that this function returns a list of dictionaries. This output allows you to initialize whichever object you want using kwargs syntax. There are also [special rules that depend on the header name](https://vlachosgroup.github.io/pMuTT/io.html#special-rules).
#
# Below, we show an example importing species data from a spreadsheet and creating a series of NASA polynomials.
# First, we ensure that the current working directory is the same as the notebook so we can access the spreadsheet.
# In[8]:
import os
from pathlib import Path
# Find the location of Jupyter notebook
# Note that normally Python scripts have a __file__ variable but Jupyter notebook doesn't.
# Using pathlib can overcome this limitation
try:
notebook_path = os.path.dirname(__file__)
except NameError:
notebook_path = Path().resolve()
os.chdir(notebook_path)
# Now we can read from the spreadsheet.
# In[9]:
from pprint import pprint
from pmutt.io.excel import read_excel
ab_initio_data = read_excel(io='./input/NH3_Input_Data.xlsx', sheet_name='species')
pprint(ab_initio_data)
# After the data is read, we can fit the ``Nasa`` objects from the statistical mechanical data.
# In[10]:
from pmutt.empirical.nasa import Nasa
# Create NASA polynomials using **kwargs syntax
nasa_species = []
for species_data in ab_initio_data:
single_nasa_species = Nasa.from_model(T_low=100.,
T_high=1500.,
**species_data)
nasa_species.append(single_nasa_species)
# Just to ensure the species were read correctly, we can try printing out thermodynamic values.
# In[11]:
import pandas as pd
thermo_data = {'Name': [],
'H (kcal/mol)': [],
'S (cal/mol/K)': [],
'G (kcal/mol)': []}
'''Calculate properties at 298 K'''
T = 298. # K
for single_nasa_species in nasa_species:
thermo_data['Name'].append(single_nasa_species.name)
thermo_data['H (kcal/mol)'].append(single_nasa_species.get_H(units='kcal/mol', T=T))
thermo_data['S (cal/mol/K)'].append(single_nasa_species.get_S(units='cal/mol/K', T=T))
thermo_data['G (kcal/mol)'].append(single_nasa_species.get_G(units='kcal/mol', T=T))
'''Create Pandas Dataframe for easy printing'''
columns = ['Name', 'H (kcal/mol)', 'S (cal/mol/K)', 'G (kcal/mol)']
thermo_data = pd.DataFrame(thermo_data, columns=columns)
print(thermo_data)
# <a id='section_6'></a>
# # 6. Reactions
#
# ``Reaction`` objects can be created by putting together ``Nasa``, ``Nasa9``, ``Shomate`` and ``StatMech`` objects.
# <img src="images/reaction_smartart.png" width=300>
#
# <img src="images/reaction.png" width=600>
#
# <img src="images/reaction_func1.png" width=800>
# <a id='section_6_1'></a>
# ## 6.1. Initializing Reaction objects using from_string
#
# The ``from_string`` method is the easiest way to create a ``Reaction`` object. It requires the relevant species to be in a dictionary and a string to describe the reaction.
#
# <img src="images/reaction_string.svg" width=800>
#
# We will demonstrate its use for the formation of NH3.
# In[12]:
from pmutt.empirical.nasa import Nasa
from pmutt.empirical.shomate import Shomate
from pmutt.reaction import Reaction
# Create species. Note that you can mix different types of species:
# StatMech, Nasa and Shomate objects all expose the same get_* interface.
species = {
    'H2': StatMech(name='H2', atoms=molecule('H2'),
                   vib_wavenumbers=[4342.],  # cm-1
                   symmetrynumber=2,
                   potentialenergy=-6.77,  # eV
                   spin=0.,
                   **presets['idealgas']),
    'N2': Nasa(name='N2', T_low=300., T_mid=643., T_high=1000.,
               a_low=[3.3956319945669633, 0.001115707689025668,
                      -4.301993779374381e-06, 6.8071424019295535e-09,
                      -3.2903312791047058e-12, -191001.55648623788,
                      3.556111439828502],
               a_high=[4.050329990684662, -0.0029677854067980108,
                       5.323485005316287e-06, -3.3518122405333548e-09,
                       7.58446718337381e-13, -191086.2004520406,
                       0.6858235504924011]),
    'NH3': Shomate(name='NH3', T_low=300., T_high=1000.,
                   a=[18.792357134351683, 44.82725349479501,
                      -10.05898449447048, 0.3711633831565547,
                      0.2969942466370908, -1791.225746924463,
                      203.9035662274934, 1784.714638346206]),
}
# Define the formation of ammonia reaction; stoichiometric coefficients are
# parsed from the string and species are looked up in the dict above.
rxn = Reaction.from_string('1.5H2 + 0.5N2 = NH3', species)
# Now we can calculate thermodynamic properties of the reaction.
# In[13]:
'''Forward change in enthalpy'''
H_rxn_fwd = rxn.get_delta_H(units='kcal/mol', T=300.)
print('Delta H_fwd(T = 300 K) = {:.1f} kcal/mol'.format(H_rxn_fwd))
'''Reverse change in entropy'''
S_rxn_rev = rxn.get_delta_S(units='cal/mol/K', T=300., rev=True)
print('Delta S_rev(T = 300 K) = {:.1f} cal/mol/K'.format(S_rxn_rev))
'''Gibbs energy of reactants'''
G_react = rxn.get_G_state(units='kcal/mol', T=300., state='reactants')
print('G_reactants(T = 300 K) = {:.1f} kcal/mol'.format(G_react))
|
import datetime

# Brazilian military-draft helper: compute the user's age from their birth
# year and tell (male users) how far they are from the enlistment age of 18.
anoN = int(input('Ano de nascimento: '))
anoA = datetime.date.today().year
idade = anoA - anoN
print('Quem nasceu em {} tem {} anos em {}.'.format(anoN, idade, anoA))
print('''Qual seu sexo? Digite:
[ F ] para sexo feminino
[ M ] para sexo masculino''')
lido = str(input('Opção: '))
# Normalize the answer so ' m ' / 'f' etc. are accepted.
sexo = lido.strip().upper()
if sexo == 'M':
    print('Você precisa fazer o alistamento militar obrigatório.')
elif sexo == 'F':
    print('Você não precisa fazer o alistamento militar obrigatório.')
else:
    print('Opção inválida. Tente novamente.')
# Enlistment-year details apply only to male users.
if idade < 18 and sexo == 'M':
    dif = 18 - idade
    print('Ainda faltam {} anos para o alistamento.'.format(dif))
    anoF = anoA + dif
    print('Seu alistamento será em {}.'.format(anoF))
elif idade > 18 and sexo == 'M':
    dif = idade - 18
    print('Você já deveria ter se alistado há {} anos.'.format(dif))
    anoP = anoA - dif
    print('Seu alistamento foi em {}.'.format(anoP))
elif idade == 18 and sexo == 'M':
    print('Você tem que se alistar IMEDIATAMENTE!')
|
from PySide2 import QtGui
from PySide2.QtCore import Slot, QObject, Signal
from PySide2.QtWidgets import QWidget, QMessageBox
from app.ui import qss
from app.ui.ui_home import Ui_Home
from app.lib.global_var import G
from app.honey import all_app
from app.setting import SettingWidget
import webbrowser
class HomeJob(QObject):
    """Qt signal holder: emits ``install_signal`` after an app installs successfully."""

    # Payload is the installed app's configuration dict.
    install_signal = Signal(dict)

    def __init__(self):
        # BUG FIX: super(self.__class__, self) recurses infinitely as soon as
        # this class is subclassed; the zero-argument form is the safe idiom.
        super().__init__()
class HomeWidget(QWidget, Ui_Home):
    """Home page widget: lists installed/available apps and wires toolbar buttons."""

    def __init__(self, mainwindow):
        # Zero-argument super() instead of super(self.__class__, self), which
        # breaks (infinite recursion) under further subclassing.
        super().__init__()
        self.setupUi(self)
        self.setStyleSheet(qss)
        self.mainwindow = mainwindow
        self.progressBar.setFixedHeight(3)
        self.job = HomeJob()
        ## buttons
        self.setting_btn.clicked.connect(self.setting_click)
        self.github.clicked.connect(self.github_click)
        self.qq.clicked.connect(self.qq_click)
        ## pypi: refresh the "installed" layout when an installation finishes
        self.job.install_signal.connect(self.add_installed_layout_slot)
        self.ready_action()

    def setting_click(self):
        """Open the settings window (kept as an attribute so closeEvent can close it)."""
        self.setting_widget = SettingWidget()
        self.setting_widget.show()

    def ready_action(self):
        self.init_ui()

    def init_ui(self):
        """Populate the layouts: installed apps first, then every known app."""
        # Installed apps, restored from the persisted config. Plain loops
        # instead of comprehensions built only for their side effects.
        for pack_name, cfg in G.config.installed_apps.items():
            all_app[cfg['cls_name']](self, **cfg)
        # All available apps.
        for app_cls in all_app.values():
            app_cls(self)

    @Slot(dict)
    def add_installed_layout_slot(self, data):
        """Slot: add a freshly installed app (config in *data*) to the UI."""
        app_cls = all_app[data['cls_name']]
        app_cls(self, **data)

    def closeEvent(self, event: QtGui.QCloseEvent):
        # Close the settings window if it was ever opened. AttributeError means
        # it never existed; RuntimeError covers an already-deleted Qt widget.
        # The original bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        try:
            self.setting_widget.close()
        except (AttributeError, RuntimeError):
            pass
        event.accept()

    def qq_click(self):
        self.open_url("https://jq.qq.com/?_wv=1027&k=5xWbIq3")

    def github_click(self):
        self.open_url("https://github.com/ctpbee")

    def open_url(self, url):
        """Open *url*, preferring Chrome but falling back to the default browser."""
        try:
            webbrowser.get('chrome').open_new_tab(url)
        except Exception:
            # Chrome not registered on this machine -- use the system default.
            webbrowser.open_new_tab(url)
|
#!/usr/bin/env python2.7
# coding=utf-8
'''
@date = '17/4/7'
@author = 'chenliang'
@email = 'chenliang2380@cvte.cn'
'''
from FATERUI.paramwidget import *
from collections import OrderedDict
import FATERUI.editwidget
import cv2
class ThresholdUi(ParamWidget):
    """Parameter widget describing a thresholding step in the "Process" group."""

    def __init__(self, param=''):
        super(ThresholdUi, self).__init__(param)
        self.setWindowTitle("Threshold Image")

    def _initParamStr(self):
        """Declare this step's parameter names, types and default values."""
        self._parentName = 'Process'
        self._name = 'Threshold'
        # 'C' appears to denote a choice list and 'T' a numeric range
        # (value, min, max, step) -- confirm against ParamWidget's contract.
        paramType = {'Method': ['C', ['adaptiveThreshold', 'threshold']],
                     'Type': ['C', ['THRESH_BINARY_INV', 'THRESH_BINARY', 'THRESH_TRUNC']],
                     'MaxVal': ['T', 201, 0, 255, 1]}
        paramList = ['Method', 'Type', 'MaxVal']
        # OrderedDict preserves declaration order for the UI.
        self._paramType = OrderedDict()
        for i in paramList:
            self._paramType[i] = paramType[i]
        self._param = {'Method': 'adaptiveThreshold', 'Type': 'THRESH_TRUNC', 'MaxVal': 120}

    def run(self):
        """Process step: convert the current image (assumed BGRA) to grayscale.

        NOTE(review): despite the declared Method/Type/MaxVal parameters, no
        thresholding is applied here -- only a BGRA->gray conversion. Confirm
        whether cv2.threshold / cv2.adaptiveThreshold was meant to be called.
        """
        image = self.getProcessImage()
        cv2ImageRGB = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)
        self.setProcessRes(cv2ImageRGB)
        return FATERUI.editwidget.StatePass
|
"""The main module of this package."""
from importlib import metadata as _metadata

# Resolve the installed distribution's version once, at import time.
__version__ = _metadata.version(__name__)
|
import os
import pickle
from os.path import join as pjoin
import click
from vocabirt.embed_nn.loader.common import get_resp_split
from .discrimexp import Scorer, evaluate, prepare_args, print_summary, strategy_opt
@click.command()
@click.argument("vocab_response_path")
@click.argument("irt_path_cv", type=click.Path())
@click.argument("outf", type=click.File("wb"))
@strategy_opt
@click.option(
    "--estimator", type=click.Choice(["logistic", "hill-climb"]), default="hill-climb",
)
@click.option("--no-discrim-preds/--discrim-preds")
@click.option("--verbose/--terse")
def main(
    vocab_response_path,
    irt_path_cv,
    outf,
    strategy,
    estimator,
    no_discrim_preds,
    verbose,
):
    """Evaluate ability estimation across CV splits and pickle the scores.

    Scans ``irt_path_cv`` for per-split IRT pickles named ``resp<split>*.pkl``,
    evaluates every test respondent of each split with the chosen strategy and
    estimator, and dumps the full and last-step score DataFrames to ``outf``.
    """
    scorer = Scorer()
    irt_paths = {}
    for path in os.listdir(irt_path_cv):
        if not path.startswith("resp") or not path.endswith(".pkl"):
            continue
        # NOTE(review): the split index is read from the single character
        # right after "resp" -- assumes fewer than 10 splits; confirm the
        # file naming scheme.
        irt_paths[int(path[4])] = pjoin(irt_path_cv, path)
    splits = len(irt_paths)
    # Either a single pickle (train == test) or a 3-fold CV layout.
    assert splits in (1, 3)
    for split, irt_path in irt_paths.items():
        train_resp, test_resp = get_resp_split(15, split, splits)
        if splits == 1:
            # Single-split mode: evaluate on the training respondents.
            test_resp = train_resp
        with open(irt_path, "rb") as irt_pkl:
            args = prepare_args(
                vocab_response_path,
                irt_pkl,
                strategy,
                estimator,
                no_discrim_preds=no_discrim_preds,
                no_reestimate=False,
                verbose=verbose,
                scorer=scorer,
            )
            # evaluate() records its results into the shared scorer.
            for respondent in test_resp:
                evaluate(**args, respondent=respondent)
    df = scorer.as_df()
    last_df = scorer.as_df(last_only=True)
    print_summary(last_df)
    pickle.dump(
        {"df": df, "last_df": last_df,}, outf,
    )


if __name__ == "__main__":
    main()
|
"""This script aimed at extracting several time-series for the IMPAC challenge.
Parameters
----------
PATH_TO_DATA : list of str
The path to the subjects with possibility to use a wildcard, e.g.
['/example/*', '/anotherone/*].
SUBJECTS_EXCLUDED : str
Path to a csv containing a column 'subject_id' with the id of the patient
to be excluded.
PATH_OUTPUT : str
    Location to dump the extracted time series. Each subject's series is
    written as CSV to
    `PATH_OUTPUT/atlas_descr/subject_id/run/subject_id_task-Rest_confounds.csv`
PATH_TO_RESTING_STATE : str
Path to the resting functional MRI in each subject folder. Default is
'session_1/rest_1/rest_res2standard.nii.gz'
PATH_TO_MOTION_CORRECTION : str
Path to the motion correction parameters which is located in each subject
folder. Default is 'session_1/rest_1/rest_mc.1D'.
ATLASES : list of atlases
A list of the atlases to use.
ATLASES_DESCR : list of atlases names
A list of the ATLASES name to store properly the data later on.
N_JOBS : int
The number of workers. The parallel computation is performed for the
subjects.
"""
import glob
from os import makedirs, listdir
from os.path import join, basename, normpath, exists, isdir
from shutil import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
from sklearn.datasets.base import Bunch
from sklearn.externals import six
from nilearn import image
from nilearn._utils import check_niimg
from nilearn.input_data import (NiftiLabelsMasker, NiftiMapsMasker,
NiftiSpheresMasker)
from nilearn.datasets import (fetch_atlas_basc_multiscale_2015,
fetch_atlas_msdl, fetch_atlas_craddock_2012,
fetch_atlas_harvard_oxford,
fetch_coords_power_2011)
from nilearn.connectome import ConnectivityMeasure
def _make_masker_from_atlas(atlas, memory=None, memory_level=1):
    """Construct a masker from a given atlas.

    Parameters
    ----------
    atlas : str or 3D/4D Niimg-like object or coordinate Bunch,
        The atlas to use to create the masker. If string, it should
        correspond to the path of a Nifti image. Non-string input is assumed
        to carry ``rois`` with 'x'/'y'/'z' coordinate arrays (e.g. Power 2011).
    memory : instance of joblib.Memory or string, (default=None)
        Used to cache the masking process. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
    memory_level : integer, optional (default=1)
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching.

    Returns
    -------
    masker : Nilearn Masker
        A labels, maps or spheres masker depending on the atlas kind.
    """
    if isinstance(atlas, six.string_types):
        atlas_ = check_niimg(atlas)
        atlas_dim = len(atlas_.shape)
        if atlas_dim == 3:
            # 3D image -> integer-label atlas.
            masker = NiftiLabelsMasker(atlas_,
                                       memory=memory,
                                       memory_level=memory_level,
                                       smoothing_fwhm=6,
                                       detrend=True,
                                       verbose=1)
        elif atlas_dim == 4:
            if 'craddock' in atlas:
                # Picks the single volume at index 25 of the 4D Craddock file
                # and treats it as a label atlas -- presumably one particular
                # parcellation level; confirm the intended index.
                atlas_ = image.index_img(atlas_, 25)
                masker = NiftiLabelsMasker(atlas_,
                                           memory=memory,
                                           memory_level=memory_level,
                                           smoothing_fwhm=6,
                                           detrend=True,
                                           verbose=1)
            else:
                # Other 4D atlases are treated as probabilistic maps.
                masker = NiftiMapsMasker(atlas_,
                                        memory=memory,
                                        memory_level=memory_level,
                                        smoothing_fwhm=6,
                                        detrend=True,
                                        verbose=1)
        # NOTE(review): an atlas with a dimensionality other than 3 or 4
        # would leave `masker` unbound and raise UnboundLocalError below.
    else:
        # Coordinate-based atlas: extract signals from spheres around seeds.
        coords = np.vstack((atlas.rois['x'],
                            atlas.rois['y'],
                            atlas.rois['z'])).T
        # check the radius for the sphere
        masker = NiftiSpheresMasker(seeds=coords,
                                    radius=5.,
                                    memory=memory,
                                    memory_level=memory_level,
                                    smoothing_fwhm=6,
                                    detrend=True,
                                    verbose=1)
    return masker
def _extract_timeseries(func,
                        atlas=fetch_atlas_basc_multiscale_2015().scale064,
                        confounds=None,
                        memory=None,
                        memory_level=1):
    """Extract the time series of one functional volume.

    Parameters
    ----------
    func : str,
        Path of the Nifti volume.
    atlas : str or 3D/4D Niimg-like object, (default=BASC64)
        The atlas to use to create the masker. If string, it should
        correspond to the path of a Nifti image.
    confounds : str,
        Path of the text file containing the confounds, or None to skip
        confound regression.
    memory : instance of joblib.Memory or string, (default=None)
        Used to cache the masking process. By default, no caching is done.
        If a string is given, it is the path to the caching directory.
    memory_level : integer, optional (default=1)
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching.

    Returns
    -------
    ndarray or None
        The extracted signal matrix, or None when masking fails with a
        ValueError: the error is printed and swallowed so a batch run can
        continue, and callers must check for None.
    """
    try:
        masker = _make_masker_from_atlas(atlas, memory=memory,
                                         memory_level=memory_level)
        if confounds is not None:
            confounds_ = np.loadtxt(confounds)
        else:
            confounds_ = None
        return masker.fit_transform(func, confounds=confounds_)
    except ValueError as e:
        # Best-effort: report and return None (implicitly) for this volume.
        print(str(e))
# pylint: disable=invalid-name
N_JOBS = 4  # number of parallel workers (one subject per task)
###############################################################################
# Path definition
PATH_TO_DATA = ['/home/lemaitre/Documents/data/INST/*']
SUBJECTS_EXCLUDED = ('/home/lemaitre/Documents/data/'
                     'inst_excluded_subjects.csv')
PATH_OUTPUT = '/home/lemaitre/Documents/data/INST_time_series'
# Expand the wildcards and keep only directories (one per subject).
subjects_path = []
for pdata in PATH_TO_DATA:
    subjects_path += glob.glob(pdata)
subjects_path = sorted(subjects_path)
subjects_path = [sp for sp in subjects_path if isdir(sp)]
PATH_TO_RESTING_STATE = 'session_1/rest_1/rest_res2standard.nii.gz'
PATH_TO_MOTION_CORRECTION = 'session_1/rest_1/rest_mc.1D'
# path to the atlases
ATLASES = [fetch_atlas_msdl().maps,
           fetch_atlas_basc_multiscale_2015().scale064,
           fetch_atlas_basc_multiscale_2015().scale122,
           fetch_atlas_basc_multiscale_2015().scale197,
           fetch_atlas_harvard_oxford(atlas_name='cort-prob-2mm').maps,
           fetch_atlas_craddock_2012().scorr_mean,
           fetch_coords_power_2011()]
# Output sub-directory names; must stay in the same order as ATLASES.
ATLASES_DESCR = ['msdl', 'basc064', 'basc122', 'basc197',
                 'harvard_oxford_cort_prob_2mm', 'craddock_scorr_mean',
                 'power_2011']
# load the list of patient to exclude
excluded_subjects = pd.read_csv(
    SUBJECTS_EXCLUDED,
    dtype={'subject_id': object})['subject_id'].tolist()
###############################################################################
# Build the list with all path
dataset = {'func': [], 'motion': [], 'subject_id': [], 'run': []}
for i, path_subject in enumerate(subjects_path):
    subject_id = basename(normpath(path_subject))
    if subject_id in excluded_subjects:
        continue
    # One dataset entry per 'run_*' directory found under the subject.
    content_dir = listdir(path_subject)
    run_path = sorted([folder
                       for folder in content_dir
                       if isdir(join(path_subject, folder)) and
                       'run_' in folder])
    for rp in run_path:
        dataset['subject_id'].append(subject_id)
        dataset['run'].append(rp)
        dataset['func'].append(join(path_subject, rp,
                                    PATH_TO_RESTING_STATE))
        dataset['motion'].append(join(path_subject, rp,
                                      PATH_TO_MOTION_CORRECTION))
# Create a Bunch object
dataset = Bunch(**dataset)
for atlas, atlas_descr in zip(ATLASES, ATLASES_DESCR):
    # Do not include the confounds when extracting the time series
    time_series = Parallel(n_jobs=N_JOBS, verbose=1)(
        delayed(
            _extract_timeseries)(func, atlas=atlas, confounds=None)
        for func, confounds in zip(dataset.func, dataset.motion))
    for ts, subject_id, rp, original_confound in zip(time_series,
                                                     dataset.subject_id,
                                                     dataset.run,
                                                     dataset.motion):
        # skip subjects for which time series extraction did not work
        if ts is None:
            continue
        # store the time series
        path_subject = join(PATH_OUTPUT, atlas_descr, subject_id, rp)
        if not exists(path_subject):
            makedirs(path_subject)
        filename = join(path_subject,
                        '%s_task-Rest_confounds.csv' % subject_id)
        np.savetxt(filename, ts, delimiter=',')
        # store the confounds in a separate directories
        path_subject = join(PATH_OUTPUT, 'motions', subject_id, rp)
        if not exists(path_subject):
            makedirs(path_subject)
        copy(original_confound, join(path_subject, 'motions.txt'))
        # store the matrix of correlation for MSDL
        if atlas_descr == 'msdl':
            path_subject = join(PATH_OUTPUT, 'correlation')
            if not exists(path_subject):
                makedirs(path_subject)
            connectivity_measure = ConnectivityMeasure(kind='correlation')
            correlation_matrix = connectivity_measure.fit_transform([ts])[0]
            plt.figure()
            # Zero the diagonal so self-correlation does not dominate the plot.
            np.fill_diagonal(correlation_matrix, 0)
            plt.imshow(correlation_matrix, vmin=-1., vmax=1., cmap='RdBu_r',
                       interpolation='nearest')
            plt.colorbar()
            plt.title('Correlation matrix MSDL atlas')
            path_image = join(path_subject, subject_id + '_' + rp + '.png')
            plt.savefig(path_image, bbox_inches='tight')
|
import torch
import pandas as pd
import numpy as np
import time
import traceback
import torch.utils.data
from pathlib import Path
import os,sys
import cv2
import yaml
from imutils.paths import list_images
from tqdm import tqdm
import argparse
import albumentations as A
from sklearn.metrics import classification_report
import random
try:
import pretty_errors
pretty_errors.configure(display_locals = True)
except ImportError:
pass
import warnings
#warnings.filterwarnings("ignore",)
from rich import print,console
from rich.progress import track
console = console.Console()
from model import build_model
from dataset import create_transform
from importlib import machinery
_cur=Path(__file__).absolute().parent
machinery.SourceFileLoader('general',str(_cur/'../libs/general.py')).load_module()
machinery.SourceFileLoader('m_utils',str(_cur/'../libs/model_utils.py')).load_module()
from general import seed_everything,get_args
from m_utils import get_labels,Model_Saver, get_features
def test(args, model):
    """Run inference on every image under ``args.test_path`` and save a CSV.

    Applies test-time augmentation with probability ``args.tta``. When
    ``args.calculate`` is set, joins the global ``labels`` ground truth and
    prints MAE/std via ``report_scores``. Results always go to
    ``outputs/test_result.csv``.
    """
    files = list(list_images(args.test_path))
    files.sort()
    trfm_train, trfm_val = create_transform(args)
    # Collect rows in a plain list: DataFrame.append was deprecated in
    # pandas 1.4 and removed in 2.0, and per-row appends are quadratic.
    rows = []
    for file in track(files):
        img = cv2.imread(file)
        y = m_detect(model, img, trfm_val)
        if random.random() < args.tta:
            # Re-run with the (stochastic) training transform, averaged.
            y = tta(model, img, trfm_train)
        rows.append({'filename': Path(file).name, 'pred': y})
    df = pd.DataFrame(rows, columns=('filename', 'pred'))
    if args.calculate:
        # Ground truth comes from the module-level `labels` mapping.
        gt = [np.array(labels[Path(file).name]) for file in files]
        df['gt'] = gt
        df = df.infer_objects()
        df = df.sort_values(['filename'])
        print(df.tail())
        report_scores(df)
    df.to_csv('outputs/test_result.csv')
@torch.inference_mode()
def m_detect(model, img, transform):
    """Run one forward pass on *img* and return the prediction as a plain list."""
    tensor = transform(image=img)['image']
    batch = tensor[None].float()  # add a batch dimension
    prediction = model(batch)
    return prediction.squeeze().tolist()
def tta(model, img, transform):
    """Test-time augmentation: average five predictions under *transform*."""
    predictions = [m_detect(model, img, transform) for _ in range(5)]
    averaged = np.mean(predictions, axis=0).squeeze()
    return list(averaged)
def report_scores(df):
    """Print mean and std of the absolute error between 'gt' and 'pred' columns."""
    truth = np.array(df['gt'].to_list())
    predicted = np.array(df['pred'].to_list())
    abs_err = np.abs(truth - predicted)
    mae = np.mean(abs_err)
    std = np.std(abs_err)
    print(f'mae_score is {mae}, std is {std}')
if __name__ == '__main__':
    seed_everything(42)
    args = get_args('config.yaml', sys.argv)
    # get_args leaves the sentinel 'required' when --mpath was not supplied.
    if args.mpath == 'required':
        raise RuntimeError('Need to specify the model path')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    mname = 'default'
    classLabels = get_labels('data/labels_name.csv')
    with open('data/all_labels.yaml', 'r') as f:
        # Global ground-truth mapping consumed by test().
        labels = yaml.safe_load(f)
    num_classes = len(classLabels)
    # NOTE(review): 'chechpoint' is the (misspelled) keyword build_model
    # expects -- preserved for compatibility.
    model = build_model(mname, num_classes, chechpoint=args.mpath)
    model.eval()
    test(args, model)
    if args.if_feature:
        # Second model instance configured to emit features instead of logits.
        model_f = build_model(mname, num_classes, chechpoint=args.mpath, feature=True)
        _, trfm_val = create_transform(args)
        img_dir = args.test_path
        get_features(args, img_dir, model_f, trfm_val)
    Model_Saver.convert2onnx(model, args)
|
def pdfFile(filename):
    """Extract the text of every page of *filename* into ``input\\ppdf.txt``.

    :param filename: path to the PDF to read.
    Side effects: prints progress and writes the concatenated page text.
    """
    import PyPDF2
    print("------------------------------------- PDF To Text Extraction Start Here -------------------------------------\n")
    pdfFileObj = open(filename, 'rb')
    pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
    num_pages = pdfReader.numPages
    # BUG FIX: the message said "docx" for a PDF extraction.
    print("[SYSTEM] Extracting data from pdf to text\n")
    # BUG FIX: the original reopened the output in 'w' mode for every page,
    # so only the last page's text survived. Open once and write all pages.
    with open('input\\ppdf.txt', 'w') as f:
        for y in range(num_pages):
            pageObj = pdfReader.getPage(y)
            print("Page no :" + str(1 + pdfReader.getPageNumber(pageObj)))
            f.write(pageObj.extractText())
    print("[SYSTEM] Writing data to a text file\n")
    pdfFileObj.close()
# Generated by Django 2.2.8 on 2020-07-13 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set explicit defaults for the SDF compound property-name fields."""

    dependencies = [
        ('sdf', '0002_auto_20200713_1449'),
    ]

    operations = [
        # Property names under which activity data is read from SDF files.
        migrations.AlterField(
            model_name='sdfcompounds',
            name='activitiesProp',
            field=models.CharField(default='GENUI_ACTIVITIES', max_length=256),
        ),
        migrations.AlterField(
            model_name='sdfcompounds',
            name='activityTypesProp',
            field=models.CharField(default='GENUI_ACTIVITY_TYPES', max_length=256),
        ),
        migrations.AlterField(
            model_name='sdfcompounds',
            name='activityUnitsProp',
            field=models.CharField(blank=True, default='GENUI_ACTIVITY_UNITS', max_length=256, null=True),
        ),
    ]
|
# Unterprogramm zur Konvertierung und Einteilung von .ogg Dateien
# Angelehnt an https://stackoverflow.com/a/62872679
from pydub import AudioSegment
import os
import math
class SplitConvert():
    """Convert an .ogg file to .wav and split it into fixed-length parts.

    Loosely based on https://stackoverflow.com/a/62872679.
    """

    def __init__(self, folder, filename):
        self.folder = folder
        self.filename = filename
        # os.path.join instead of '\\' concatenation: works on every OS.
        self.filepath = os.path.join(folder, filename)
        # Replace only the extension: str.replace("ogg", "wav") would also
        # corrupt an "ogg" occurring elsewhere in the name.
        self.filename = os.path.splitext(self.filename)[0] + '.wav'
        try:
            self.audio = AudioSegment.from_ogg(self.filepath)
        except Exception as e:
            # Best effort: report the load failure; later calls will fail
            # on the missing `audio` attribute.
            print(e)

    def get_duration(self):
        """Return the audio length in seconds."""
        return self.audio.duration_seconds

    def single_split(self, from_min, to_min, split_filename):
        """Export the [from_min, to_min) slice as a .wav file in self.folder."""
        try:
            t1 = from_min * 60 * 1000  # pydub slices in milliseconds
            t2 = to_min * 60 * 1000
            split_audio = self.audio[t1:t2]
            split_audio.export(os.path.join(self.folder, split_filename), format="wav")
        except Exception as e:
            print(e)

    def multiple_split(self, min_per_split):
        """Split the audio into consecutive chunks of *min_per_split* minutes.

        Returns the list of part filenames, or None when splitting fails.
        """
        try:
            parts = []
            total_mins = math.ceil(self.get_duration() / 60)
            for i in range(0, total_mins, min_per_split):
                split_fn = str(int(i / min_per_split)) + '_' + self.filename
                self.single_split(i, i + min_per_split, split_fn)
                print(str(int(i / min_per_split)) + ' Done')
                if i == total_mins - min_per_split:
                    print('All split successfully')
                parts.append(split_fn)
            return parts
        except Exception as e:
            print(e)
if __name__ == '__main__':
    # Example usage: split a 7-minute recording into 5-minute parts.
    folder = 'C:\\Users\\Vasco\\Documents\\Python\\SchwurbelTranskipt'
    file = 'test 7min.ogg'
    split_wav = SplitConvert(folder, file)
    split_wav.multiple_split(min_per_split=5)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 7/6/2018 17:26 AM
# @Author : jaykky
# @File : zjai_6_comparison.py
# @Software: ZJ_AI
# This program compares a model's detections against the ground truth and
# computes precision and recall as model metrics.
# Method: diff the <object> entries of pairs of XML annotation files.
# Used for testing Faster R-CNN model performance.
# Input: the paths of the two XML directories
# Output: precision and recall
# =========================================================
import xml.etree.ElementTree as ET
import os
def get_xml_label_num(xmlPath):
    """Read a Pascal-VOC style annotation file and collect its object labels.

    :param xmlPath: path to the annotation XML file
    :return: tuple ``(count, labelList)`` -- the number of ``<object>``
        elements and their ``<name>`` texts, in document order
    :raises FileNotFoundError: if ``xmlPath`` does not exist
    """
    # Fail fast with a clear error; the original printed the path and then
    # crashed inside ET.parse with the same exception type anyway.
    if not os.path.exists(xmlPath):
        raise FileNotFoundError(xmlPath)
    element = ET.parse(xmlPath).getroot()
    element_objs = element.findall('object')
    labelList = [obj.find('name').text for obj in element_objs]
    return len(element_objs), labelList
def compare_from_xml(xmlPath1, xmlPath2):
    """Compare detected annotations (xmlPath1) against ground truth (xmlPath2).

    Both directories must contain identically named XML files. Prints running
    totals and returns a CSV string
    ``"precision,recall,tp,fp,fn,detected,truth"``.
    """
    # Pair each detection file with the ground-truth file of the same name.
    xmlFileList1 = []
    xmlFileList2 = []
    for xmlFile in os.listdir(xmlPath1):
        xmlFileList1.append(os.path.join(xmlPath1, xmlFile))
        xmlFileList2.append(os.path.join(xmlPath2, xmlFile))
    print(len(xmlFileList1), len(xmlFileList2))
    tp_sum = 0
    fp_sum = 0
    fn_sum = 0
    d_sum = 0
    t_sum = 0
    for xmlFile1, xmlFile2 in zip(xmlFileList1, xmlFileList2):
        tp = 0
        fp = 0
        d_labelNum, d_labelList = get_xml_label_num(xmlFile1)
        t_labelNum, t_labelList = get_xml_label_num(xmlFile2)
        # Greedy matching: each detected label consumes at most one
        # ground-truth label of the same name.
        for d_label in d_labelList:
            if d_label in t_labelList:
                t_labelList.remove(d_label)
                tp += 1
            else:
                fp += 1
        fn = t_labelNum - tp
        tp_sum += tp
        fp_sum += fp
        fn_sum += fn
        d_sum += d_labelNum
        t_sum += t_labelNum
    # Guard the divisions: empty inputs previously raised ZeroDivisionError.
    prec = tp_sum / (fp_sum + tp_sum) if (fp_sum + tp_sum) else 0.0
    recall = tp_sum / (tp_sum + fn_sum) if (tp_sum + fn_sum) else 0.0
    print(prec, recall)
    print(tp_sum, fp_sum, fn_sum, d_sum, t_sum)
    return "{},{},{},{},{},{},{}".format(prec, recall, tp_sum, fp_sum,
                                         fn_sum, d_sum, t_sum)
if __name__ == "__main__":
    # Example paths: detections vs. ground-truth annotation directories.
    xmlPath1 = "/home/hyl/data/ljk/github-pro/zjai-com/data/train_data/random_choice_data_1000/Annotations_test"
    xmlPath2 = "/home/hyl/data/ljk/github-pro/zjai-com/data/train_data/random_choice_data_1000/Annotations"
    # save_path is currently unused.
    save_path = '/home/hyl/data/ljk/github-pro/zjai-com/data/predict_data/images'
    compare_from_xml(xmlPath1, xmlPath2)
|
# Game environment parameters.
width = 540            # playfield width in pixels
height = 440           # playfield height in pixels
block_length = 20      # side length of one grid block in pixels
brainLayer = [24, 16, 3]  # neural network layers that act as brain of snake
# Genetic algorithm parameters.
population_size = 50
no_of_generations = 30
per_of_best_old_pop = 20.0  # percent of best performing parents to be included
per_of_worst_old_pop = 2.0  # percent of worst performing parents to be included
mutation_percent = 7.0      # presumably the per-gene mutation probability (%) -- confirm in the GA code
mutation_intensity = 0.1    # presumably the magnitude of each mutation -- confirm in the GA code
|
from rest_framework import status
from mayan.apps.documents.tests.mixins import DocumentTestMixin
from mayan.apps.rest_api.tests.base import BaseAPITestCase
from ..models import Index
from ..permissions import (
permission_document_indexing_create, permission_document_indexing_delete,
permission_document_indexing_view
)
from .literals import TEST_INDEX_LABEL, TEST_INDEX_SLUG
from .mixins import IndexTestMixin
class DocumentIndexingAPIViewTestMixin(object):
    """Request helpers for the index REST API endpoints under test."""

    def _request_test_index_create_api_view(self):
        # POST a new index built from the test label/slug constants.
        return self.post(
            viewname='rest_api:index-list', data={
                'label': TEST_INDEX_LABEL, 'slug': TEST_INDEX_SLUG,
                'document_types': self.test_document_type.pk
            }
        )

    def _request_test_index_delete_api_view(self):
        # DELETE the index created by the test fixture.
        return self.delete(
            viewname='rest_api:index-detail', kwargs={
                'pk': self.test_index.pk
            }
        )

    def _request_test_index_detail_api_view(self):
        # GET the detail view of the test index.
        return self.get(
            viewname='rest_api:index-detail', kwargs={
                'pk': self.test_index.pk
            }
        )
class DocumentIndexingAPITestCase(
    IndexTestMixin, DocumentIndexingAPIViewTestMixin, DocumentTestMixin,
    BaseAPITestCase
):
    """API tests for index create/delete/detail permission enforcement."""

    # These index-level tests do not need an uploaded document.
    auto_upload_test_document = False

    def test_index_create_api_view_no_permission(self):
        response = self._request_test_index_create_api_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        # Nothing should have been created.
        self.assertEqual(Index.objects.count(), 0)

    def test_index_create_api_view_with_permission(self):
        self.grant_permission(permission=permission_document_indexing_create)
        response = self._request_test_index_create_api_view()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        index = Index.objects.first()
        self.assertEqual(response.data['id'], index.pk)
        self.assertEqual(response.data['label'], index.label)
        self.assertEqual(Index.objects.count(), 1)
        self.assertEqual(index.label, TEST_INDEX_LABEL)

    def test_index_delete_api_view_no_permission(self):
        self._create_test_index()
        response = self._request_test_index_delete_api_view()
        # Without object access the API answers 404 rather than 403 --
        # presumably to avoid revealing the index's existence; confirm
        # against the project's access-control convention.
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertTrue(self.test_index in Index.objects.all())

    def test_index_delete_api_view_with_access(self):
        self._create_test_index()
        self.grant_access(
            obj=self.test_index, permission=permission_document_indexing_delete
        )
        response = self._request_test_index_delete_api_view()
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertTrue(self.test_index not in Index.objects.all())

    def test_index_detail_api_view_no_access(self):
        self._create_test_index()
        response = self._request_test_index_detail_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertTrue('id' not in response.data)

    def test_index_detail_api_view_with_access(self):
        self._create_test_index()
        self.grant_access(
            obj=self.test_index, permission=permission_document_indexing_view
        )
        response = self._request_test_index_detail_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['id'], self.test_index.pk
        )
|
"""Module for Deletion Tokenization."""
from .basic_regex_tokenizer import BasicRegexTokenizer
class Deletion(BasicRegexTokenizer):
    """Tokenizer that recognizes deletion mentions in variant text."""

    def pattern(self) -> str:
        """Regex matching 'del', 'deletion', or '(copy number) loss'."""
        return r'\b(del|deletion|(copy number)? ?loss)\b'

    def token_type(self) -> str:
        """Label attached to tokens matched by this tokenizer."""
        return 'Deletion'
|
import os
import sys
from collections import OrderedDict
import logging
from plenum.common.constants import ClientBootStrategy, HS_FILE, HS_LEVELDB, \
HS_ROCKSDB, HS_MEMORY, KeyValueStorageType
from plenum.common.types import PLUGIN_TYPE_STATS_CONSUMER
# Filesystem layout for wallets and client data.
walletsDir = 'wallets'
clientDataDir = 'data/clients'
GENERAL_CONFIG_DIR = '/etc/indy'
# walletDir = 'wallet'
# it should be filled from baseConfig
NETWORK_NAME = ''
USER_CONFIG_DIR = None
GENERAL_CONFIG_FILE = 'plenum_config.py'
NETWORK_CONFIG_FILE = 'plenum_config.py'
USER_CONFIG_FILE = 'plenum_config.py'
# Ledger transaction file names; the genesis files add the suffix below.
pool_transactions_file_base = 'pool_transactions'
domain_transactions_file_base = 'domain_transactions'
config_transactions_file_base = 'config_transactions'
genesis_file_suffix = '_genesis'
poolTransactionsFile = pool_transactions_file_base
domainTransactionsFile = domain_transactions_file_base
configTransactionsFile = config_transactions_file_base
# Key-value store backends and database names (RocksDB by default).
stateTsStorage = KeyValueStorageType.Rocksdb
poolStateDbName = 'pool_state'
domainStateDbName = 'domain_state'
configStateDbName = 'config_state'
stateTsDbName = "state_ts_db"
stateSignatureDbName = 'state_signature'
# There is only one seqNoDB as it maintain the mapping of
# request id to sequence numbers
seqNoDbName = 'seq_no_db'
clientBootStrategy = ClientBootStrategy.PoolTxn
hashStore = {
    "type": HS_ROCKSDB
}
primaryStorage = None
domainStateStorage = KeyValueStorageType.Rocksdb
poolStateStorage = KeyValueStorageType.Rocksdb
configStateStorage = KeyValueStorageType.Rocksdb
reqIdToTxnStorage = KeyValueStorageType.Rocksdb
stateSignatureStorage = KeyValueStorageType.Rocksdb
transactionLogDefaultStorage = KeyValueStorageType.Rocksdb
DefaultPluginPath = {
    # PLUGIN_BASE_DIR_PATH: "<abs path of plugin directory can be given here,
    # if not given, by default it will pickup plenum/server/plugin path>",
    PLUGIN_TYPE_STATS_CONSUMER: "stats_consumer"
}
PluginsDir = "plugins"
stewardThreshold = 20
# Monitoring configuration
PerfCheckFreq = 10
UnorderedCheckFreq = 60
# Temporarily reducing DELTA till the calculations for extra work are not
# incorporated
DELTA = 0.4
LAMBDA = 60
OMEGA = 5
SendMonitorStats = False
ThroughputWindowSize = 30
DashboardUpdateFreq = 5
ThroughputGraphDuration = 240
LatencyWindowSize = 30
LatencyGraphDuration = 240
# Thresholds/frequencies for spike-notification events (throughput and
# per-node request spikes share the same shape of settings).
notifierEventTriggeringConfig = {
    'clusterThroughputSpike': {
        'bounds_coeff': 10,
        'min_cnt': 15,
        'freq': 60,
        'min_activity_threshold': 10,
        'use_weighted_bounds_coeff': True,
        'enabled': True
    },
    'nodeRequestSpike': {
        'bounds_coeff': 10,
        'min_cnt': 15,
        'freq': 60,
        'min_activity_threshold': 10,
        'use_weighted_bounds_coeff': True,
        'enabled': True
    }
}
SpikeEventsEnabled = False
# Stats server configuration
STATS_SERVER_IP = '127.0.0.1'
STATS_SERVER_PORT = 30000
STATS_SERVER_MESSAGE_BUFFER_MAX_SIZE = 1000
# Node status configuration
DUMP_VALIDATOR_INFO_INIT_SEC = 3
DUMP_VALIDATOR_INFO_PERIOD_SEC = 60
# Controls sending of view change messages, a node will only send view change
# messages if it did not send any sent instance change messages in last
# `ViewChangeWindowSize` seconds
ViewChangeWindowSize = 60
# A node if finds itself disconnected from primary of the master instance will
# wait for `ToleratePrimaryDisconnection` before sending a view change message
ToleratePrimaryDisconnection = 2
# Timeout factor after which a node starts requesting consistency proofs if has
# not found enough matching
ConsistencyProofsTimeout = 5
# Timeout factor after which a node starts requesting transactions
# We assume, that making consistency proof + iterate over all transactions (getAllTxn)
# will take a little time (0.003 sec for making cp for 10 000 txns +
# 0.2 sec for getAllTxn for 10 000 txn)
# Therefore, node communication is the most cost operation
# Timeout for pool catchuping would be nodeCount * CatchupTransactionsTimeout
CatchupTransactionsTimeout = 6
# Log configuration
logRotationBackupCount = 300
logRotationMaxBytes = 100 * 1024 * 1024
logRotationCompression = "xz"
logFormat = '{asctime:s} | {levelname:8s} | {filename:20s} ({lineno: >4}) | {funcName:s} | {message:s}'
logFormatStyle = '{'
logLevel = logging.NOTSET
enableStdOutLogging = True
# OPTIONS RELATED TO TESTS
# TODO test 60sec
TestRunningTimeLimitSec = 100
# Expected time for one stack to get connected to another
ExpectedConnectTime = 3.3 if sys.platform == 'win32' else 2
# Since the ledger is stored in a flat file, this makes the ledger do
# an fsync on every write. Making it True can significantly slow
# down writes as shown in a test `test_file_store_perf.py` in the ledger
# repository
EnsureLedgerDurability = False
log_override_tags = dict(cli={}, demo={})
# Number of messages zstack accepts at once
LISTENER_MESSAGE_QUOTA = 100
REMOTES_MESSAGE_QUOTA = 100
# After `Max3PCBatchSize` requests or `Max3PCBatchWait`, whichever is earlier,
# a 3 phase batch is sent
# Max batch size for 3 phase commit
Max3PCBatchSize = 100
# Max time to wait before creating a batch for 3 phase commit
Max3PCBatchWait = .001
# Each node keeps a map of PrePrepare sequence numbers and the corresponding
# txn seqnos that came out of it. Helps in servicing Consistency Proof Requests
ProcessedBatchMapsToKeep = 100
# After `MaxStateProofSize` requests or `MaxStateProofSize`, whichever is
# earlier, a signed state proof is sent
# Max 3 state proof size
MaxStateProofSize = 10
# State proof timeout
MaxStateProofTime = 3
# After ordering every `CHK_FREQ` batches, replica sends a CHECKPOINT
CHK_FREQ = 100
# Difference between low water mark and high water mark
LOG_SIZE = 3 * CHK_FREQ
# Client-side retry/timeout knobs (seconds / attempt counts).
CLIENT_REQACK_TIMEOUT = 5
CLIENT_REPLY_TIMEOUT = 15
CLIENT_MAX_RETRY_ACK = 5
CLIENT_MAX_RETRY_REPLY = 5
VIEW_CHANGE_TIMEOUT = 60  # seconds
INSTANCE_CHANGE_TIMEOUT = 60
MAX_CATCHUPS_DONE_DURING_VIEW_CHANGE = 5
MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE = 15
# permissions for keyring dirs/files
WALLET_DIR_MODE = 0o700  # drwx------
WALLET_FILE_MODE = 0o600  # -rw-------
# This timeout is high enough so that even if some PRE-PREPAREs are stashed
# because of being delivered out of order or being out of watermarks or not
# having finalised requests.
ACCEPTABLE_DEVIATION_PREPREPARE_SECS = 600  # seconds
# TXN fields length limits
ALIAS_FIELD_LIMIT = 256
DIGEST_FIELD_LIMIT = 512
TIE_IDR_FIELD_LIMIT = 256
NAME_FIELD_LIMIT = 256
SENDER_CLIENT_FIELD_LIMIT = 256
HASH_FIELD_LIMIT = 256
SIGNATURE_FIELD_LIMIT = 512
JSON_FIELD_LIMIT = 5 * 1024
DATA_FIELD_LIMIT = 5 * 1024
NONCE_FIELD_LIMIT = 512
ORIGIN_FIELD_LIMIT = 128
ENC_FIELD_LIMIT = 5 * 1024
RAW_FIELD_LIMIT = 5 * 1024
SIGNATURE_TYPE_FIELD_LIMIT = 16
BLS_KEY_LIMIT = 512
BLS_SIG_LIMIT = 512
BLS_MULTI_SIG_LIMIT = 512
VERSION_FIELD_LIMIT = 128
DATETIME_LIMIT = 35
# Dotted path under which server plugins are looked up; none enabled by default.
PLUGIN_ROOT = 'plenum.server.plugin'
ENABLED_PLUGINS = []
|
# import sys,os
# sys.path.append(os.getcwd())
from selenium.webdriver.common.by import By
"""
首页
"""
# Labels of the top menu entries on the app home screen (login/register,
# trial courses, primary/junior/senior courses, topics, homework tutoring,
# mock exams, live classroom, order/activate).
sy_login_list = ['登录/注册', '体验课程', '小学课程', '初中课程', '高中课程', '专题课程', '作业辅导', '模拟考试', '直播课堂', '订购/激活']
# sy_login_list_element=
def sy_login_list_fun():
    """Build (By.XPATH, locator) tuples for every home-page menu label.

    :return: list of XPath locator tuples, one per entry in sy_login_list
    """
    sy_login_list_element = []
    for label in sy_login_list:
        # Bug fix: the @text value must be quoted inside the XPath predicate.
        # The original produced e.g. @text=登录/注册 (no quotes), which XPath
        # interprets as a path expression, never matching the literal text.
        sy_login_list_element.append((
            By.XPATH,
            "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and "
            "@text='" + label + "']"
        ))
    return sy_login_list_element
# Home page: login/register button
sy_login_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='登录/注册']")
# Personal center
sy_personal_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='个人中心']")
# Trial courses (NOTE(review): name has a typo, "XPTH")
sy_tycourse_btn_XPTH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='体验课程']")
# Primary school courses
sy_xxcourse_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='小学课程']")
# Junior high school courses
sy_czcourse_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='初中课程']")
# Senior high school courses
sy_gzcourse_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='高中课程']")
# Special-topic courses
sy_ztcourse_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='专题课程']")
# Homework tutoring
sy_homework_help_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='作业辅导']")
# Mock exams
sy_practice_test_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='模拟考试']")
# Live classroom
sy_Live_classroom_btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='直播课堂']")
# Order / activate
sy__btn_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='订购/激活']")
"""
体验课程
"""
# Synchronized (textbook-aligned) courses
ty_tbcourse_btn_XPATH = (By.XPATH, "//android.widget.TextView[@text='同步课程']")
# Homework tutoring
ty_zyfd_btn_XPATH = (By.XPATH, "//android.widget.TextView[@text='作业辅导']")
# Chinese classics
ty_gx_btn_XPATH = (By.XPATH, "//android.widget.TextView[@text='国学']")
'''
小学课程--版本
'''
# Primary-school textbook edition labels (PEP, Jiangsu, Shandong, BNU, etc.)
xx_course_versions_list = ['人教版', '苏教版', '鲁教版', '北师大', '浙教版', '湘教版', '冀教版', '鄂教版', '西师大', '外研社', '语文S版', '沪教版', '教科版',
                           '青岛版', '青岛(五四)', '人教(五四)']
def xx_course_versions_lsit_fun():
    """Build (By.XPATH, locator) tuples for each primary-school textbook edition.

    Bug fixes versus the original:
    - iterated xx_course_grade_list (grade labels) instead of
      xx_course_versions_list (edition labels);
    - the @text value was not quoted, producing an XPath that never matches.

    :return: list of XPath locator tuples, one per edition label
    """
    xx_course_lsit_versions_element = []
    for version in xx_course_versions_list:
        xx_course_lsit_versions_element.append((
            By.XPATH,
            "//android.widget.TextView["
            "@resource-id='com.deskmateones:id/tv_versions' and @text='" + version + "']"
        ))
    return xx_course_lsit_versions_element
# People's Education Press (PEP) edition
xx_course_versions_rj_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='人教版']")
# Jiangsu Education edition
xx_course_versions_sj_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='苏教版']")
# Shandong Education edition
xx_course_versions_lj_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='鲁教版']")
# Beijing Normal University edition
xx_course_versions_bsd_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='北师大']")
# Zhejiang Education edition
xx_course_versions_zj_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='浙教版']")
# Hunan Education edition
xx_course_versions_xj_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='湘教版']")
# Hebei Education edition
xx_course_versions_jj_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='冀教版']")
# Hubei Education edition
xx_course_versions_ej_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='鄂教版']")
# Southwest Normal University edition
xx_course_versions_xs_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='西师大']")
# Foreign Language Teaching and Research Press edition
xx_course_versions_wys_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='外研社']")
# "Yuwen S" (Chinese S) edition
xx_course_versions_yws_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='语文S版']")
# Shanghai Education edition
xx_course_versions_hj_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='沪教版']")
# Educational Science Press edition
xx_course_versions_jk_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='教科版']")
# Qingdao edition
xx_course_versions_qd_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='青岛版']")
# Qingdao edition (five-four school system)
xx_course_versions_qdws_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='青岛(五四)']")
# PEP edition (five-four school system)
xx_course_versions_rjws_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='人教(五四)']")
'''
小学课程--选择年级
'''
# Primary-school grade labels (grade 1 through grade 6)
xx_course_grade_list = ['一年级', '二年级', '三年级', '四年级', '五年级', '六年级']
def xx_course_grade_lsit_fun():
    """Build (By.XPATH, locator) tuples for each primary-school grade label.

    Bug fix: the @text value is now quoted inside the XPath predicate; the
    original produced e.g. @text=一年级 (no quotes), which XPath interprets as
    a path expression and never matches the literal text.

    :return: list of XPath locator tuples, one per entry in xx_course_grade_list
    """
    xx_course_lsit_grade_element = []
    for grade in xx_course_grade_list:
        xx_course_lsit_grade_element.append((
            By.XPATH,
            "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions'"
            " and @text='" + grade + "']"
        ))
    return xx_course_lsit_grade_element
# Grade 1
xx_course_grade_one_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='一年级']")
# Grade 2
xx_course_grade_two_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='二年级']")
# Grade 3
xx_course_grade_three_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='三年级']")
# Grade 4
xx_course_grade_four_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='四年级']")
# Grade 5
xx_course_grade_five_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='五年级']")
# Grade 6
xx_course_grade_six_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_versions' and @text='六年级']")
'''
登录页
'''
# Username input
login_user_ID = (By.ID, "com.deskmateones:id/act_landing_name")
# Password input
login_password_ID = (By.ID, "com.deskmateones:id/et_landing_password")
# Login button
login_submit_ID = (By.ID, "com.deskmateones:id/bt_landing_submit")
# "Forgot password" link (NOTE(review): name uses "Id" instead of "ID")
login_wjmm_Id = (By.ID, "com.deskmateones:id/tv_landing_wjmm")
# Register link
login_register_ID = (By.ID, "com.deskmateones:id/tv_landing_zhuce")
# Baidu one-tap login button
login_baidu_ID = (By.ID, "com.deskmateones:id/but_baidu")
# Back button in the top-right corner
login_back_ID = (By.ID, "com.deskmateones:id/iv_title_back")
'''退出登录页'''
# Change password button
unlogin_uppassword_XPATH = (
    By.XPATH, "//android.widget.Button[@resource-id='com.deskmateones:id/bt_back_pwd' and @text='修改密码']")
# Log out button
unlogin_unlogin_XPATH = (
    By.XPATH, "//android.widget.Button[@resource-id='com.deskmateones:id/bt_back_login' and @text='退出登录']")
# Details link
unlog_details_ID = (By.ID, "com.deskmateones:id/tv_details")
# Confirm button (dialog)
queding_XPATH = (
    By.XPATH, "//android.widget.Button[@resource-id='com.deskmateones:id/btn_one2one_positive' and @text='确定']")
# Cancel button (dialog)
cancel_XPATH = (
    By.XPATH, "//android.widget.Button[@resource-id='com.deskmateones:id/btn_one2one_cancel' and @text='取消']")
# Register button (dialog)
register_XPATH = (
    By.XPATH, "//android.widget.Button[@resource-id='com.deskmateones:id/btn_one2one_positive' and @text='注册']")
"""
专题课程
"""
# Special-topic course labels (English zone, review, essay tutoring, ...)
zt_course_list = ['英语专区', '总复习课', '作文辅导', '应用题辅导', '书法辅导', '微课预习', '学习方法', '课文朗诵', '生字听写', '单词听写']
# English zone
zt_course_english_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='英语专区']")
# General review lessons
zt_course_zfx_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='总复习课']")
# Essay tutoring
zt_course_zw_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='作文辅导']")
# Word-problem tutoring
zt_course_yyt_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top' and @text='应用题辅导']")
# Calligraphy tutoring
zt_course_sffd_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='书法辅导']")
# Micro-lesson preview
zt_course_wkyx_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='微课预习']")
# Study methods
zt_course_xxff_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='学习方法']")
# Text recitation
zt_course_kwls_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='课文朗诵']")
# Character dictation
zt_course_sztx_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='生字听写']")
# Word dictation
zt_course_dctx_XPATH = (
    By.XPATH, "//android.widget.TextView[@resource-id='com.deskmateones:id/tv_home_top_one' and @text='单词听写']")
# Back button
zt_course_back_ID = (By.ID, "com.deskmateones:id/iv_title_back")
# # 设置按钮
# person_setting_btn_id = (By.ID, "com.tpshop.malls:id/setting_btn")
# # 我的订单
# my_order_btn = (By.XPATH, "//*[contains(@text,'我的订单') and contains(@resource-id,'com.tpshop.malls:id/title_txtv')]")
# """
# 设置页面
# """
# # 安全退出按钮
# logout_btn_id = (By.ID, "com.tpshop.malls:id/exit_btn")
|
"""
Module to wrap an integer in bitwise flag/field accessors.
"""
from collections import OrderedDict
from pcapng.ngsix import namedtuple, Iterable
class FlagBase(object):
    """Base class for one field inside a FlagWord bitfield.

    Knows its bit offset, width and mask within the owner word and handles
    all of the bitwise arithmetic for its subclasses.
    """
    __slots__ = [
        'owner',
        'offset',
        'size',
        'extra',
        'mask',
    ]

    def __init__(self, owner, offset, size, extra=None):
        if size < 1:
            raise TypeError('Flag must be at least 1 bit wide')
        if size > owner._nbits:
            raise TypeError('Flag must fit into owner size')
        self.owner = owner
        self.offset = offset
        self.size = size
        self.extra = extra
        # Mask selecting exactly this field's bits within the owner word.
        self.mask = ((1 << size) - 1) << offset

    def get_bits(self):
        """Extract this field's bits from the owner and shift them down."""
        return (self.owner._value & self.mask) >> self.offset

    def set_bits(self, val):
        """Store *val* (truncated to the field width) into the owner word."""
        width_mask = (1 << self.size) - 1
        cleared = self.owner._value & ~self.mask
        self.owner._value = cleared | ((val & width_mask) << self.offset)
class FlagBool(FlagBase):
    """A single-bit field exposed as a Python bool."""

    def __init__(self, owner, offset, size, extra=None):
        if size != 1:
            raise TypeError('{cls} can only be 1 bit in size'.format(cls=self.__class__.__name__))
        super(FlagBool, self).__init__(owner, offset, size)

    def get(self):
        """Return the stored bit as True/False."""
        return self.get_bits() != 0

    def set(self, val):
        """Store the truthiness of *val* as a single bit."""
        self.set_bits(1 if val else 0)
class FlagUInt(FlagBase):
    """Unsigned integer field occupying part of a larger bitfield."""

    def get(self):
        """Return the raw unsigned value of this field."""
        return self.get_bits()

    def set(self, val):
        """Store *val* (truncated to the field width) into the owner."""
        self.set_bits(val)
class FlagEnum(FlagBase):
    """Field whose numeric value indexes a fixed list of symbolic values."""

    def __init__(self, owner, offset, size, extra=None):
        if not isinstance(extra, Iterable):
            raise TypeError('{cls} needs an iterable of values'.format(cls=self.__class__.__name__))
        extra = list(extra)
        # All listed values must be addressable within `size` bits.
        if len(extra) > 2**size:
            raise TypeError('{cls} iterable has too many values (got {got}, {size} bits only address {max})'.format(cls=self.__class__.__name__, got=len(extra), size=size, max=2**size))
        super(FlagEnum, self).__init__(owner, offset, size, extra)

    def get(self):
        """Return the symbolic value, or '[invalid value]' when out of range."""
        index = self.get_bits()
        if index < len(self.extra):
            return self.extra[index]
        return '[invalid value]'

    def set(self, val):
        """Accept either a listed symbolic value or a raw integer index."""
        if val in self.extra:
            self.set_bits(self.extra.index(val))
        elif isinstance(val, int):
            self.set_bits(val)
        else:
            raise TypeError('Invalid value {val} for {cls}'.format(val=val, cls=self.__class__.__name__))
# Class representing a single flag schema for FlagWord.
# 'nbits' defaults to 1, and 'extra' defaults to None.
# (namedtuple is imported from pcapng.ngsix — presumably a compat shim so the
# 'defaults' argument is available on older Pythons; verify in pcapng.ngsix.)
FlagField = namedtuple('FlagField', ('name', 'ftype', 'nbits', 'extra'),
                       defaults=(1, None))
class FlagWord(object):
    """\
    Class to wrap an integer in bitwise flag/field accessors.
    """
    __slots__ = [
        '_nbits',
        '_value',
        '_schema',
    ]

    def __init__(self, schema, nbits=32, initial=0):
        """
        :param schema:
            A list of FlagField objects representing the values to be packed
            into this object, in order from LSB to MSB of the underlying int
        :param nbits:
            An integer representing the total number of bits used for flags
        :param initial:
            The initial integer value of the flags field
        """
        self._nbits = nbits
        self._value = initial
        self._schema = OrderedDict()
        tot_bits = sum([item.nbits for item in schema])
        if tot_bits > nbits:
            raise TypeError("Too many fields for {nbits}-bit field (schema defines {tot} bits)".format(nbits=nbits, tot=tot_bits))
        # Assign each field its bit offset, packing from the LSB upwards.
        bitn = 0
        for item in schema:
            if not isinstance(item, FlagField):
                raise TypeError('Schema must be composed of FlagField objects')
            if not issubclass(item.ftype, FlagBase):
                raise TypeError('Expected FlagBase, got {}'.format(item.ftype))
            self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra)
            bitn += item.nbits

    def __int__(self):
        """Return the packed integer value of all fields."""
        return self._value

    def __repr__(self):
        """Show the raw value plus every field decoded by its accessor."""
        rv = '<{0} (value={1})'.format(self.__class__.__name__, self._value)
        for k, v in self._schema.items():
            rv += ' {0}={1}'.format(k, v.get())
        return rv+'>'

    def __getattr__(self, name):
        # Invoked only when normal attribute lookup fails (i.e. for non-slot
        # names): resolve the name as a schema field and return its value.
        try:
            v = self._schema[name]
        except KeyError:
            raise AttributeError(name)
        return v.get()

    def __setattr__(self, name, val):
        # Slot attributes (_nbits/_value/_schema) are assigned normally; any
        # other name falls through to the matching schema field's encoder.
        try:
            return object.__setattr__(self, name, val)
        except AttributeError:
            pass
        try:
            v = self._schema[name]
        except KeyError:
            raise AttributeError(name)
        return v.set(val)
if __name__ == '__main__':
    # Demo / smoke test: build a 32-bit flags word (2+3+4+7+16 bits; the field
    # names suggest it mirrors the pcapng EPB flags layout — confirm against
    # the pcapng spec) and poke the FCS-length field.
    f = FlagWord([
        FlagField('inout', FlagEnum, 2, ('NA', 'inbound', 'outbound')),
        FlagField('casttype', FlagEnum, 3, ('NA', 'unicast', 'multicast', 'broadcast', 'promiscuous')),
        FlagField('fcslen', FlagUInt, 4),
        FlagField('reserved', FlagUInt, 7),
        FlagField('err_16', FlagBool),
        FlagField('err_17', FlagBool),
        FlagField('err_18', FlagBool),
        FlagField('err_19', FlagBool),
        FlagField('err_20', FlagBool),
        FlagField('err_21', FlagBool),
        FlagField('err_22', FlagBool),
        FlagField('err_23', FlagBool),
        FlagField('err_crc', FlagBool),
        FlagField('err_long', FlagBool),
        FlagField('err_short', FlagBool),
        FlagField('err_frame_gap', FlagBool),
        FlagField('err_frame_align', FlagBool),
        FlagField('err_frame_delim', FlagBool),
        FlagField('err_preamble', FlagBool),
        FlagField('err_symbol', FlagBool),
    ])
    f.fcslen = 12
    print(f)
    print(int(f))
|
import numpy as np
from datasets import load_dataset
from transformers import pipeline
from dataset import KeyValueDataset
from tasks.TaskTypes import TaskType
def evaluate(
    operation,
    evaluate_filter,
    model_name,
    dataset_name,
    split="validation[:20%]",
):
    """Score a QA model on a dataset before and after applying *operation*.

    :param operation: project transformation/filter object; must be accepted
        by dataset.apply_filter / dataset.apply_transformation
    :param evaluate_filter: when True, the baseline score is computed on the
        filtered dataset instead of the original one
    :param model_name: HuggingFace model id; defaults to a tiny SQuADv2 model
    :param dataset_name: HuggingFace dataset id; defaults to "squad"
    :param split: split expression forwarded to load_dataset
    :return: dict with baseline "accuracy", "pt_accuracy" after the
        transformation, plus model/split/dataset metadata
    """
    # (1) load model
    if model_name is None:
        model_name = "mrm8488/bert-tiny-5-finetuned-squadv2"
    # (2) load test set
    if dataset_name is None:
        dataset_name = "squad"
    print(
        f"Loading <{dataset_name}> dataset to evaluate <{model_name}> model."
    )
    hf_dataset = load_dataset(dataset_name, split=split)
    qa_pipeline = pipeline(
        "question-answering", model=model_name, tokenizer=model_name
    )
    dataset = KeyValueDataset.from_huggingface(
        hf_dataset,
        TaskType.QUESTION_ANSWERING,
        ["context", "question", "answers"],
    )
    print(
        f"Here is the performance of the model {model_name} on the {split} split of the {dataset_name} dataset"
    )
    # Baseline evaluation: on the filtered dataset or on the original one.
    if evaluate_filter:
        filtered_dataset = dataset.apply_filter(operation)
        print("Starting evaluation on the filtered dataset.")
        performance = evaluate_on_dataset(filtered_dataset, qa_pipeline)
    else:
        print("Starting evaluation on the original dataset.")
        performance = evaluate_on_dataset(dataset, qa_pipeline)
    # The transformed ("perturbed") dataset is always evaluated; its accuracy
    # is attached as pt_accuracy next to the baseline accuracy.
    print("Starting evaluation on the transformed dataset.")
    pt_dataset = dataset.apply_transformation(operation)
    pt_performance = evaluate_on_dataset(pt_dataset, qa_pipeline)
    performance["pt_accuracy"] = pt_performance["accuracy"]
    # (3) Execute perturbation
    # (4) Execute the performance of the original set and the perturbed set
    performance["model_name"] = model_name
    performance["split"] = split
    performance["dataset_name"] = dataset_name
    return performance
def evaluate_on_dataset(dataset, qa_pipeline):
    """Compute exact-match accuracy of *qa_pipeline* over *dataset*.

    :param dataset: iterable of (context, question, answers) triples
    :param qa_pipeline: callable taking {"context": ..., "question": ...}
        (plus truncation=True) and returning a dict with an "answer" key
    :return: dict with percentage "accuracy" (rounded to 1 decimal) and
        "no_of_examples"
    """
    accuracy = 0
    total = 0
    for example in dataset:
        context, question, answers = example
        prediction = qa_pipeline(
            {"context": context, "question": question}, truncation=True
        )["answer"]
        # Exact-match scoring: prediction must appear among the gold answers.
        if prediction in answers:
            accuracy += 1
        total += 1
    if total == 0:
        # Robustness fix: the original raised ZeroDivisionError on an empty
        # dataset; report zero accuracy over zero examples instead.
        print("The number of examples = 0")
        return {"accuracy": 0.0, "no_of_examples": 0}
    print(f"The number of examples = {total}")
    print(f"The accuracy of exact matching = {100 * accuracy / total}")
    return {
        "accuracy": np.round(100 * accuracy / total, 1),
        "no_of_examples": total,
    }
|
# -*- coding: utf-8 -*-
"""Created on Sun Sep 8 13:28:53 2019.
@author: Julian Märte
Updated by: Brendan O'Dongohue, bodonoghue85@gmail.com, Oct 14th 2020
"""
import re
import numpy as np
import scipy.sparse
# Section headers of an MPS file, used to switch the parser's mode.
CORE_FILE_ROW_MODE = 'ROWS'
CORE_FILE_COL_MODE = 'COLUMNS'
CORE_FILE_RHS_MODE = 'RHS'
CORE_FILE_BOUNDS_MODE = 'BOUNDS'
# Internal sub-modes distinguishing named vs unnamed RHS/BOUNDS sections.
CORE_FILE_BOUNDS_MODE_NAME_GIVEN = 'BOUNDS_NAME'
CORE_FILE_BOUNDS_MODE_NO_NAME = 'BOUNDS_NO_NAME'
CORE_FILE_RHS_MODE_NAME_GIVEN = 'RHS_NAME'
CORE_FILE_RHS_MODE_NO_NAME = 'RHS_NO_NAME'
# Row type that marks the objective row ('N' = free row) in the ROWS section.
ROW_MODE_OBJ = 'N'


def load_mps(path):
    """Parse an MPS linear-programming file into its components.

    Reads the NAME, ROWS, COLUMNS, RHS and BOUNDS sections and returns a
    dict with keys: name, objective_name, row_names, types (row types),
    col_names, col_types ('integral'/'continuous'), c (objective vector),
    A (scipy dok constraint matrix), rhs_names, rhs, bnd_names, bnd.

    Fixes versus the original: bare `except:` clauses are narrowed to
    `except ValueError:` (only list.index can fail there), and blank lines
    no longer crash the parser.

    :param path: filesystem path of the MPS file to read
    :return: dict of parsed problem data (see above)
    """
    mode = ''
    name = None
    objective_name = None
    row_names = []
    types = []
    col_names = []
    col_types = []
    A = scipy.sparse.dok_matrix((0, 0), dtype=np.float64)
    c = np.array([])
    rhs_names = []
    rhs = {}
    bnd_names = []
    bnd = {}
    integral_marker = False
    with open(path, 'r') as reader:
        for line in reader:
            # Tokenize on spaces/tabs and drop empty tokens.
            line = re.split(' |\t', line)
            line = [x.strip() for x in line]
            line = list(filter(None, line))
            if not line:
                # Robustness fix: a blank line used to crash on line[0].
                continue
            if line[0] == 'ENDATA':
                break
            if line[0] == '*':
                # '*' introduces a comment line.
                continue
            if line[0] == 'NAME':
                name = line[1]
            elif line[0] in [CORE_FILE_ROW_MODE, CORE_FILE_COL_MODE]:
                mode = line[0]
            elif line[0] == CORE_FILE_RHS_MODE and len(line) <= 2:
                if len(line) > 1:
                    rhs_names.append(line[1])
                    rhs[line[1]] = np.zeros(len(row_names))
                    mode = CORE_FILE_RHS_MODE_NAME_GIVEN
                else:
                    mode = CORE_FILE_RHS_MODE_NO_NAME
            elif line[0] == CORE_FILE_BOUNDS_MODE and len(line) <= 2:
                if len(line) > 1:
                    bnd_names.append(line[1])
                    bnd[line[1]] = {'LO': np.zeros(
                        len(col_names)), 'UP': np.repeat(np.inf, len(col_names))}
                    mode = CORE_FILE_BOUNDS_MODE_NAME_GIVEN
                else:
                    mode = CORE_FILE_BOUNDS_MODE_NO_NAME
            elif mode == CORE_FILE_ROW_MODE:
                if line[0] == ROW_MODE_OBJ:
                    objective_name = line[1]
                else:
                    types.append(line[0])
                    row_names.append(line[1])
            elif mode == CORE_FILE_COL_MODE:
                if len(line) > 1 and line[1] == "'MARKER'":
                    # Integrality markers toggle the type of following columns.
                    if line[2] == "'INTORG'":
                        integral_marker = True
                    elif line[2] == "'INTEND'":
                        integral_marker = False
                    continue
                try:
                    i = col_names.index(line[0])
                except ValueError:
                    # New column: grow A by one column, extend bookkeeping.
                    if A.shape[1] == 0:
                        A = scipy.sparse.dok_matrix(
                            (len(row_names), 1), dtype=np.float64)
                    else:
                        new_col = scipy.sparse.dok_matrix(
                            (len(row_names), 1), dtype=np.float64)
                        A = scipy.sparse.hstack((A, new_col), format='dok')
                    col_names.append(line[0])
                    col_types.append(integral_marker * 'integral' +
                                     (not integral_marker) * 'continuous')
                    c = np.append(c, 0)
                    i = -1
                # Remaining tokens are (row, value) pairs for this column.
                j = 1
                while j < len(line) - 1:
                    if line[j] == objective_name:
                        c[i] = float(line[j + 1])
                    else:
                        A[row_names.index(line[j]), i] = float(line[j + 1])
                    j = j + 2
            elif mode == CORE_FILE_RHS_MODE_NAME_GIVEN:
                if line[0] != rhs_names[-1]:
                    raise Exception(
                        'Other RHS name was given even though name was set after RHS tag.')
                for kk in range((len(line) - 1) // 2):
                    idx = kk * 2
                    try:
                        rhs[line[0]][row_names.index(
                            line[idx+1])] = float(line[idx+2])
                    except Exception as e:
                        if objective_name == line[idx+1]:
                            print("MPS read warning: objective appearing in RHS, ignoring")
                        else:
                            raise e
            elif mode == CORE_FILE_RHS_MODE_NO_NAME:
                if len(line) % 2 == 1:  # odd: RHS named
                    try:
                        i = rhs_names.index(line[0])
                    except ValueError:
                        rhs_names.append(line[0])
                        rhs[line[0]] = np.zeros(len(row_names))
                        i = -1
                    for kk in range((len(line) - 1) // 2):
                        idx = kk * 2
                        try:
                            rhs[line[0]][row_names.index(
                                line[idx+1])] = float(line[idx+2])
                        except Exception as e:
                            if objective_name == line[idx+1]:
                                print("MPS read warning: objective appearing in RHS, ignoring")
                            else:
                                raise e
                else:  # even, no RHS name
                    try:
                        i = rhs_names.index("TEMP")
                    except ValueError:
                        rhs_names.append("TEMP")
                        rhs["TEMP"] = np.zeros(len(row_names))
                        i = -1
                    for kk in range(len(line) // 2):
                        idx = kk * 2
                        try:
                            rhs["TEMP"][row_names.index(
                                line[idx])] = float(line[idx+1])
                        except Exception as e:
                            if objective_name == line[idx]:
                                print("MPS read warning: objective appearing in RHS, ignoring")
                            else:
                                raise e
            elif mode == CORE_FILE_BOUNDS_MODE_NAME_GIVEN:
                if line[1] != bnd_names[-1]:
                    raise Exception(
                        'Other BOUNDS name was given even though name was set after BOUNDS tag.')
                if line[0] in ['LO', 'UP']:
                    bnd[line[1]][line[0]][col_names.index(
                        line[2])] = float(line[3])
                elif line[0] == 'FX':
                    bnd[line[1]]['LO'][col_names.index(
                        line[2])] = float(line[3])
                    bnd[line[1]]['UP'][col_names.index(
                        line[2])] = float(line[3])
                elif line[0] == 'PL':  # free positive (aka default)
                    bnd[line[1]]['LO'][col_names.index(line[2])] = 0
                elif line[0] == 'FR':  # free
                    bnd[line[1]]['LO'][col_names.index(line[2])] = -np.inf
                elif line[0] == 'BV':  # binary value
                    bnd[line[1]]['LO'][col_names.index(
                        line[2])] = 0.
                    bnd[line[1]]['UP'][col_names.index(
                        line[2])] = 1.
            elif mode == CORE_FILE_BOUNDS_MODE_NO_NAME:
                _bnds = ['FR', 'BV', 'PL']
                if (len(line) % 2 == 0 and line[0] not in _bnds) or (len(line) % 2 == 1 and line[0] in _bnds):  # even, bound has name
                    try:
                        i = bnd_names.index(line[1])
                    except ValueError:
                        bnd_names.append(line[1])
                        bnd[line[1]] = {'LO': np.zeros(
                            len(col_names)), 'UP': np.repeat(np.inf, len(col_names))}
                        i = -1
                    if line[0] in ['LO', 'UP']:
                        bnd[line[1]][line[0]][col_names.index(
                            line[2])] = float(line[3])
                    elif line[0] == 'FX':  # fixed
                        bnd[line[1]]['LO'][col_names.index(
                            line[2])] = float(line[3])
                        bnd[line[1]]['UP'][col_names.index(
                            line[2])] = float(line[3])
                    elif line[0] == 'PL':  # free positive (aka default)
                        bnd[line[1]]['LO'][col_names.index(line[2])] = 0
                    elif line[0] == 'FR':  # free
                        bnd[line[1]]['LO'][col_names.index(line[2])] = -np.inf
                    elif line[0] == 'BV':  # binary value
                        bnd[line[1]]['LO'][col_names.index(
                            line[2])] = 0.
                        bnd[line[1]]['UP'][col_names.index(
                            line[2])] = 1.
                else:  # odd, bound has no name
                    try:
                        i = bnd_names.index("TEMP_BOUND")
                    except ValueError:
                        bnd_names.append("TEMP_BOUND")
                        bnd["TEMP_BOUND"] = {'LO': np.zeros(
                            len(col_names)), 'UP': np.repeat(np.inf, len(col_names))}
                        i = -1
                    if line[0] in ['LO', 'UP']:
                        bnd["TEMP_BOUND"][line[0]][col_names.index(
                            line[1])] = float(line[2])
                    elif line[0] == 'FX':
                        bnd["TEMP_BOUND"]['LO'][col_names.index(
                            line[1])] = float(line[2])
                        bnd["TEMP_BOUND"]['UP'][col_names.index(
                            line[1])] = float(line[2])
                    elif line[0] == 'FR':
                        bnd["TEMP_BOUND"]['LO'][col_names.index(line[1])] = -np.inf
    return dict(name=name, objective_name=objective_name, row_names=row_names,
                col_names=col_names, col_types=col_types, types=types, c=c, A=A,
                rhs_names=rhs_names, rhs=rhs, bnd_names=bnd_names, bnd=bnd)
|
from pathlib import Path
import gzip
# Walk every entry under ./jsons; for each whose hash already has a file in
# ./htmls, overwrite that file with a small gzip "finish" marker.
files = list(Path('./jsons').glob('*'))
for idx, path in enumerate(files):
    # Bug fix: use Path.name instead of str(path).split('/')[-1], which
    # breaks on Windows path separators.
    last_hash = path.name
    target = Path(f'htmls/{last_hash}')
    if target.exists():
        print('exist', idx, len(files), last_hash)
        with target.open('wb') as fp:
            fp.write(gzip.compress(bytes('finish', 'utf8')))
    else:
        # Not yet fetched: deliberately left as a no-op.
        ...
|
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
from xml.sax.handler import ContentHandler
from xml.sax import make_parser
import sys
class ObjectHandler(ContentHandler):
    """SAX handler that converts a Magics parameter-definition XML file
    (xml/<name>.xml) into an HTML parameter table (xml/<name>.html).

    Parameters are accumulated in ``params``; inherited and nested
    definitions are handled by recursively parsing the referenced XML
    files with a fresh ObjectHandler.

    Fixes over the original:
      * ``print`` statements converted to function calls (the old form was
        a syntax error under Python 3),
      * ``newparam`` reuse path assigned to a local ``selfcurrent`` typo
        instead of ``self.current``,
      * the inherits branch appended an undefined ``s`` before the copy
        loop; the resulting NameError was swallowed by a bare ``except``,
        so inherited parameters were silently dropped,
      * duplicate ``characters`` definition removed (the second one won),
      * files are closed via ``with``.
    """

    # NOTE(review): class-level attributes are shared across instances;
    # callers reset `params`/`doc` on each freshly created handler before
    # parsing (see startElement), so this is kept for compatibility.
    name = ""
    defparam = ""
    params = []
    current = {}
    doc = ""

    def default(self, attrs):
        """Translate the XML 'default' attribute into the value shown in
        the table; sentinel spellings are normalised."""
        val = attrs.get("default")
        if val == "-int_MAX":
            return "-1.0E21"
        if val == "int_MAX":
            return "1.0E21"
        if val == "floatarray()":
            return "''"
        if val == "intarray()":
            return "''"
        if val == "stringarray()":
            return "''"
        if len(val) == 0:
            return "''"
        if val == "(automatic)":
            return "'(automatic)'"
        return val

    def toggle(self, attrs):
        """Render an on/off parameter block."""
        s = "\t%s\n\t{\n" % attrs.get("name")
        s = s + "\t\ton; on\n"
        s = s + "\t\toff; off\n"
        s = s + "\t} = %s\n" % self.default(attrs)
        return s

    def any(self, attrs):
        # Placeholder rendering (string kept verbatim, typo and all, since
        # downstream tooling may match on it).
        s = "DOCUMENATION "
        s = s + "\t} = %s" % self.default(attrs)
        return s

    def number(self, attrs):
        """Render a numeric parameter block."""
        s = "\t%s\n\t{\n" % attrs.get("name")
        s = s + "\t\t*\n"
        s = s + "\t} = %s\n" % self.default(attrs)
        return s

    def listofnumbers(self, attrs):
        """Render a number-list parameter block."""
        s = "\t%s\n\t{\n" % attrs.get("name")
        s = s + "\t\t*\n"
        s = s + "\t\t/\n"
        s = s + "\t} = %s\n" % self.default(attrs)
        return s

    def listofstrings(self, attrs):
        """Render a string-list parameter block."""
        s = "\t%s\n\t{\n" % attrs.get("name")
        s = s + "\t\t@\n"
        s = s + "\t\t/\n"
        s = s + "\t} = %s\n" % self.default(attrs)
        return s

    def colour(self, attrs):
        """Render a colour parameter block."""
        s = "\t%s\n\t{\n" % attrs.get("name")
        s = s + "\t\tred; red\n"
        s = s + "\t\t@\n"
        s = s + "\t} = %s\n" % self.default(attrs)
        return s

    def linestyle(self, attrs):
        # Placeholder rendering (string kept verbatim).
        s = "here we found a line "
        s = s + "\t} DEFAULT %s\n" % self.default(attrs)
        return s

    def options(self, attrs):
        """Open an options block and remember its default."""
        s = "\t%s\n\t{\n" % attrs.get("name")
        self.defparam = self.default(attrs)
        return s

    def normal(self, param):
        """HTML row for an ordinary parameter."""
        s = "<tr>\n"
        s = s + " <td><strong>%s</strong></td><td>%s</td>\n" % (param["name"], param["from"])
        s = s + " <td>%s</td><td>%s</td>\n" % (param["doc"], param["default"])
        s = s + "</tr>\n"
        return s

    def header(self, param):
        """HTML header row introducing a sub-object's parameters."""
        s = "<tr style='background-color: rgb(204, 204, 255);'>\n"
        s = s + " <td colspan='4'><strong>%s (%s)</strong></td>\n" % (param["name"], param["value"])
        s = s + "</tr>\n"
        return s

    # Dispatch table used by endElement: param["type"] -> renderer.
    types = { "normal" : normal,
              "header" : header}

    def newparam(self, param, value, default):
        """Register a parameter; if a parameter with this name already
        exists, it becomes the current one instead of being duplicated."""
        for p in self.params:
            if p["name"] == param:
                # was `selfcurrent = p`: the typo dropped the assignment
                self.current = p
                return
        self.current = {"name" : param, "type": "normal", "from" : value, "default" : default, "doc":""}
        self.params.append(self.current)

    def startElement(self, name, attrs):
        if (name == "magics"):
            return
        if (name == "class"):
            self.name = attrs.get("name")
            self.inherits = attrs.get("inherits")
            if self.inherits != '':
                # Recursively parse the parent definition and inherit its
                # parameters.  (The original appended an undefined `s`
                # here, so the copy loop below was never reached.)
                try:
                    with open("xml/%s.xml" % attrs.get("inherits"), "r") as file:
                        print(" inherits ->%s" % self.inherits)
                        handler = ObjectHandler()
                        parser = make_parser()
                        handler.params = []
                        parser.setContentHandler(handler)
                        parser.parse(file)
                    for s in handler.params:
                        self.params.append(s)
                except Exception:
                    # Best-effort: a missing or broken parent file is
                    # ignored, matching the original behaviour.
                    pass
        if (name == "documentation"):
            self.doc = ""
        if (name == "parameter"):
            self.param = attrs.get("name")
            print("name %s" % self.param)
            if (attrs.get("implemented") == 'no'):
                return
            fromtype = attrs.get("from")
            deftype = attrs.get("default")
            self.newparam(self.param, fromtype, deftype)
        if (name == "option"):
            handler = ObjectHandler()
            handler.params = []
            handler.doc = ""
            if (attrs.get("docdive") == 'no'):
                return
            parser = make_parser()
            parser.setContentHandler(handler)
            value = attrs.get("fortran")
            if (attrs.get("fortran") != attrs.get("xml")):
                value = "%s/%s" % (attrs.get("fortran"), attrs.get("xml"))
            # Header row marking the sub-object, then its own parameters.
            p = {"name" : self.param, "value" : value , "type" : "header"}
            self.params.append(p)
            with open("xml/%s.xml" % attrs.get("name"), "r") as file:
                parser.parse(file)
            for s in handler.params:
                self.params.append(s)

    def characters(self, doc):
        # Accumulate text content; consumed when </documentation> closes.
        self.doc = self.doc + doc

    def endElement(self, name):
        if (name == "magics"):
            return
        if (name == "documentation"):
            self.current["doc"] = self.doc
        if (name == "class"):
            # Emit the HTML table for this class and all collected params.
            filename = "xml/%s.html" % self.name
            with open(filename, "w") as definition:
                definition.write( "<h3>%s parameters</h3>\n" % self.name)
                definition.write( '<table cellspacing="1" cellpadding="1" border="1" style="width: 100%;">\n')
                definition.write( '<tbody><font face="arial,sans-serif">\n')
                definition.write( "<tr>\n")
                definition.write( " <td><strong>Parameter</strong></td><td><strong>Type</strong></td>\n")
                definition.write( " <td><strong>Documentation</strong></td><td><strong>Default</strong></td>\n")
                definition.write( "</tr>\n")
                for param in self.params:
                    s = self.types[param["type"]](self, param)
                    definition.write(s)
                definition.write( "</font></tbody>\n")
                definition.write( '</table>\n')
# Script entry point: build the HTML parameter table for the XML file
# named on the command line.  The output file is written by
# ObjectHandler.endElement when the root <class> element closes.
handler = ObjectHandler()
saxparser = make_parser()
saxparser.setContentHandler(handler)
# `with` guarantees the input file is closed (the original leaked the
# handle and also shadowed the builtin name `object`).
with open(sys.argv[1], "r") as datasource:
    saxparser.parse(datasource)
|
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
# Chromatic pitch-class names; index == semitone offset from C.
KEY_NAMES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
# Pitch classes ordered by ascending fifths: C, G, D, A, E, B, F#, ...
CIRCLE_OF_FIFTHS = [7 * k % 12 for k in range(12)]
# KEY_CENTERS = CIRCLE_OF_FIFTHS[0:6] + CIRCLE_OF_FIFTHS[-1:5:-1]
# Flat name/interval pairs: MUSICAL_MODES[2*i] is the display name,
# MUSICAL_MODES[2*i + 1] the semitone offsets from the root.
MUSICAL_MODES = [
    'Major', [0, 2, 4, 5, 7, 9, 11],
    'Minor', [0, 2, 3, 5, 7, 8, 10],
    'Dorian', [0, 2, 3, 5, 7, 9, 10],
    'Mixolydian', [0, 2, 4, 5, 7, 9, 10],
    'Lydian', [0, 2, 4, 6, 7, 9, 11],
    'Phrygian', [0, 1, 3, 5, 7, 8, 10],
    'Locrian', [0, 1, 3, 5, 6, 8, 10],
    'Diminished', [0, 1, 3, 4, 6, 7, 9, 10],
    'Whole-half', [0, 2, 3, 5, 6, 8, 9, 11],
    'Whole Tone', [0, 2, 4, 6, 8, 10],
    'Minor Blues', [0, 3, 5, 6, 7, 10],
    'Minor Pentatonic', [0, 3, 5, 7, 10],
    'Major Pentatonic', [0, 2, 4, 7, 9],
    'Harmonic Minor', [0, 2, 3, 5, 7, 8, 11],
    'Melodic Minor', [0, 2, 3, 5, 7, 9, 11],
    'Super Locrian', [0, 1, 3, 4, 6, 8, 10],
    'Bhairav', [0, 1, 4, 5, 7, 8, 11],
    'Hungarian Minor', [0, 2, 3, 6, 7, 8, 11],
    'Minor Gypsy', [0, 1, 4, 5, 7, 8, 10],
    'Hirojoshi', [0, 2, 3, 7, 8],
    'In-Sen', [0, 1, 5, 7, 10],
    'Iwato', [0, 1, 5, 6, 10],
    'Kumoi', [0, 2, 3, 7, 9],
    'Pelog', [0, 1, 3, 4, 7, 8],
    'Spanish', [0, 1, 3, 4, 5, 6, 8, 10],
    'IonEol', [0, 2, 3, 4, 5, 7, 8, 9, 10, 11]
]
# Highest selectable octave per layout mode (see ScaleComponent._set_top_octave).
TOP_OCTAVE = {"chromatic_gtr": 7, "diatonic_ns": 2, "diatonic_chords": 7, "diatonic": 6,
              "chromatic": 7}
class ScaleComponent(ControlSurfaceComponent):
    """Pad-grid UI for choosing scale (modus), root key, octave and layout.

    Part of an Ableton Live control-surface script built on the _Framework
    package (Live's Python 2 framework -- hence the xrange/old-style-super
    idioms).  Row layout of the 8-wide matrix, mirrored by update() and
    _matrix_pressed():
      row 0   -- layout flags and play-mode selection (drumrack in col 7),
      rows 1-2 -- piano-style key picker, relative major/minor toggle and
                  circle-of-fifths navigation,
      row 3   -- octave selection,
      rows 4-7 -- modus (scale type) selection in banks of 8.
    """
    #matrix = control_matrix(PlayableControl)
    def __init__(self, control_surface = None, enabled = False, mode = "diatonic", *a, **k):
        self._layout_set = False
        # MUSICAL_MODES is a flat [name, intervals, name, intervals, ...] list.
        self._modus_list = [Modus(MUSICAL_MODES[v], MUSICAL_MODES[v + 1]) for v in xrange(0, len(MUSICAL_MODES), 2)]
        self._modus_names = [MUSICAL_MODES[v] for v in xrange(0, len(MUSICAL_MODES), 2)]
        self._control_surface = control_surface
        self._osd = None  # on-screen-display proxy, injected via set_osd()
        self._modus = 0  # index into _modus_list
        self._key = 0  # root key, 0..11 == C..B
        self._octave = 3
        self._mode = mode #diatonic
        self._is_drumrack = False
        self._quick_scale = False
        self._is_horizontal = True
        self._is_absolute = False
        self._interval = 3
        self._matrix = None
        self._top_octave = 6
        # C D E F G A B
        self._white_notes_index = [0, 2, 4, 5, 7, 9, 11]
        self._current_minor_mode = 1 #Natural
        self._minor_modes = [1, 13, 14]#Natural, Harmonic, Melodic
        super(ScaleComponent, self).__init__(*a, **k)
        self.set_enabled(enabled)

    @property
    def notes(self):
        # Absolute note values of the current modus rooted at the current key.
        return self.modus.scale(self._key).notes

    @property
    def modus(self):
        # Currently selected Modus object.
        return self._modus_list[self._modus]

    def set_key(self, key, message = True):
        """Select the root key (0..11); out-of-range values are ignored."""
        if key>=0 and key<=11:
            self._key = key % 12
        if message:
            self._control_surface.show_message(str("Selected Scale: " + KEY_NAMES[self._key])+" "+str(self._modus_names[self._modus]))

    def set_octave(self, octave, message = True):
        """Select the base octave; values outside [0, top_octave) are ignored."""
        if octave>=0 and octave<self._top_octave:
            self._octave = octave
        if message:
            self._control_surface.show_message("Selected octave: " + str(octave))

    def _set_top_octave(self, message = True):
        """Recompute the octave ceiling for the current mode and clamp."""
        if(self.is_drumrack):
            self._top_octave = 8
        else:
            self._top_octave = TOP_OCTAVE[self._mode]
        if(self._octave>=self._top_octave):
            self._octave = self._top_octave -1
            if message:
                self._control_surface.show_message("Selected octave: " + str(self._octave))

    def octave_up(self, message = True):
        self.set_octave(self._octave + 1, message)

    def octave_down(self, message = True):
        self.set_octave(self._octave - 1, message)

    def set_modus(self, index, message = True):
        """Select a scale type by index into the modus list."""
        if index > -1 and index < len(self._modus_list):
            self._modus = index
        if message:
            self._control_surface.show_message(str("selected scale: " + KEY_NAMES[self._key])+" "+str(self._modus_names[self._modus]))

    def set_drumrack(self, drumrack):
        """Toggle drumrack play mode (changes the octave ceiling)."""
        self._is_drumrack = drumrack
        self._set_top_octave(True)

    #def set_matrix(self, matrix):
    #    if not matrix or not self._layout_set:
    #        self._matrix = matrix
    #        #self._matrix.set_control_element(matrix)
    #        for index, button in enumerate(self._matrix):
    #            #button.set_playable(False)
    #            self._layout_set = bool(matrix)
    #        self.update()
    def set_matrix(self, matrix):
        """Attach the button matrix and refresh the display."""
        self._matrix = matrix
        if matrix:
            matrix.reset()
        # NOTE(review): `self._matrix` was just assigned above, so this
        # comparison is always False and the listener add/remove branch is
        # dead code -- confirm whether listener management is still needed.
        if (matrix != self._matrix):
            if (self._matrix != None):
                self._matrix.remove_value_listener(self._matrix_pressed)
            self._matrix = matrix
            if (self._matrix != None):
                self._matrix.add_value_listener(self._matrix_pressed)
        self.update()

    def set_osd(self, osd):
        self._osd = osd

    def _update_OSD(self):
        """Push scale/key/octave info to the on-screen display, if any."""
        if self._osd != None:
            self._osd.attributes[0] = ""
            self._osd.attribute_names[0] = ""
            self._osd.attributes[1] = MUSICAL_MODES[self._modus * 2]
            self._osd.attribute_names[1] = "Scale"
            self._osd.attributes[2] = KEY_NAMES[self._key % 12]
            self._osd.attribute_names[2] = "Root Note"
            self._osd.attributes[3] = self._octave
            self._osd.attribute_names[3] = "Octave"
            self._osd.attributes[4] = " "
            self._osd.attribute_names[4] = " "
            self._osd.attributes[5] = " "
            self._osd.attribute_names[5] = " "
            self._osd.attributes[6] = " "
            self._osd.attribute_names[6] = " "
            self._osd.attributes[7] = " "
            self._osd.attribute_names[7] = " "
            self._osd.update()

    def update(self):
        """Repaint every pad of the matrix to reflect the current state."""
        if self.is_enabled() and self._matrix!=None:
            #self._control_surface.log_message("update scale: "+str(self._matrix))
            super(ScaleComponent, self).update()
            self._update_OSD()
            #for index, button in enumerate(self._matrix):
            for button, (col, row) in self._matrix.iterbuttons():
                #row, col = button.coordinate
                button.set_enabled(True)
                # Row 0: layout flags (cols 0-1) and mode selectors (2-7).
                if row==0:
                    if col == 0:
                        if self._is_absolute:
                            button.set_light("Scale.AbsoluteRoot.On")
                        else:
                            button.set_light("Scale.AbsoluteRoot.Off")
                    elif col == 1:
                        if self._is_horizontal:
                            button.set_light("Scale.Horizontal.On")
                        else:
                            button.set_light("Scale.Horizontal.Off")
                    elif col == 2:
                        if not self.is_drumrack and self._mode == "chromatic_gtr":
                            button.set_light("Scale.Mode.On")
                        else:
                            button.set_light("Scale.Mode.Off")
                    elif col == 3:
                        if not self.is_drumrack and self._mode == "diatonic_ns":
                            button.set_light("Scale.Mode.On")
                        else:
                            button.set_light("Scale.Mode.Off")
                    elif col == 4:
                        if not self.is_drumrack and self._mode == "diatonic_chords":
                            button.set_light("Scale.Mode.On")
                        else:
                            button.set_light("Scale.Mode.Off")
                    elif col == 5:
                        if not self.is_drumrack and self._mode == "diatonic":
                            button.set_light("Scale.Mode.On")
                        else:
                            button.set_light("Scale.Mode.Off")
                    elif col == 6:
                        if not self.is_drumrack and self._mode == "chromatic":
                            button.set_light("Scale.Mode.On")
                        else:
                            button.set_light("Scale.Mode.Off")
                    elif col == 7:
                        if self.is_drumrack:
                            button.set_light("Scale.Mode.On")
                        else:
                            button.set_light("Scale.Mode.Off")
                # Row 1: black keys (piano layout), relative-scale, circle
                # of fifths, quick-scale toggle.
                elif row==1:
                    if self.is_drumrack:
                        if col==7:
                            if self._quick_scale:
                                button.set_light("Scale.QuickScale.On")
                            else:
                                button.set_light("Scale.QuickScale.Off")
                        else:
                            button.set_light("DefaultButton.Disabled")
                    else:
                        if col==0 or col==1 or col==3 or col==4 or col==5:
                            # Black key = the white note below it + 1 semitone.
                            if self._key == self._white_notes_index[col]+1:
                                button.set_light("Scale.Key.On")
                            else:
                                button.set_light("Scale.Key.Off")
                        elif col==2:
                            button.set_light("Scale.RelativeScale")
                        elif col==6:
                            button.set_light("Scale.CircleOfFifths")
                        elif col==7:
                            if self._quick_scale:
                                button.set_light("Scale.QuickScale.On")
                            else:
                                button.set_light("Scale.QuickScale.Off")
                # Row 2: white keys, circle-of-fifths in col 7.
                elif row==2:
                    if self.is_drumrack:
                        button.set_light("DefaultButton.Disabled")
                    else:
                        if col<7:
                            if self._key == self._white_notes_index[col]:
                                button.set_light("Scale.Key.On")
                            else:
                                button.set_light("Scale.Key.Off")
                        else:
                            button.set_light("Scale.CircleOfFifths")
                # Row 3: octave selector (cols beyond top octave disabled).
                elif row==3:
                    if self._octave == col:
                        button.set_light("Scale.Octave.On")
                    else:
                        if(col < self._top_octave):
                            button.set_light("Scale.Octave.Off")
                        else:
                            button.set_light("DefaultButton.Disabled")
                # Rows 4-7: modus banks of 8 (index = (row-4)*8 + col).
                elif row==4:
                    if self.is_drumrack:
                        button.set_light("DefaultButton.Disabled")
                    else:
                        if self._modus == col:
                            button.set_light("Scale.Modus.On")
                        else:
                            button.set_light("Scale.Modus.Off")
                elif row==5:
                    if self.is_drumrack:
                        button.set_light("DefaultButton.Disabled")
                    else:
                        if self._modus == col+8:
                            button.set_light("Scale.Modus.On")
                        else:
                            button.set_light("Scale.Modus.Off")
                elif row==6:
                    if self.is_drumrack:
                        button.set_light("DefaultButton.Disabled")
                    else:
                        if self._modus == col+16:
                            button.set_light("Scale.Modus.On")
                        else:
                            button.set_light("Scale.Modus.Off")
                elif row==7:
                    if self.is_drumrack:
                        button.set_light("DefaultButton.Disabled")
                    else:
                        if col+24>len(self._modus_list):
                            button.set_light("DefaultButton.Disabled")
                        elif self._modus == col+24:
                            button.set_light("Scale.Modus.On")
                        else:
                            button.set_light("Scale.Modus.Off")
                #button.set_enabled(False)
                #button.update()

    #@matrix.pressed
    def _matrix_pressed(self, value, x, y, is_momentary):
        """Handle a pad press at (x, y); `value` > 0 means pressed."""
        message = True
        if self.is_enabled() and value>0:
            #y, x = pad.coordinate
            # modes
            if y == 0:
                #SCALE MODE SELECTION LOGIC
                if not self.is_drumrack:
                    if x == 0:
                        self._is_absolute = not self._is_absolute
                        if self._is_absolute:
                            self._control_surface.show_message("absolute root")
                        else:
                            self._control_surface.show_message("relative root")
                    if x == 1:
                        if self.is_diatonic:
                            self._is_horizontal = not self._is_horizontal
                            if self._is_horizontal:
                                self._control_surface.show_message("Is Horizontal")
                            else:
                                self._control_surface.show_message("Is Vertical")
                if x == 2:
                    self._mode = "chromatic_gtr"
                    self._is_drumrack = False
                    self._interval=3
                    self._is_horizontal= True
                    self._control_surface.show_message("mode: chromatic gtr")
                if x == 3:
                    self._mode = "diatonic_ns"
                    self._is_drumrack = False
                    self._interval=3
                    self._is_horizontal= True
                    self._control_surface.show_message("mode: diatonic not staggered")
                if x == 4:
                    self._mode="diatonic_chords"
                    self._is_drumrack = False
                    self._interval=2
                    self._is_horizontal= False
                    self._control_surface.show_message("mode: diatonic vertical (chords)")
                if x == 5:
                    self._mode="diatonic"
                    self._is_drumrack = False
                    self._interval=3
                    self._is_horizontal= True
                    self._control_surface.show_message("mode: diatonic")
                if x == 6:
                    self._mode="chromatic"
                    self._is_drumrack = False
                    self._interval=3
                    self._is_horizontal=True
                    self._control_surface.show_message("mode: chromatic")
                if x == 7:
                    self.set_drumrack(True)
                    self._control_surface.show_message("mode: drumrack")
                self._set_top_octave(True)
            #ROOT/SCALE SELECTION LOGIC
            keys = ["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"]
            # root note
            if not self.is_drumrack:
                root = -1
                selected_key = self._key
                selected_modus = self._modus
                # Piano-style key pick: row 1 = black keys, row 2 = white keys.
                if y == 1 and x in[0, 1, 3, 4, 5] or y == 2 and x < 7:
                    root = [0, 2, 4, 5, 7, 9, 11, 12][x]
                    if y == 1:
                        root = root + 1
                    # if root == selected_key:#alternate minor/major
                    #     if selected_modus==0:
                    #         selected_modus = self._current_minor_mode
                    #     elif selected_modus in [1,13,14]:
                    #         self._current_minor_mode = selected_modus
                    #         selected_modus = 0
                    #     elif selected_modus==11:
                    #         selected_modus = 12
                    #     elif selected_modus==12:
                    #         selected_modus = 11
                # nav circle of 5th right ->
                if y == 2 and x == 7:
                    root = CIRCLE_OF_FIFTHS[(CIRCLE_OF_FIFTHS.index(selected_key) + 1 + 12) % 12]
                    self._control_surface.show_message("circle of 5ths -> "+keys[selected_key]+" "+str(self._modus_names[selected_modus])+" => "+keys[root]+" "+str(self._modus_names[selected_modus]))
                    message = False
                # nav circle of 5th left <-
                if y == 1 and x == 6:
                    root = CIRCLE_OF_FIFTHS[(CIRCLE_OF_FIFTHS.index(selected_key) - 1 + 12) % 12]
                    self._control_surface.show_message("circle of 5ths <- "+keys[selected_key]+" "+str(self._modus_names[selected_modus])+" => "+keys[root]+" "+str(self._modus_names[selected_modus]))
                    message = False
                # relative major/minor scale
                if y == 1 and x == 2:
                    if self._modus == 0: #Ionian Mode (Major)
                        selected_modus = self._current_minor_mode
                        root = CIRCLE_OF_FIFTHS[(CIRCLE_OF_FIFTHS.index(selected_key) + 3) % 12] # Jump up 3 steps in 5th Circle equals jumping a third minor down
                    elif self._modus in [1, 13, 14]:#Natural (Aeolian), Harmonic, Melodic Minor
                        self._current_minor_mode = selected_modus
                        selected_modus = 0
                        root = CIRCLE_OF_FIFTHS[(CIRCLE_OF_FIFTHS.index(selected_key) - 3 + 12) % 12] # Jump down 3 steps in 5th Circle equals jumping a third minor up
                    elif self._modus == 11: #Minor Pentatonic
                        selected_modus = 12
                        root = CIRCLE_OF_FIFTHS[(CIRCLE_OF_FIFTHS.index(selected_key) - 3) % 12]
                    elif self._modus == 12: #Major Pentatonic
                        selected_modus = 11
                        root = CIRCLE_OF_FIFTHS[(CIRCLE_OF_FIFTHS.index(selected_key) + 3 + 12) % 12]
                    self._control_surface.show_message("Relative scale : "+keys[root]+" "+str(self._modus_names[selected_modus]))
                    message = False
                if root != -1:
                    self.set_modus(selected_modus, message)
                    self.set_key(root, message)
            #QuickScale
            if y == 1 and x == 7: #and not self.is_drumrack:
                self._quick_scale = not self._quick_scale
                if self._quick_scale:
                    self._control_surface.show_message("Quick scale ON")
                else:
                    self._control_surface.show_message("Quick scale OFF")
            # octave
            if y == 3:
                self.set_octave(x)
                self._control_surface.show_message("octave : " + str(self._octave))
            # modus (Scale)
            if y > 3 and not self.is_drumrack:
                self.set_modus(((y - 4) * 8 + x),message)
                self._control_surface.show_message(str("Selected Scale: " + KEY_NAMES[self._key])+" "+str(self._modus_names[self._modus]))
            self.update()

    #@matrix.released
    def matrix_release(self, pad):
        # Releases are currently ignored; kept for interface compatibility.
        pass
        # selected_drum_pad = self._coordinate_to_pad_map[pad.coordinate]
        # if selected_drum_pad in self._selected_pads:
        #     self._selected_pads.remove(selected_drum_pad)
        #     if not self._selected_pads:
        #         self._update_control_from_script()
        #     self.notify_pressed_pads()
        # self._update_led_feedback()

    @property
    def is_drumrack(self):
        return self._is_drumrack

    @property
    def is_diatonic(self):
        return not self.is_drumrack and (self._mode == "diatonic" or self._mode == "diatonic_ns" or self._mode == "diatonic_chords")

    @property
    def is_chromatic(self):
        return not self.is_drumrack and (self._mode =="chromatic" or self._mode == "chromatic_gtr")

    @property
    def is_diatonic_ns(self):
        return self._mode == "diatonic_ns"

    @property
    def is_chromatic_gtr(self):
        return self._mode == "chromatic_gtr"

    @property
    def is_quick_scale(self):
        return self._quick_scale

    def get_pattern(self):
        """Build the MelodicPattern describing the current pad-to-note map."""
        notes = self.notes
        # origin
        if not self._is_absolute:
            origin = 0
        elif self.is_diatonic:
            origin = 0
            for k in xrange(len(notes)):
                if notes[k] >= 12:
                    origin = k - len(notes)
                    break
        else:
            origin = -notes[0]
        # interval
        if self._interval == None:
            interval = 8
        elif self.is_chromatic:
            # Chromatic layouts space rows by a scale interval converted
            # to semitones.
            interval = [0, 2, 4, 5, 7, 9, 10, 11][self._interval]
        else:
            interval = self._interval
        # layout
        if self._is_horizontal:
            steps = [1, interval]
            origin = [origin, 0]
        else:
            steps = [interval, 1]
            origin = [0, origin]
        return MelodicPattern(
            steps = steps,
            scale = notes,
            origin = origin,
            base_note = (self._octave + 1) * 12,
            chromatic_mode = self.is_chromatic,
            chromatic_gtr_mode = self.is_chromatic_gtr,
            diatonic_ns_mode = self.is_diatonic_ns
        )
class Scale(object):
    """A named collection of absolute note values.

    Built from a display name and the list of scale steps,
    e.g. Scale('Major', [0, 2, 4, 5, 7, 9, 11]).
    """
    def __init__(self, name, notes, *a, **k):
        super(Scale, self).__init__(*a, **k)
        self.name, self.notes = name, notes
class Modus(Scale):
    """A scale shape (interval pattern) that can be rooted on any key."""
    def __init__(self, *a, **k):
        super(Modus, self).__init__(*a, **k)

    def scale(self, base_note):
        """Return a Scale transposed so that its root is `base_note`."""
        transposed = [base_note + step for step in self.notes]
        return Scale(KEY_NAMES[base_note], transposed)

    def scales(self, base_notes):
        """Build one transposed Scale per requested root note."""
        return [self.scale(root) for root in base_notes]
class MelodicPattern(object):
    """Maps (x, y) pad coordinates to MIDI notes for a scale layout.

    `steps` gives the semitone-index advance per column/row, `origin`
    shifts the grid, and `base_note` anchors octave 0 of the grid to a
    MIDI note.  The three mode flags tweak the row spacing (see
    _octave_and_note).

    Fixes over the original: the mutable default arguments ([0, 0] for
    `steps`/`origin`) were shared across instances -- and
    _octave_and_note() writes `self.steps[1]`, so every instance mutated
    the shared default; `xrange` (Python 2 only) replaced by `range`; and
    the octave computation uses floor division explicitly so it matches
    the Python 2 behaviour of `/` on ints under Python 3 as well.
    """
    def __init__(self,
                 steps=None,
                 scale=None,
                 base_note=0,
                 origin=None,
                 valid_notes=range(128),
                 chromatic_mode=False,
                 chromatic_gtr_mode=False,
                 diatonic_ns_mode=False,
                 *a, **k):
        super(MelodicPattern, self).__init__(*a, **k)
        # Copy mutable arguments so callers' lists are never mutated
        # behind their back (see steps[1] writes below).
        self.steps = list(steps) if steps is not None else [0, 0]
        self.scale = list(scale) if scale is not None else list(range(12))
        self.base_note = base_note
        self.origin = list(origin) if origin is not None else [0, 0]
        self.valid_notes = valid_notes
        self.chromatic_mode = chromatic_mode
        self.chromatic_gtr_mode = chromatic_gtr_mode
        self.diatonic_ns_mode = diatonic_ns_mode

    class NoteInfo:
        """Per-pad note description consumed by the grid renderer."""
        def __init__(self, index, channel, root = False, highlight = False, in_scale = False, valid = False):
            self.index = index        # MIDI note number (0..127)
            self.channel = channel    # pad column, used as MIDI channel
            self.root = root          # True when the note is the scale root
            self.highlight = highlight  # True for the 3rd/5th degree
            self.in_scale = in_scale
            self.valid = valid        # index is inside valid_notes

    @property
    def _extended_scale(self):
        """The scale actually laid on the grid: a full chromatic octave
        starting at the root in chromatic mode, otherwise the scale itself."""
        if self.chromatic_mode:
            first_note = self.scale[0]
            return range(first_note, first_note + 12)
        else:
            return self.scale

    def _octave_and_note(self, x, y):
        """Return (octave, note) within the extended scale for pad (x, y)."""
        scale = self._extended_scale
        scale_size = len(scale)
        if self.chromatic_mode:
            # Chromatic layouts are always staggered in fourths.
            self.steps[1] = 5
        elif self.diatonic_ns_mode:
            # Non-staggered diatonic: every row starts one octave up.
            self.steps[1] = scale_size
        index = self.steps[0] * (self.origin[0] + x) + self.steps[1] * (self.origin[1] + y)
        if self.chromatic_gtr_mode and y > 3:
            # Guitar-tuning quirk: rows above the 4th shift down one slot.
            index = index - 1
        octave = index // scale_size  # floor division, correct for negative indices too
        note = scale[index % scale_size]
        return (octave, note)

    def note(self, x, y):
        """Build the NoteInfo for pad (x, y).

        NOTE(review): highlight indexes scale[2]/scale[4], so the scale is
        assumed to have at least five degrees (true for all MUSICAL_MODES
        defined in this file).
        """
        octave, note = self._octave_and_note(x, y)
        index = (self.base_note + 12 * octave + note) % 128
        root = note == self.scale[0]
        highlight = note == self.scale[2] or note == self.scale[4]
        in_scale = note in self.scale
        valid = index in self.valid_notes
        return self.NoteInfo(
            index,
            x,
            root = root,
            highlight = highlight,
            in_scale = in_scale,
            valid = valid
        )
|
from artifactory_cleanup import rules
import custom_rules
from policy import RULES
def test_repo_rules():
    """Every configured policy entry must expose a string name."""
    for repo_policy in RULES:
        assert isinstance(repo_policy.name, str)
def test_keep_latest_n_version():
    """Keeping two versions per stream marks everything older for removal."""
    rule = rules.keep_latest_nupkg_n_version(2)

    def _pkg(version):
        # Minimal nupkg artifact fixture.
        return {"name": ".nupkg", "properties": {"nuget.id": "Package", "nuget.version": version}}

    artifacts = [_pkg(v) for v in (
        "16.0.108", "16.0.110", "16.0.113",
        "16.0.109-Feature", "16.0.110-Feature", "16.0.111-Feature",
    )]
    expected = [_pkg("16.0.108"), _pkg("16.0.109-Feature")]
    assert rule.filter_result(artifacts) == expected
def test_keep_latest_n_version_with_tar_gz():
    """Non-nupkg artifacts pass through to the delete list untouched."""
    rule = rules.keep_latest_nupkg_n_version(1)

    def _pkg(version):
        return {"name": ".nupkg", "properties": {"nuget.id": "Package", "nuget.version": version}}

    artifacts = [{"name": ".tar.gz"}] + [_pkg(v) for v in (
        "16.0.110", "16.0.113", "16.0.110-Feature", "16.0.111-Feature",
    )]
    expected = [{"name": ".tar.gz"}, _pkg("16.0.110"), _pkg("16.0.110-Feature")]
    assert rule.filter_result(artifacts) == expected
def test_keep_latest_n_version_one():
    """With n=1 only the newest of each stream survives."""
    rule = rules.keep_latest_nupkg_n_version(1)

    def _pkg(version):
        return {"name": ".nupkg", "properties": {"nuget.id": "Package", "nuget.version": version}}

    artifacts = [_pkg(v) for v in (
        "16.0.110", "16.0.113", "16.0.110-Feature", "16.0.111-Feature",
    )]
    expected = [_pkg("16.0.110"), _pkg("16.0.110-Feature")]
    assert rule.filter_result(artifacts) == expected
def test_keep_latest_n_version_empty():
    """Nothing is deleted when each stream holds at most n versions."""
    rule = rules.keep_latest_nupkg_n_version(2)

    def _pkg(version):
        return {"name": ".nupkg", "properties": {"nuget.id": "Package", "nuget.version": version}}

    artifacts = [_pkg(v) for v in (
        "16.0.110", "16.0.113", "16.0.110-Feature", "16.0.111-Feature",
    )]
    assert rule.filter_result(artifacts) == []
def test_keep_latest_n_version_patch():
    """Four-component versions are compared correctly; the oldest surplus one is deleted."""
    rule = rules.keep_latest_nupkg_n_version(2)

    def _pkg(version):
        return {"name": ".nupkg", "properties": {"nuget.id": "Package", "nuget.version": version}}

    artifacts = [_pkg(v) for v in (
        "16.0.2.109-Feature", "16.0.1.111-Feature", "16.0.1.110-Feature",
    )]
    to_delete = [_pkg("16.0.1.110-Feature")]
    assert rule.filter_result(artifacts) == to_delete
def test_keep_latest_n_file():
    """With n=2 the three oldest of five files are marked for deletion."""
    rule = rules.keep_latest_n_file(2)
    artifacts = [{"path": 1, "name": n} for n in range(1, 6)]
    to_delete = [{"path": 1, "name": n} for n in range(1, 4)]
    assert rule.filter_result(artifacts) == to_delete
def test_keep_latest_n_file_empty():
    """Nothing is deleted when n exceeds the file count."""
    rule = rules.keep_latest_n_file(10)
    artifacts = [{"path": 1, "name": n} for n in range(1, 6)]
    assert rule.filter_result(artifacts) == []
def test_keep_latest_n_file_in_folder():
    """The newest two files survive per folder; older ones are deleted."""
    rule = rules.keep_latest_n_file_in_folder(2)
    artifacts = [
        {"path": folder, "name": n}
        for folder, total in ((1, 5), (2, 5), (3, 3))
        for n in range(1, total + 1)
    ]
    to_delete = [
        {"path": folder, "name": n}
        for folder, surplus in ((1, 3), (2, 3), (3, 1))
        for n in range(1, surplus + 1)
    ]
    assert rule.filter_result(artifacts) == to_delete
def test_keep_latest_n_file_in_folder_empty():
    """Nothing is deleted when every folder holds fewer than n files."""
    rule = rules.keep_latest_n_file_in_folder(100)
    artifacts = [
        {"path": folder, "name": n}
        for folder, total in ((1, 5), (2, 5), (3, 3))
        for n in range(1, total + 1)
    ]
    assert rule.filter_result(artifacts) == []
def test_keep_latest_version_n_file_in_folder():
    """Per package prefix, only the latest version is kept; the older one per prefix is deleted."""
    rule = rules.keep_latest_version_n_file_in_folder(1)

    def _art(name):
        return {"name": name, "path": "repo/folder"}

    artifacts = [_art(n) for n in (
        "name.1.2.100.tar.gz",
        "name.1.2.200.tar.gz",
        "new_name_1.2.3.101.tar.gz",
        "new_name_1.2.4.100.tar.gz",
    )]
    to_delete = [_art("name.1.2.100.tar.gz"), _art("new_name_1.2.3.101.tar.gz")]
    assert rule.filter_result(artifacts) == to_delete
def test_delete_if_image_not_contained_in_properties():
    """get_properties_dict turns 'prefix.image': 'tag' pairs into {image: {tag: True}}."""
    rule = rules.delete_docker_image_if_not_contained_in_properties(
        "docker-repo", "test_docker."
    )
    artifacts = [
        {"properties": {"test_docker.test1": "tag1"}},
        {"properties": {"test_docker.test2": "tag2"}},
    ]
    expected = {"test1": {"tag1": True}, "test2": {"tag2": True}}
    assert rule.get_properties_dict(artifacts) == expected
def test_delete_images_older_than_n_days():
    """Manifest entries are rewritten to image/tag pairs for deletion."""
    rule = rules.delete_docker_images_older_than(days=10)
    # Stub out the size collection so filter_result stays offline.
    rule._collect_docker_size = lambda x: x
    manifests = [
        {"path": "repo/image/%s" % tag, "name": "manifest.json"}
        for tag in ("tag", "tag1", "tag2")
    ]
    expected = [
        {"path": "repo/image", "name": tag}
        for tag in ("tag", "tag1", "tag2")
    ]
    assert rule.filter_result(manifests) == expected
def test_keep_latest_n_file_in_folder_by_version():
    """Across branches, only the oldest surplus build of a stream is deleted."""
    rule = custom_rules.keep_latest_cross_package_n_version(2)

    def _art(branch, version):
        return {
            "name": "package-name.%s.tar.gz" % version,
            "path": "package-name/%s/%s/other/folder/inside" % (branch, version),
        }

    artifacts = [
        _art("master", "0.50.100"),
        _art("develop", "0.50.90"),
        _art("master", "0.50.201"),
        _art("master", "0.50.94"),
        _art("develop", "0.51.104"),
        _art("release", "0.51.105"),
    ]
    to_delete = [_art("master", "0.50.94")]
    assert rule.filter_result(artifacts) == to_delete
def test_keep_latest_n_file_in_folder_by_version_does_not_suit_check_for_major_minor():
    """A version with the wrong number of digit groups is never deleted
    (see 0.50.1.02 in the fixture)."""
    rule = custom_rules.keep_latest_cross_package_n_version(2)

    def _art(branch, version):
        return {
            "name": "package-name.%s.tar.gz" % version,
            "path": "package-name/%s/%s/other/folder/inside" % (branch, version),
        }

    artifacts = [
        _art("master", "0.50.100"),
        _art("develop", "0.50.101"),
        _art("master", "0.50.1.02"),
        _art("master", "0.50.103"),
        _art("develop", "0.50.104"),
        _art("master", "0.50.105"),
    ]
    to_delete = [_art("master", "0.50.100")]
    assert rule.filter_result(artifacts) == to_delete
def test_keep_latest_n_file_in_folder_by_version_multiple_versions_in_path():
    """An artifact whose path contains a version more than once is not
    deleted -- most likely the branch was named like a version, or the
    version ended up in the path twice (see /0.50/0.50.103/)."""
    rule = custom_rules.keep_latest_cross_package_n_version(1)

    def _art(branch, version):
        return {
            "name": "package-name.%s.tar.gz" % version,
            "path": "package-name/%s/%s/other/folder/inside" % (branch, version),
        }

    artifacts = [
        _art("master", "0.50.100"),
        _art("develop", "0.50.101"),
        _art("0.50", "0.50.102"),
        _art("0.50", "0.50.103"),
        _art("master", "0.50.104"),
    ]
    to_delete = [_art("master", "0.50.100"), _art("0.50", "0.50.102")]
    assert rule.filter_result(artifacts) == to_delete
def test_delete_files_that_do_not_exist_in_other_repository():
    rule = custom_rules.delete_files_that_do_not_exist_in_other_repository(
        "other_repository", "property"
    )

    def artifact(version, prop=None):
        entry = {
            "name": "package-name.{}.tar.gz".format(version),
            "path": "package-name/master/{}/other/folder/inside".format(version),
        }
        if prop is not None:
            entry["properties"] = {"property": prop}
        return entry

    result = [
        artifact("0.50.100", "95117"),
        artifact("0.50.101", "95118"),
        artifact("0.50.102", "95119"),
        artifact("0.50.103"),
    ]
    artifacts_in_other_repo = [
        artifact("0.50.100", "95117"),
        artifact("0.50.101", "95118"),
        artifact("0.50.102"),
    ]
    # Only 0.50.102 survives: its property value ("95119") is not present on
    # any artifact in the other repository.
    result_expected = [artifact("0.50.102", "95119")]
    filtered = rule.remove_artifacts_from_result_artifact_if_property_exists_in_other_repository(
        result, artifacts_in_other_repo
    )
    assert filtered == result_expected
def test_docker_values():
    rule = rules.delete_docker_image_if_not_contained_in_properties_value(
        "docker-repo", "test_docker."
    )
    # Only property keys that start with the configured prefix count:
    # "no_test_docker.test3" contains the prefix only as a substring and is
    # excluded, as is the entry without a "properties" key at all.
    result = [
        {"properties": {"test_docker.test1": "value1"}},
        {"properties": {"test_docker.test2": "value2"}},
        {"properties": {"no_test_docker.test3": "value3"}},
        {"no_properties": {"test_key4": "value4"}},
    ]
    assert rule.get_properties_values(result) == {"value1", "value2"}
|
#!/usr/bin/env python
import pyparsing as pp
# --- pyparsing grammar for AvaraScript level files ---
lpar = pp.Literal("(")
rpar = pp.Literal(")")
# Double-quoted, possibly multi-line string; '""' is an escaped quote.
string = pp.QuotedString('"', escQuote='""', multiline=True)
# Reserved words that may not be used as identifiers.
keyword = (
    pp.Keyword("unique")
    | pp.Keyword("enum")
    | pp.Keyword("object")
    | pp.Keyword("end")
    | pp.Keyword("adjust")
)
# Optionally negated numeric literal (int or float).
number = pp.Regex(r"\-?[0-9\.]+")
# Both C-style /* ... */ and line comments are recognized.
inline_comment = pp.Literal("//").suppress() + pp.restOfLine
comment = pp.cStyleComment | inline_comment
# Identifier: must not be a keyword; may contain dots, brackets, braces, etc.
name = ~keyword + pp.Word(pp.alphas, pp.alphanums + r"._[]{}\\|")
op = pp.oneOf("+ - * / % ^ | < >")
unary_op = pp.oneOf("- |")
# '@name' or '@<digits>' reference to an earlier declaration.
reference = pp.Literal("@").suppress() + (name | pp.Regex(r"[0-9]+"))
atom = string | reference | number | name
# TODO: are min/max even implemented in Avara?
func0 = pp.oneOf("random")  # zero-argument functions
func1 = pp.oneOf("sin cos int round")  # one-argument functions
# This would be great if we cared to evaluate these expressions and not just match them.
# expr = pp.infixNotation(atom, [
#     (unary_op, 1, pp.opAssoc.RIGHT),
#     (function, 1, pp.opAssoc.RIGHT),
#     (op, 2, pp.opAssoc.LEFT),
# ])
# Recursive expression grammar: the goal is to *match* expressions, not
# evaluate them, so precedence is deliberately ignored.
expr = pp.Forward()
expr <<= (
    pp.ZeroOrMore(unary_op) + (func0 | (func1 + expr) | atom) + pp.ZeroOrMore(op + expr)
) | (pp.ZeroOrMore(unary_op) + lpar + expr + rpar)
# 'name = expr' or '@ref = expr'.
declaration = (reference | name) + pp.Literal("=").suppress() + expr
end = pp.Literal("end").suppress()
obj_body = pp.ZeroOrMore(comment.suppress() | declaration)
obj = pp.Literal("object") + name + obj_body + end
adjust = pp.Literal("adjust") + name + obj_body + end
unique = (
    pp.Literal("unique").suppress() + pp.Optional(number) + pp.ZeroOrMore(name) + end
)
enum = pp.Literal("enum").suppress() + number + pp.OneOrMore(name) + end
decl_group = pp.OneOrMore(declaration)
# Legacy files may contain non-ASCII junk bytes; skip them silently.
non_ascii = pp.Regex(r"[^\x00-\xff]+").suppress()
script = pp.ZeroOrMore(
    comment.suppress() | decl_group | unique | enum | adjust | obj | non_ascii | end
)
# Context attributes copied onto an object's element when not overridden.
# DEFAULT_CONTEXT applies to any object type not listed in OBJ_CONTEXT.
DEFAULT_CONTEXT = ("fill", "frame", "cx", "cz", "r", "angle", "extent")

# Shared attribute sets — most box-like objects use the same full tuple.
_COLORS_ONLY = ("fill", "frame")
_BOX_FULL = (
    "fill", "frame", "x", "y", "z", "w", "d", "h",
    "r", "cx", "cz", "angle", "extent",
)
_BOX_NO_CENTER = (
    "fill", "frame", "x", "y", "z", "w", "d", "h",
    "r", "angle", "extent",
)

OBJ_CONTEXT = {
    "SkyColor": _COLORS_ONLY,
    "GroundColor": _COLORS_ONLY,
    "Wall": ("fill", "frame", "x", "y", "z", "w", "d", "h"),
    "WallDoor": _BOX_FULL,
    "WallSolid": _BOX_FULL,
    "FreeSolid": _BOX_FULL,
    "Field": _BOX_FULL,
    "YonBox": _BOX_NO_CENTER,
    "Ramp": _BOX_NO_CENTER,
}
def object_context(name, context):
    """Return the subset of *context* that applies to object type *name*."""
    relevant = OBJ_CONTEXT.get(name, DEFAULT_CONTEXT)
    selected = {}
    for key in relevant:
        if key in context:
            selected[key] = context[key]
    return selected
def xmlchars(astr):
    """Escape *astr* for embedding in XML attribute values or text.

    Fix: the previous version never escaped ``&``, which produces invalid
    XML for any input containing an ampersand.  ``&`` must be escaped first
    so the entities produced for the other characters are not re-escaped.
    """
    return (
        astr.replace("&", "&amp;")
        .replace('"', "&quot;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
    )
class Element(object):
    """Minimal XML element builder used to serialize parsed script objects.

    Children may be other Element instances or plain strings.
    """

    def __init__(self, tag, *children, **attrs):
        self.tag = tag
        self.children = list(children)
        self.attrs = attrs

    def __str__(self):
        return self.xml()

    def pop_last(self, tag):
        # Remove and return the *last* child with the given tag, scanning
        # from the end.  Returns None (implicitly) when no child matches.
        idx = len(self.children) - 1
        for el in reversed(self.children):
            if el.tag == tag:
                break
            idx = idx - 1
        if idx >= 0:
            return self.children.pop(idx)

    def xml(self, indent=2):
        """Serialize to an XML string; *indent* spaces prefix each child element."""
        attrs = "".join(
            ' {}="{}"'.format(name, xmlchars(str(value)))
            for name, value in self.attrs.items()
        )
        if self.children:
            text = "\n"
            for child in self.children:
                if isinstance(child, str):
                    # String children are escaped but get no indent or
                    # trailing newline, unlike element children below.
                    text += xmlchars(child)
                else:
                    text += (" " * indent) + str(child) + "\n"
            return "<{tag}{attrs}>{text}</{tag}>".format(
                tag=self.tag,
                attrs=attrs,
                text=text,
            )
        else:
            # No children: emit a self-closing tag.
            return "<{tag}{attrs} />".format(
                tag=self.tag,
                attrs=attrs,
            )
class ScriptObject(object):
    """Base class for every node produced by the AvaraScript parser."""

    def __repr__(self):
        return type(self).__name__

    def process(self, context):
        """Update *context* for this node; True means "emit my element"."""
        return True
class String(ScriptObject):
    """A quoted string literal parsed from a script."""

    def __init__(self, toks):
        self.text = toks[0].strip()

    def __str__(self):
        # Double embedded quotes to match the script's escape convention.
        return self.text.replace('"', '""')
class Number(ScriptObject):
    """A numeric literal; float when it has a decimal point, else int."""

    def __init__(self, toks):
        raw = toks[0]
        self.num = float(raw) if "." in raw else int(raw)

    def __str__(self):
        return str(self.num)
class Reference(ScriptObject):
    """An ``@name`` reference to a previously declared value."""

    def __init__(self, toks):
        self.name = toks[0]

    def __str__(self):
        return "@" + str(self.name)
class Declaration(ScriptObject):
    """A single ``name = expr`` assignment from the script."""

    def __init__(self, tokens):
        # Normalize bracketed names: "a[b]" -> "a.b".
        self.name = str(tokens[0]).replace("[", ".").replace("]", "")
        if self.name == "ambient":
            self.name = "ambient.i"
        self.expr = tokens[1:]

    @property
    def value(self):
        """Re-render the expression tokens as a single string."""
        # Bare (unquoted) values for these text-ish fields get a "$" prefix.
        if (
            self.name in ("designer", "information", "text")
            and len(self.expr) == 1
            and not isinstance(self.expr[0], String)
        ):
            return "$" + str(self.expr[0])
        parts = []
        for t in self.expr:
            parts.append(str(t))
            # No space after unary "-"/"|" so they stay attached to the operand.
            if t not in ("-", "|"):
                parts.append(" ")
        return "".join(parts).strip()

    def __str__(self):
        return "{} = {}".format(self.name, self.value)

    def element(self, context):
        return Element("set", **{self.name: self.value})

    def process(self, context):
        # Record the value in the ambient context; return False to indicate
        # the declaration should NOT also be emitted as a <set> element.
        context[self.name] = self.value
        if self.name == "wa":
            try:
                # Set "wa" as "y" on the Wall object directly, not in context.
                float(self.value)
                return False
            except ValueError:
                pass
        return True
class Unique(ScriptObject):
    """A ``unique ... end`` block listing variable names."""

    def __init__(self, toks):
        self.names = toks

    def _joined(self):
        # Space-separated rendering shared by __str__ and element().
        return " ".join(str(name) for name in self.names)

    def __str__(self):
        return "unique {} end".format(self._joined())

    def element(self, context):
        return Element("unique", vars=self._joined())
class Enum(ScriptObject):
    """An ``enum <start> name... end`` block."""

    def __init__(self, toks):
        self.start = toks[0]
        self.names = toks[1:]

    def _joined(self):
        # Space-separated rendering shared by __str__ and element().
        return " ".join(str(name) for name in self.names)

    def __str__(self):
        return "enum {} {} end".format(self.start, self._joined())

    def element(self, context):
        return Element("enum", start=self.start, vars=self._joined())
class Object(ScriptObject):
    """An ``object <name> ... end`` (or ``adjust``) block of declarations."""

    def __init__(self, tokens):
        # tokens: [tag ("object"/"adjust"), object type name, declarations...]
        self.tag = tokens[0]
        self.name = tokens[1]
        self.declarations = tokens[2:]

    def __str__(self):
        decls = "\n".join(" " + str(d) for d in self.declarations)
        return "{} {}\n{}\nend".format(self.tag, self.name, decls)

    def element(self, context):
        # Start from the explicit declarations, then overlay the relevant
        # ambient context values (update() means context wins on key clash).
        attrs = {d.name: d.value for d in self.declarations}
        attrs.update(object_context(self.name, context))
        return Element(self.name, **attrs)
class DeclarationGroup(ScriptObject):
    """A run of consecutive top-level declarations emitted as one <set>."""

    def __init__(self, tokens):
        self.declarations = tokens

    def __str__(self):
        return "\n".join(str(d) for d in self.declarations)

    def element(self, context):
        # Only include declaration attributes which would have been processed.
        # A throwaway context is used so calling process() here does not
        # mutate the real ambient context.
        fake_context = {}
        # NOTE(review): Declaration.__init__ already rewrites "ambient" to
        # "ambient.i", so the conditional below looks redundant — confirm.
        attrs = {d.name if d.name != "ambient" else "ambient.i": d.value
                 for d in self.declarations if d.process(fake_context)}
        return Element("set", **attrs)

    def process(self, context):
        # Process every declaration (for its context side effects); report
        # True if any of them wants an element emitted.
        bools = [d.process(context) for d in self.declarations]
        return any(bools)
# Attach node constructors to the grammar so parsing yields ScriptObject
# instances instead of raw token lists.  "adjust" reuses the Object node.
string.setParseAction(String)
reference.setParseAction(Reference)
enum.setParseAction(Enum)
unique.setParseAction(Unique)
declaration.setParseAction(Declaration)
decl_group.setParseAction(DeclarationGroup)
obj.setParseAction(Object)
adjust.setParseAction(Object)
class ScriptParseError(Exception):
    """Raised when input text cannot be parsed as AvaraScript."""
def parse_script(text, strict=True):
    """Parse *text* as AvaraScript and return the pyparsing result list.

    Parameters
    ----------
    text: str
        the script source to parse
    strict: bool
        when True, require the entire input to match (parseAll=True)

    Raises
    ------
    ScriptParseError
        when parsing fails.  The underlying pyparsing exception is chained
        (``from exc``) so the failing line/column stays visible; the previous
        version discarded it, hiding the error location.
    """
    try:
        return script.parseString(text, parseAll=strict)
    except pp.ParseException as exc:
        raise ScriptParseError("Failed to parse AvaraScript:\n" + text) from exc
if __name__ == "__main__":
    # CLI mode: read a script from stdin and echo each parsed node back out.
    import sys
    for t in parse_script(sys.stdin.read()):
        print(t)
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.db import migrations, models
import saas.models
class Migration(migrations.Migration):
    """Add SaaSApp.logo and move SaaSUploadFile.file onto OverwriteStorage."""

    dependencies = [
        ('saas', '0005_auto_20161101_1025'),
    ]
    operations = [
        migrations.AddField(
            model_name='saasapp',
            name='logo',
            # Optional app logo image, stored under MEDIA_ROOT/applogo.
            field=models.ImageField(null=True, upload_to=b'applogo', blank=True),
        ),
        migrations.AlterField(
            model_name='saasuploadfile',
            name='file',
            # NOTE(review): OverwriteStorage presumably replaces an existing
            # file of the same name instead of renaming the new upload —
            # confirm against saas.models.  verbose_name is "文件" ("file").
            field=models.FileField(upload_to=b'saas_files', storage=saas.models.OverwriteStorage(), verbose_name='\u6587\u4ef6'),
        ),
    ]
|
# -*- coding=utf-8 -*-
import copy
import json
import random
import re
import time
from collections import OrderedDict
from time import sleep
import TickerConfig
from config.urlConf import urls
from inter.GetPassCodeNewOrderAndLogin import getPassCodeNewOrderAndLogin1
from inter.GetRandCode import getRandCode
from inter.LoginAysnSuggest import loginAysnSuggest
from inter.LoginConf import loginConf
from myException.UserPasswordException import UserPasswordException
class GoLogin:
    """Drives the 12306 login flow: captcha fetch/verify, credential login,
    token exchange and browser-fingerprint cookie acquisition."""

    def __init__(self, session, is_auto_code, auto_code_type):
        # session: shared request wrapper exposing .httpClint and .urls.
        self.session = session
        # Captcha answer for the current attempt; filled in by go_login().
        self.randCode = ""
        # Whether captchas are solved automatically, and which backend to use.
        self.is_auto_code = is_auto_code
        self.auto_code_type = auto_code_type

    def auth(self):
        """Warm up the CDN login page, then hit the uamtk-static endpoint.

        :return: response of the uamtk-static request
        """
        self.session.httpClint.send(self.session.urls["loginInitCdn1"])
        uamtkStaticUrl = self.session.urls["uamtk-static"]
        uamtkStaticData = {"appid": "otn"}
        return self.session.httpClint.send(uamtkStaticUrl, uamtkStaticData)

    def codeCheck(self):
        """Submit the captcha answer (self.randCode) for server-side checking.

        :return: True when accepted; None otherwise (cookies are reset)
        """
        # codeCheck = self.session.urls["codeCheck"]
        # codeCheckData = {
        #     "answer": self.randCode,
        #     "rand": "sjrand",
        #     "login_site": "E"
        # }
        # fresult = self.session.httpClint.send(codeCheck, codeCheckData)
        codeCheckUrl = copy.deepcopy(self.session.urls["codeCheck1"])
        codeCheckUrl["req_url"] = codeCheckUrl["req_url"].format(self.randCode, int(time.time() * 1000))
        fresult = self.session.httpClint.send(codeCheckUrl)
        if not isinstance(fresult, str):
            print("登录失败")
            return
        # The endpoint answers with JSONP: strip the callback wrapper and
        # parse the payload.  NOTE(review): eval() on server-controlled text
        # is unsafe — json.loads on the stripped payload would be safer.
        fresult = eval(fresult.split("(")[1].split(")")[0])
        if "result_code" in fresult and fresult["result_code"] == "4":
            print(u"验证码通过,开始登录..")
            return True
        else:
            if "result_message" in fresult:
                print(fresult["result_message"])
                sleep(1)
                self.session.httpClint.del_cookies()

    def baseLogin(self, user, passwd):
        """Submit username/password together with the captcha answer.

        :param user: account name
        :param passwd: password
        :return: auth token string (newapptk) on success, otherwise False
        :raises UserPasswordException: when the server reports a wrong password
        """
        logurl = self.session.urls["login"]
        loginData = OrderedDict()
        # NOTE(review): the trailing commas make each value a 1-tuple,
        # e.g. ("user",) — confirm the HTTP layer expects/normalizes this.
        loginData["username"] = user,
        loginData["password"] = passwd,
        loginData["appid"] = "otn",
        loginData["answer"] = self.randCode,
        tresult = self.session.httpClint.send(logurl, loginData)
        if 'result_code' in tresult and tresult["result_code"] == 0:
            print(u"登录成功")
            tk = self.auth()
            if "newapptk" in tk and tk["newapptk"]:
                return tk["newapptk"]
            else:
                return False
        elif 'result_message' in tresult and tresult['result_message']:
            messages = tresult['result_message']
            # NOTE(review): "is not -1" tests identity, not equality; it only
            # works because CPython caches small ints — should be "!= -1".
            if messages.find(u"密码输入错误") is not -1:
                raise UserPasswordException("{0}".format(messages))
            else:
                print(u"登录失败: {0}".format(messages))
                print(u"尝试重新登陆")
                return False
        else:
            return False

    def getUserName(self, uamtk):
        """Exchange the uamtk token and print the logged-in user's name.

        :param uamtk: auth token returned by baseLogin()
        :return: True on success, False on failure, or an error message
            string when uamtk is empty
        """
        if not uamtk:
            return u"权限校验码不能为空"
        else:
            uamauthclientUrl = self.session.urls["uamauthclient"]
            data = {"tk": uamtk}
            uamauthclientResult = self.session.httpClint.send(uamauthclientUrl, data)
            if uamauthclientResult:
                if "result_code" in uamauthclientResult and uamauthclientResult["result_code"] == 0:
                    print(u"欢迎 {} 登录".format(uamauthclientResult["username"]))
                    return True
                else:
                    return False
            else:
                # Empty response: retry once, ignoring the result.
                self.session.httpClint.send(uamauthclientUrl, data)
            url = self.session.urls["getUserInfo"]
            self.session.httpClint.send(url)

    def go_login(self):
        """Run the full login loop until a login attempt succeeds.

        Credentials are read from TickerConfig.USER / TickerConfig.PWD.
        :raises UserPasswordException: when user or password is empty
        """
        user, passwd = TickerConfig.USER, TickerConfig.PWD
        self.request_device_id()
        if not user or not passwd:
            raise UserPasswordException(u"温馨提示: 用户名或者密码为空,请仔细检查")
        login_num = 0
        while True:
            if loginConf(self.session):
                # Captcha-based flow: fetch image, solve, verify, then login.
                self.auth()
                result = getPassCodeNewOrderAndLogin1(session=self.session, imgType="login")
                if not result:
                    continue
                self.randCode = getRandCode(self.is_auto_code, self.auto_code_type, result)
                print(self.randCode)
                login_num += 1
                self.auth()
                if self.codeCheck():
                    uamtk = self.baseLogin(user, passwd)
                    if uamtk:
                        self.getUserName(uamtk)
                        break
            else:
                # Captcha disabled: fall back to the async login endpoint.
                loginAysnSuggest(self.session, username=user, password=passwd)
                login_num += 1
                break

    def request_device_id(self):
        """Fetch the encrypted browser-fingerprint cookies (RAIL_*).

        :return: False when the JSONP payload cannot be parsed; None otherwise
        """
        params = {"algID": self.request_alg_id(), "timestamp": int(time.time() * 1000)}
        params = dict(params, **self._get_hash_code_params())
        response = self.session.httpClint.send(urls.get("getDevicesId"), params=params)
        if response.find('callbackFunction') >= 0:
            # JSONP response: callbackFunction('{...}') — slice out the JSON.
            result = response[18:-2]
            try:
                result = json.loads(result)
                self.session.httpClint.set_cookies({
                    'RAIL_EXPIRATION': result.get('exp'),
                    'RAIL_DEVICEID': result.get('dfp'),
                })
            except:
                # NOTE(review): bare except silently hides JSON errors;
                # narrowing to ValueError would be safer.
                return False

    def request_alg_id(self):
        """Extract the algID value from the fingerprint JS.

        :return: the algID string, or "" when it cannot be found
        """
        response = self.session.httpClint.send(urls.get("GetJS"))
        result = re.search(r'algID\\x3d(.*?)\\x26', response)
        try:
            return result.group(1)
        except (IndexError, AttributeError) as e:
            pass
        return ""

    def _get_hash_code_params(self):
        """Build the obfuscated fingerprint params expected by getDevicesId.

        Keys are renamed via data_trans, and a hashCode is derived from the
        concatenated key+value string through rotation and hashing rounds.
        :return: dict of request parameters including 'hashCode'
        """
        from collections import OrderedDict
        # Synthetic but plausible browser fingerprint values; a couple are
        # randomized per call so repeated requests don't look identical.
        data = {
            'adblock': '0',
            'browserLanguage': 'en-US',
            'cookieEnabled': '1',
            'custID': '133',
            'doNotTrack': 'unknown',
            'flashVersion': '0',
            'javaEnabled': '0',
            'jsFonts': 'c227b88b01f5c513710d4b9f16a5ce52',
            'localCode': '3232236206',
            'mimeTypes': '52d67b2a5aa5e031084733d5006cc664',
            'os': 'MacIntel',
            'platform': 'WEB',
            'plugins': 'd22ca0b81584fbea62237b14bd04c866',
            'scrAvailSize': str(random.randint(500, 1000)) + 'x1920',
            'srcScreenSize': '24xx1080x1920',
            'storeDb': 'i1l1o1s1',
            'timeZone': '-8',
            'touchSupport': '99115dfb07133750ba677d055874de87',
            'userAgent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.' + str(
                random.randint(
                    5000, 7000)) + '.0 Safari/537.36',
            'webSmartID': 'f4e3b7b14cc647e30a6267028ad54c56',
        }
        # Obfuscated short names the server expects for each fingerprint key.
        data_trans = {
            'browserVersion': 'd435',
            'touchSupport': 'wNLf',
            'systemLanguage': 'e6OK',
            'scrWidth': 'ssI5',
            'openDatabase': 'V8vl',
            'scrAvailSize': 'TeRS',
            'hasLiedResolution': '3neK',
            'hasLiedOs': 'ci5c',
            'timeZone': 'q5aJ',
            'userAgent': '0aew',
            'userLanguage': 'hLzX',
            'jsFonts': 'EOQP',
            'scrAvailHeight': '88tV',
            'browserName': '-UVA',
            'cookieCode': 'VySQ',
            'online': '9vyE',
            'scrAvailWidth': 'E-lJ',
            'flashVersion': 'dzuS',
            'scrDeviceXDPI': '3jCe',
            'srcScreenSize': 'tOHY',
            'storeDb': 'Fvje',
            'doNotTrack': 'VEek',
            'mimeTypes': 'jp76',
            'sessionStorage': 'HVia',
            'cookieEnabled': 'VPIf',
            'os': 'hAqN',
            'hasLiedLanguages': 'j5po',
            'hasLiedBrowser': '2xC5',
            'webSmartID': 'E3gR',
            'appcodeName': 'qT7b',
            'javaEnabled': 'yD16',
            'plugins': 'ks0Q',
            'appMinorVersion': 'qBVW',
            'cpuClass': 'Md7A',
            'indexedDb': '3sw-',
            'adblock': 'FMQw',
            'localCode': 'lEnu',
            'browserLanguage': 'q4f3',
            'scrHeight': '5Jwy',
            'localStorage': 'XM7l',
            'historyList': 'kU5z',
            'scrColorDepth': "qmyu"
        }
        data = OrderedDict(data)
        d = ''
        params = {}
        # Concatenate key+value pairs while renaming keys for the request.
        for key, item in data.items():
            d += key + item
            key = data_trans[key] if key in data_trans else key
            params[key] = item
        # Two rounds of rotating the string in thirds, then three halving
        # rotations and a final base64(sha256) digest.
        d_len = len(d)
        d_f = int(d_len / 3) if d_len % 3 == 0 else int(d_len / 3) + 1
        if d_len >= 3:
            d = d[d_f:2 * d_f] + d[2 * d_f:d_len] + d[0: d_f]
        d_len = len(d)
        d_f = int(d_len / 3) if d_len % 3 == 0 else int(d_len / 3) + 1
        if d_len >= 3:
            d = d[2 * d_f:d_len] + d[0: d_f] + d[1 * d_f: 2 * d_f]
        d = self._encode_data_str_v2(d)
        d = self._encode_data_str_v2(d)
        d = self._encode_data_str_v2(d)
        data_str = self._encode_string(d)
        params['hashCode'] = data_str
        return params

    def _encode_data_str_v2(self, d):
        # Rotate the string around its midpoint (middle char stays in place
        # for odd lengths).
        b = len(d)
        if b % 2 == 0:
            return d[b // 2: b] + d[0:b // 2]
        else:
            return d[b // 2 + 1:b] + d[b // 2] + d[0:b // 2]

    def _encode_string(self, str):
        # NOTE(review): parameter name shadows the builtin str — rename when
        # this code is next touched.
        import hashlib
        import base64
        # URL-safe base64 of the SHA-256 digest, with padding stripped.
        result = base64.b64encode(hashlib.sha256(str.encode()).digest()).decode()
        return result.replace('+', '-').replace('/', '_').replace('=', '')
"""
setup.py - Setup package with the help Python's DistUtils
See https://www.python-ldap.org/ for details.
"""
import sys,os
from setuptools import setup, Extension
if sys.version_info < (3, 6):
raise RuntimeError(
'The C API from Python 3.6+ is required, found %s' % sys.version_info
)
from configparser import ConfigParser
sys.path.insert(0, os.path.join(os.getcwd(), 'Lib/ldap'))
import pkginfo
#-- A class describing the features and requirements of OpenLDAP 2.0
class OpenLDAP2:
    """Build configuration for linking the _ldap extension against OpenLDAP 2.x.

    All attributes are class-level lists so they can be overridden from the
    [_ldap] section of setup.cfg (see the loop below).
    """
    # Extra linker search paths (-L).
    library_dirs = []
    # Extra compiler include paths (-I).
    include_dirs = []
    extra_compile_args = []
    extra_link_args = []
    extra_objects = []
    # Libraries to link against; OpenLDAP client libraries by default.
    libs = ['ldap', 'lber']
    # Preprocessor defines; converted to (name, None) tuples below.
    defines = []
    # Extra data files as "destdir:file1,file2" strings; parsed below.
    extra_files = []
LDAP_CLASS = OpenLDAP2
#-- Read the [_ldap] section of setup.cfg
cfg = ConfigParser()
cfg.read('setup.cfg')
if cfg.has_section('_ldap'):
    # Any attribute of the build-config class may be overridden in setup.cfg;
    # values are whitespace-split into lists.
    for name in dir(LDAP_CLASS):
        if cfg.has_option('_ldap', name):
            setattr(LDAP_CLASS, name, cfg.get('_ldap', name).split())
# Convert bare define names to the (name, value) tuples distutils expects.
for i in range(len(LDAP_CLASS.defines)):
    LDAP_CLASS.defines[i]=((LDAP_CLASS.defines[i],None))
# Parse "destdir:file1,file2" strings into (destdir, [files]) tuples.
for i in range(len(LDAP_CLASS.extra_files)):
    destdir, origfiles = LDAP_CLASS.extra_files[i].split(':')
    origfileslist = origfiles.split(',')
    LDAP_CLASS.extra_files[i]=(destdir, origfileslist)
if os.environ.get('WITH_GCOV'):
    # Instrumentation for measuring code coverage
    LDAP_CLASS.extra_compile_args.extend(
        ['-O0', '-pg', '-fprofile-arcs', '-ftest-coverage']
    )
    LDAP_CLASS.extra_link_args.append('-pg')
    LDAP_CLASS.libs.append('gcov')
#-- Let distutils/setuptools do the rest
name = 'python-ldap'
setup(
    #-- Package description
    name = name,
    license=pkginfo.__license__,
    version=pkginfo.__version__,
    description = 'Python modules for implementing LDAP clients',
    long_description = """python-ldap:
python-ldap provides an object-oriented API to access LDAP directory servers
from Python programs. Mainly it wraps the OpenLDAP 2.x libs for that purpose.
Additionally the package contains modules for other LDAP-related stuff
(e.g. processing LDIF, LDAPURLs, LDAPv3 schema, LDAPv3 extended operations
and controls, etc.).
""",
    author = 'python-ldap project',
    author_email = 'python-ldap@python.org',
    url = 'https://www.python-ldap.org/',
    download_url = 'https://pypi.org/project/python-ldap/',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: C',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        # Note: when updating Python versions, also change tox.ini and .github/workflows/*
        'Topic :: Database',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP',
        'License :: OSI Approved :: Python Software Foundation License',
    ],
    #-- C extension modules
    ext_modules = [
        Extension(
            '_ldap',
            [
                'Modules/LDAPObject.c',
                'Modules/ldapcontrol.c',
                'Modules/common.c',
                'Modules/constants.c',
                'Modules/functions.c',
                'Modules/ldapmodule.c',
                'Modules/message.c',
                'Modules/options.c',
                'Modules/berval.c',
            ],
            depends = [
                'Modules/LDAPObject.h',
                'Modules/berval.h',
                'Modules/common.h',
                'Modules/constants_generated.h',
                'Modules/constants.h',
                'Modules/functions.h',
                'Modules/ldapcontrol.h',
                'Modules/message.h',
                'Modules/options.h',
            ],
            libraries = LDAP_CLASS.libs,
            include_dirs = ['Modules'] + LDAP_CLASS.include_dirs,
            library_dirs = LDAP_CLASS.library_dirs,
            extra_compile_args = LDAP_CLASS.extra_compile_args,
            extra_link_args = LDAP_CLASS.extra_link_args,
            extra_objects = LDAP_CLASS.extra_objects,
            # bool * list yields [] on Windows, the full list elsewhere —
            # runtime library dirs are a no-op for the MSVC toolchain.
            runtime_library_dirs = (not sys.platform.startswith("win"))*LDAP_CLASS.library_dirs,
            # Same bool-multiplication trick: each feature define is included
            # only when the corresponding library was configured in libs.
            define_macros = LDAP_CLASS.defines + \
                ('ldap_r' in LDAP_CLASS.libs or 'oldap_r' in LDAP_CLASS.libs)*[('HAVE_LIBLDAP_R',None)] + \
                ('sasl' in LDAP_CLASS.libs or 'sasl2' in LDAP_CLASS.libs or 'libsasl' in LDAP_CLASS.libs)*[('HAVE_SASL',None)] + \
                ('ssl' in LDAP_CLASS.libs and 'crypto' in LDAP_CLASS.libs)*[('HAVE_TLS',None)] + \
                [
                    ('LDAPMODULE_VERSION', pkginfo.__version__),
                    ('LDAPMODULE_AUTHOR', pkginfo.__author__),
                    ('LDAPMODULE_LICENSE', pkginfo.__license__),
                ]
        ),
    ],
    #-- Python "stand alone" modules
    py_modules = [
        'ldapurl',
        'ldif',
    ],
    packages = [
        'ldap',
        'ldap.controls',
        'ldap.extop',
        'ldap.schema',
        'slapdtest',
    ],
    package_dir = {'': 'Lib',},
    data_files = LDAP_CLASS.extra_files,
    include_package_data=True,
    install_requires=[
        'pyasn1 >= 0.3.7',
        'pyasn1_modules >= 0.1.5',
    ],
    zip_safe=False,
    python_requires='>=3.6',
    test_suite = 'Tests',
)
|
from .Strategy import Strategy |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .configmanagement import (
BinauthzConfig,
BinauthzState,
BinauthzVersion,
ConfigSync,
ConfigSyncDeploymentState,
ConfigSyncState,
ConfigSyncVersion,
ErrorResource,
GatekeeperDeploymentState,
GitConfig,
GroupVersionKind,
HierarchyControllerConfig,
HierarchyControllerDeploymentState,
HierarchyControllerState,
HierarchyControllerVersion,
InstallError,
MembershipSpec,
MembershipState,
OperatorState,
PolicyController,
PolicyControllerState,
PolicyControllerVersion,
SyncError,
SyncState,
DeploymentState,
)
__all__ = (
'BinauthzConfig',
'BinauthzState',
'BinauthzVersion',
'ConfigSync',
'ConfigSyncDeploymentState',
'ConfigSyncState',
'ConfigSyncVersion',
'ErrorResource',
'GatekeeperDeploymentState',
'GitConfig',
'GroupVersionKind',
'HierarchyControllerConfig',
'HierarchyControllerDeploymentState',
'HierarchyControllerState',
'HierarchyControllerVersion',
'InstallError',
'MembershipSpec',
'MembershipState',
'OperatorState',
'PolicyController',
'PolicyControllerState',
'PolicyControllerVersion',
'SyncError',
'SyncState',
'DeploymentState',
)
|
"""Defines accessor with methods for preserving metadata
TODO: Test on arrays with more than three dimensions
TODO: Figure out when to use copying
TODO: Add inplace kwarg?
TODO: Verify docstrings, especially type being returned
"""
import numpy as np
import xarray as xr
from .metadata import MetadataRef
from .utils import add_dim, copy_xr_metadata
class BaseAccessor:
    """Extends xarray objects to parse and run calculations on satellite data

    This accessor makes limited use of type checking where functionality of
    arrays and datasets differs.

    Attributes
    ----------
    metadata_coord: str
        name of the coord that stores metadata for this accessor
    name: str
        name of the accessor as registered with xarray. Assigned on
        subclasses.
    metadata: dict
        metadata stored on the imagery_ref scalar variable
    """

    def __init__(self, xobj):
        # xobj: the wrapped xarray.DataArray or xarray.Dataset.
        self._obj = xobj
        # Lazily-built MetadataRef; see the metadata property.
        self._metadata = None
        # Create metadata_ref coord if it does not exist
        try:
            self._obj.coords["metadata_ref"]
        except KeyError:
            # Scalar coord whose attrs carry the metadata across operations.
            self._obj.coords["metadata_ref"] = 0

    def __iter__(self):
        """Iterates across layers of this object"""
        if isinstance(self._obj, xr.DataArray):
            # A 2-d array is a single layer; wrap it so iteration yields it
            # whole instead of iterating its rows.
            obj = self._obj if len(self._obj.shape) > 2 else [self._obj]
            return iter(obj)
        return iter(self._obj.values())

    @property
    def metadata(self):
        """Returns imagery metadata, deriving it from the filename if possible

        Metadata is attached to the metadata_ref scalar variable, which allows
        it to carry over on many array operations. If that scalar variable
        does not exist, it is added the first time the metadata property
        is accessed.
        """
        # NOTE(review): truthiness (not just None) decides whether to rebuild;
        # presumably an empty MetadataRef is falsy — confirm in .metadata.
        if not self._metadata:
            self._metadata = MetadataRef(self._obj)
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        self._obj.coords["metadata_ref"].attrs = dict(metadata)
        self._metadata = MetadataRef(self._obj)
        self._metadata.propagate()

    @property
    def name(self):
        # Accessor name is stored alongside the rest of the metadata.
        return self.metadata["ACC_NAME"]

    @name.setter
    def name(self, val):
        self._obj.coords["metadata_ref"].attrs["ACC_NAME"] = val

    def concat(self, xarrs, **kwargs):
        """Concatenates arrays into an object of the same type as original

        Parameters
        ----------
        xarrs: list-like
            list of arrays
        kwargs:
            any keyword argument accepted by xr.concat. Only used when
            concatenating arrays.

        Returns
        -------
        xarray.DataArray or xarray.Dataset
            Concatenated array or dataset
        """
        if isinstance(self._obj, xr.DataArray):
            return xr.concat(xarrs, **kwargs)
        # Dataset case: pair each incoming array with the original's variable
        # names, preserving coords and attrs.
        return xr.Dataset(dict(zip(self._obj.keys(), xarrs)),
                          coords=self._obj.coords,
                          attrs=self._obj.attrs)

    def npfunc(self, npfunc, *args, **kwargs):
        """Runs a numpy function on the xarray

        This method allows the user to run certain numpy functions without
        losing the xarray attributes, coordinates, etc. managed by the
        accessor. The numpy function must return an array with the same
        shape as the original.

        Parameters
        ----------
        npfunc: str or callable
            either the name of a numpy function or the function itself. The
            function must take the xarray object as the first argument.
        args:
            any additional arguments accepted by the numpy function
        kwargs:
            any additional keyword arguments accepted by the numpy function

        Returns
        -------
        xarray.DataArray
            New array with results of the numpy function and all original
            metadata
        """
        # Convert string to function
        if isinstance(npfunc, str):
            npfunc = getattr(np, npfunc)
        obj = self._obj
        if isinstance(self._obj, xr.Dataset):
            # Stack dataset variables into one array along a "band" dim.
            obj = obj.to_array(dim="band")
        arr = npfunc(obj, *args, **kwargs)
        return self.copy_xr_metadata(arr, name=f"np.{npfunc.__name__}")

    def copy_xr_metadata(self, other, name="array"):
        """Copies coords and dims from current object to another array

        This allows metadata stored in the original to be reintegrated after
        using a function that drops them (like any numpy function).

        Parameters
        ----------
        other: numpy.array
            an array with the same shape as the current data array
        name: str
            name used for the "band" coordinate when one has to be added

        Returns
        -------
        xarray.DataArray
            Array with coords and dims copied over
        """
        xobj = copy_xr_metadata(self._obj, other)
        # Guarantee a "band" dimension so downstream code can rely on it.
        if "band" not in xobj.coords and "band" not in xobj.dims:
            xobj = add_dim(xobj, dim="band", coords={"name" : [name]})
        return xobj
|
from urwid_pydux import ConnectedComponent
from git_sew.ui.cli.components.generics import ListItem, OrderedList
class StateView(ConnectedComponent):
    """Renders the whole store state as an ordered "key: value" list."""

    def map_state_to_props(self, state, own_props):
        items = []
        for key, value in state.items():
            items.append(ListItem(text=f"{key}: {value}"))
        return {"items": items}

    def render_component(self, props):
        return OrderedList(items=props["items"])
|
# students = [{"name": "Vova",
# "last_name": "Zinkovsky",
# "age": 17,
# "scores": [1, 2, 3, 4, 5],
# "hobbies": ['play', 'programming', 'reading']
# },
# {"name": "Begimai",
# "last_name": "Zhumakova",
# "age": 18,
# "scores": [5, 5, 3, 4, 5],
# "hobbies": ['pubg', 'programming', 'reading', 'walking']
# },
# {"name": "Aliya",
# "last_name": "Andabekova",
# "age": 18,
# "scores": [1, 4, 3, 1, 2],
# "hobbies": ['programming', 'reading', 'drawing']
# },
# {"name": "Cholpon",
# "last_name": "Kaimova",
# "age": 16,
# "scores": [5, 2, 4, 4, 5],
# "hobbies": ['pubg', 'programming', 'reading', 'anime', ]
# },
# {"name": "Bakyt",
# "last_name": "Asanaliev",
# "age": 35,
# "scores": [4, 2, 4, 4, 5],
# "hobbies": ['play', 'programming', 'reading', 'footbal', 'history']
# },
# {"name": "Maksim",
# "last_name": "Surovkin",
# "age": 22,
# "scores": [],
# "hobbies": ['programming', 'reading', 'traveling', 'cycling']
# }
# ]
# general_avg = 0
# student_avg = []
# std = 0
# for student in students:
# sum = 0
# for score in student['scores']:
# sum += score
# try:
# students_avg = sum / len(student['scores'])
# except ZeroDivisionError:
# students_avg = 0
# student_avg.append(students_avg)
# sum_avg = 0
# for avg in student_avg:
# sum_avg += avg
# general_avg = sum_avg / len(student_avg)
# max = 0
# min = 5
# i = 0
#
# while i < len(student_avg):
# if student_avg[i] > max:
# max = student_avg[i]
# elif student_avg[i] < min:
# min = student_avg[i]
# i += 1
# std = max - min
# print("student average:", student_avg)
# print("general average:", round(general_avg, 2))
# print("std:", round(std, 2))
#
# dict_loop = 0
#
# import pprint
#
# for student in students:
# student['avg'] = student_avg[dict_loop]
# dict_loop += 1
# pprint.pprint(students)
# import pprint
# data = [
# {"dress":[
# {'name':'louis vuitton',
# 'popularity':500,
# "price":1000
# },
# {'name':'versace',
# 'popularity':210,
# "price":888
# },
# {'name':'supreme',
# 'popularity':57,
# "price":765
# },
# ]
# },
# {'jeans':[
# {'name':'adidas',
# 'popularity':42,
# 'price':2300
# },
# {'name':'armani',
# 'popularity':678,
# 'price':110
# },
# {'name':'casio',
# 'popularity':230,
# 'price':3000
# },
# ]
# },
# {'t-shirt':[
# {'name':'tom ford',
# 'popularity':999,
# 'price':5000
# },
# {'name':'lacoste',
# 'popularity':777,
# 'price':230
# },
# {'name':'luxury',
# 'popularity':876,
# 'price':2300
# },
# ]
# }
# ]
# list1 = ['dress', 'jeans', 't-shirt']
#
# i = 0
# category_price = {}
#
# for category in data:
# category_sum = 0
# key = list1[i]
# category_value = category[key]
# for product in category_value:
# category_sum += product['price']
# category_price[key] = category_sum
# i += 1
# print(max(category_price.values()))
#
# list2 = ['dress', 'jeans', 't-shirt']
# k = 0
# category_price2 = {}
# for category2 in data:
# category_sum2 = 0
# key2 = list1[k]
# category_value2 = category2[key2]
# for product2 in category_value2:
# category_sum2 += product2['popularity']
# category_price2[key2] = category_sum2
# k += 1
# print(max(category_price2.values()))
#
#
# list3 = ['dress', 'jeans', 't-shirt']
# j = 0
# prices = []
#
# for max_price in data:
# key3 = list3[j]
# maxp_value = max_price[key3]
# for product in maxp_value:
# prices.append(product['price'])
# j += 1
# print(max(prices),min(prices))
#
#
# file1 = open('text.txt', 'w')
# file1.write(str(max(prices)))
#
|
import re
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
CURDIR = dirname(abspath(__file__))
# Long description on PyPI mirrors the repository README.
with open("README.md", "r", encoding='utf-8') as fh:
    LONG_DESCRIPTION = fh.read()
# Get the version from the _version.py versioneer file. For a git checkout,
# this is computed based on the number of commits since the last tag.
import versioneer
# Strip any "+<local>" suffix (e.g. "+dirty") so PyPI accepts the version.
VERSION = str(versioneer.get_versions()['version']).split('+')[0]
# NOTE(review): presumably deleted so setup() cannot re-trigger versioneer's
# version computation — confirm against versioneer's docs.
del versioneer.get_versions
setup(
    name="robotframework-PuppeteerLibrary",
    version=VERSION,
    author="QA Hive Co.,Ltd",
    author_email="support@qahive.com",
    description="PuppeteerLibrary is a Web Testing library for Robot Framework.",
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    license="Apache License 2.0",
    url='https://qahive.github.io/robotframework-puppeteer.github.io/',
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Testing",
        "Topic :: Software Development :: Testing :: Acceptance",
        "Framework :: Robot Framework",
    ],
    keywords='robotframework puppeteer web-testing automation',
    platforms='any',
    # Pinned browser-automation backends; robotframework floats above 3.2.1.
    install_requires=[
        'robotframework>=3.2.1',
        'playwright==1.10.0',
        'pyppeteer==0.2.5',
    ],
    python_requires='>3.6',
    # test_suite='nose.collector',
    # tests_require=['nose', 'parameterized'],
    zip_safe=False,
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-13 23:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Question.notes to a TextField with an empty-string default."""

    dependencies = [
        ('questions', '0006_question_notes'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='notes',
            field=models.TextField(default=''),
        ),
    ]
|
#!/usr/bin/env python3
"""
Authors: Venkat Ramaraju, Jayanth Rao
Functionality implemented:
- Generates and aggregates polarities across headlines and conversations
"""
# Libraries and Dependencies
import os
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import pandas as pd
from nltk.stem import WordNetLemmatizer
import tweepy
# Global Variables
sia = SentimentIntensityAnalyzer()  # VADER analyzer; lexicon extended by update_stock_terminology()
lemmatizer = WordNetLemmatizer()  # normalizes text before polarity scoring
conversations_map = {}  # ticker -> mean conversation polarity
headlines_map = {}  # ticker -> mean headline polarity
def update_stock_terminology():
    """
    Creates dictionary with updated terminologies for SentimentIntensityAnalyzer. Includes positive and negative words,
    along with polarized words with weights. Used to improve VADER accuracy.
    """
    stock_lexicon = {}
    csv_df = pd.read_csv('setup_csvs/polarized_stock_lex.csv')
    for index, row in csv_df.iterrows():
        stock_lexicon[row['word']] = row['polarity']
    # Bug fix: merge so the stock-specific weights take precedence. The
    # original applied sia.lexicon last, so every custom stock term was
    # overwritten by the default VADER entry, defeating this function's
    # stated purpose of improving VADER accuracy for stock terms.
    resulting_lex = {}
    resulting_lex.update(sia.lexicon)
    resulting_lex.update(stock_lexicon)
    sia.lexicon = resulting_lex
def get_headline_sentiments():
    """
    Analyze polarities of the given stock tickers, based on terminologies inserted in SentimentIntensityAnalyzer.
    Stores the per-ticker mean compound polarity in the module-level headlines_map.
    """
    headlines_csv = pd.read_csv("../Data_Collection/headlines.csv")
    sum_of_polarities = {}
    count_of_headlines = {}
    # Aggregates data across headlines
    for index, row in headlines_csv.iterrows():
        try:
            ticker = row['Ticker']
            lemma_text = lemmatizer.lemmatize(str(row['Headline']))
            compound = sia.polarity_scores(lemma_text)["compound"]
            # Fix: dropped the original `row["Polarity"] = ...` — iterrows()
            # yields copies, so that assignment never reached the DataFrame.
            sum_of_polarities[ticker] = sum_of_polarities.get(ticker, 0.0) + compound
            count_of_headlines[ticker] = count_of_headlines.get(ticker, 0) + 1
        except RuntimeError as e:
            # Best-effort: skip rows that fail to score, as before.
            print(e, "was handled")
    for ticker in sum_of_polarities:
        headlines_map[ticker] = sum_of_polarities[ticker] / count_of_headlines[ticker]
def generate_aggregated_csv():
    """
    Generates a CSV with the aggregated polarities of headlines for the group of stocks that are being analyzed. In
    the case where no conversations are available for a given stock, we default to Twitter conversations for our
    analysis.
    """
    # Fix: DataFrame.append() was deprecated and removed in pandas 2.0 (and
    # re-created the frame on every iteration). Collect rows first, then
    # build the DataFrame once.
    rows = []
    for ticker, headlines_polarity in headlines_map.items():
        try:
            if ticker in conversations_map:
                polarity = conversations_map[ticker]
            else:
                polarity = twitter_sentiment(ticker)
            rows.append({"Ticker": ticker, "Conversations": polarity, "Headlines": headlines_polarity})
        except RuntimeError as e:
            print(e, "was handled")
    aggregated_df = pd.DataFrame(rows, columns=["Ticker", "Conversations", "Headlines"])
    aggregated_df.to_csv("aggregated_polarities.csv")
def get_conversation_sentiments():
    """
    Generates the per-ticker mean conversation polarity in the module-level
    conversations_map, reading one CSV of conversations per ticker.
    """
    list_of_conversations = [f for f in os.listdir('../Data_Collection/Conversations/') if f.endswith('.csv')]
    # Aggregates data across conversations, one file per ticker.
    for ticker_csv in list_of_conversations:
        conversations_csv = pd.read_csv('../Data_Collection/Conversations/' + str(ticker_csv))
        # File naming convention: "<ticker>_<...>.csv".
        ticker = ticker_csv.split("_")[0].upper()
        polarity_sum = 0.0
        count = 0
        for index, row in conversations_csv.iterrows():
            try:
                lemma_text = lemmatizer.lemmatize(str(row['Conversation']))
                # Fix: dropped the dead `row["Polarity"] = ...` (iterrows()
                # yields copies, so it never modified the DataFrame).
                polarity_sum += sia.polarity_scores(lemma_text)["compound"]
                count += 1
            except RuntimeError as e:
                print(e, "was handled")
        # Bug fix: the original indexed count_of_conversations[ticker] after
        # the loop, which raised KeyError for an empty/unscorable CSV. Using
        # local counters makes the empty case fall through to the 0.0 default.
        if count > 0:
            conversations_map[ticker] = polarity_sum / count
        else:
            conversations_map[ticker] = 0.0
def twitter_sentiment(ticker):
    """
    Gathers 100 tweets related to a specific stock ticker and runs the VADER sentiment analysis model on it to
    generate a polarity scores.
    :param ticker: Name of stock ticker.
    :return: Aggregated polarity value for conversations on twitter; 0.0 (neutral) when no tweets are found.
    """
    # Credentials
    # NOTE(review): credentials are blank placeholders; they must be supplied
    # (ideally from environment variables, not hard-coded) for this to work.
    api_key = ""
    api_secret_key = ""
    access_token = ""
    access_token_secret = ""
    # API calls
    auth = tweepy.OAuthHandler(api_key, api_secret_key)
    auth.set_access_token(access_token, access_token_secret)
    # NOTE(review): wait_on_rate_limit_notify targets tweepy v3; confirm the
    # pinned tweepy version (the kwarg was removed in tweepy v4).
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
    stock = "$" + ticker
    search_results = api.search(q=stock, count=100)
    # Aggregating data
    print("Conversations on ", stock)
    polarity_sum = 0
    count = 0
    for tweet in search_results:
        lemma_text = lemmatizer.lemmatize(str(tweet.text))
        scores = sia.polarity_scores(lemma_text)
        polarity_sum += scores["compound"]
        count += 1
    # Bug fix: the original divided unconditionally, raising
    # ZeroDivisionError when the search returned no tweets. Default to
    # neutral, matching get_conversation_sentiments().
    if count == 0:
        return 0.0
    return polarity_sum / count
def main():
    """Run the full pipeline: extend the VADER lexicon with stock terms,
    score headlines and conversations, then write the aggregated CSV."""
    update_stock_terminology()
    get_headline_sentiments()
    get_conversation_sentiments()
    generate_aggregated_csv()

if __name__ == "__main__":
    main()
|
# The following is for unit testing.
# This unit test loads the result template as dict data, then updates it.
import os
import sys
import concurrent.futures
__filedir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, __filedir__ + '/../src/')
sys.path.insert(0, __filedir__ )
#path = os.path.dirname(__filedir__)
from worker import *
# This is a unit test for parseinput and load_result_tp.
if __name__ == "__main__":
    try:
        # Hard-coded path to the downloader working directory on the dev box.
        path = "/home/wu_guifeng/work/vddb/trunk/downloader"
        # logging.config.fileConfig('/'.join([path, 'downloader_log.conf']))
        # logger = logging.getLogger("download_worker")
        with concurrent.futures.ProcessPoolExecutor(1) as executor:
            with open("input_test1.json") as f:
                task_info = f.read()
            # Run the worker once in a child process and once inline in this
            # process with the same task payload.
            future = executor.submit(worker, task_info, path)
            worker(task_info, path)
    # Python 2 exception syntax — this test module targets Python 2.
    except Exception, reason:
        print 'error', reason
# test download_file, this time doesn't support timeout and other settings
'''
if __name__ == "__main__":
try:
download_file("http://qd.cache.baidupcs.com/file/9fde0db5ed098f079dc8b36d34f0a5ea?xcode=bc9b8b287b524618520b60ef8da13535fb472fdda0f193f1&fid=2584621796-250528-894668746102053&time=1405070142&sign=FDTAXER-DCb740ccc5511e5e8fedcff06b081203-1zq2UfAIcmWt8gxOxJyuTdSzqzs%3D&to=sc,qc&fm=N,Q,U,c&sta_dx=2&sta_cs=11120&sta_ft=mp4&sta_ct=5&newver=1&expires=8h&rt=pr&r=742660034&logid=3124366643&vuk=2584621796", "./test/tianyu.mp4", 100)
except Exception, reason:
print 'error', reason
'''
# test far_gen and upload_file function
'''
if __name__ == "__main__":
try:
video_path = "/home/wu_guifeng/work/vddb/trunk/downloader/2.mp4"
path = "/home/wu_guifeng/work/vddb/trunk/downloader/" + str(uuid.uuid1())
far_path, dna_path, stats_path = path + '.far', path + '.dna' , path + '.stats'
far_gen(video_path, far_path)
generate_dna_stat(far_path, dna_path, stats_path)
upload_file(dna_path)
upload_file(stats_path)
except Exception, reason:
print 'error', reason
'''
'''
if __name__ == "__main__":
result_data = load_result_tp()
result_data["params"]["dna"]["hash"] = "aabbccddeedd"
print result_data
print type(result_data)
result = json.dumps(result_data)
print result
'''
|
"""
This module comes up with test vectors that can be used to quickly
sniff a user's time zone. It generates a decision tree that selects a
time zone based on the the UTC offsets of various points in time.
"""
import attr
from collections import OrderedDict
from datetime import datetime
import json
import math
import os.path
import pytz
dirname = os.path.dirname(__file__)
def offset_min(tz, test_point):
    "Return the offset from UTC for a test point in minutes."
    # is_dst=False resolves ambiguous local times deterministically (pytz).
    delta = tz.utcoffset(test_point, is_dst=False)
    return delta.total_seconds() / 60
def dedup(d, preferred):
    """
    Remove duplicate entries from a dict.

    Keys in preferred will be included in the resulting dict if possible.
    (Later keys are favored over earlier ones.)
    """
    # Invert the mapping: each distinct value keeps exactly one key.
    inverse = {}
    for key, value in d.items():
        inverse[value] = key
    # Let preferred keys win over whatever survived the first pass.
    for key in preferred:
        inverse[d[key]] = key
    # Flip back to key -> value with duplicates removed.
    return {name: val for val, name in inverse.items()}
def maybe_int(v):
    "If v is a float representing an integer, convert to integer."
    return int(v) if v.is_integer() else v
@attr.s
class Table:
    """A column-oriented table: one column per test point, one row per zone."""

    data = attr.ib()  # list of columns
    row_names = attr.ib()  # list of row names

    def __len__(self):
        # Row count equals the number of named time zones.
        return len(self.row_names)

    def partition(self, col_ix):
        "Return new tables partitioned using column col_ix."
        partitions = {}
        for distinct in set(self.data[col_ix]):
            partitions[distinct] = self.get_partition(col_ix, distinct)
        return partitions

    def get_partition(self, col_ix, val):
        "Build the sub-table of rows whose column col_ix equals val."
        keep = [i for i, cell in enumerate(self.data[col_ix]) if cell == val]
        sub_columns = []
        for column in self.data:
            sub_columns.append([column[i] for i in keep])
        kept_names = [self.row_names[i] for i in keep]
        return Table(data=sub_columns, row_names=kept_names)

    def get_entropy(self, col_ix):
        "Returns the entropy of a certain table column."
        column = self.data[col_ix]
        total = len(column)
        entropy = 0.0
        for value in set(column):
            p = column.count(value) / total
            entropy -= p * math.log2(p)
        return entropy

    def max_entropy_split(self):
        "Find the max. entropy column and partition the table based on it."
        best_col = max(range(len(self.data)), key=self.get_entropy)
        return (best_col, self.partition(best_col))
@attr.s
class Node:
    """A decision-tree node: test a datetime's UTC offset to pick a child."""

    naive_dt = attr.ib()  # test point datetime; None at leaves
    children = attr.ib()  # dict offset -> Node, or a tz-name str at a leaf

    def test(self, tz):
        "Walk the tree using tz's offsets; return the matching zone name."
        if isinstance(self.children, str):
            return self.children
        branch = self.children[offset_min(tz, self.naive_dt)]
        return branch.test(tz)

    def max_depth(self):
        "Length of the longest root-to-leaf path below this node."
        if isinstance(self.children, str):
            return 0
        depths = [child.max_depth() for child in self.children.values()]
        return 1 + max(depths)

    def serialize(self):
        "Convert the subtree to a JSON-friendly nested dict (or leaf string)."
        if isinstance(self.children, str):
            return self.children
        out = OrderedDict()
        out["testPoint"] = self.naive_dt.isoformat()
        serialized = {}
        for offset, child in self.children.items():
            serialized[maybe_int(offset)] = child.serialize()
        out["children"] = serialized
        return out
print("Generating test points")
# One naive test point per month, 2000-2017 (00:27 on the 1st), to probe
# DST transitions and historical offset changes.
test_points = [datetime(year, month, 1, 0, 27)
               for year in range(2000, 2018) for month in range(1, 13)]
# Flatten pytz's country -> zones mapping into a sorted, de-duplicated list.
all_timezones = sorted(set(sum(pytz.country_timezones.values(), [])))
print("Obtained %d timezones from pytz" % len(all_timezones))
with open(os.path.join(dirname, 'equivalencies.json'), 'rt') as f:
    preferred_timezones = list(json.load(f))
# Map each zone to its tuple of offsets at every test point, then collapse
# zones with identical vectors (favoring names already in equivalencies.json).
vectors = dedup({
    tz: tuple(offset_min(pytz.timezone(tz), point) for point in test_points)
    for tz in all_timezones
}, preferred_timezones)
unique_timezones = sorted(vectors)
print("Found %d unique timezones based on test points" % len(unique_timezones))
# Transpose so the Table holds one column per test point.
parent_table = Table(
    data=tuple(map(tuple, zip(*(vectors[tz] for tz in unique_timezones)))),
    row_names=unique_timezones,
)
def make_tree(table: Table):
    "Recursively build the decision tree for the given offset table."
    if len(table) == 1:
        # Only one zone remains: emit a leaf holding its name.
        return Node(naive_dt=None, children=table.row_names[0])
    # Split on the most informative test point and recurse on each partition.
    split_ix, partitions = table.max_entropy_split()
    branches = {offset: make_tree(subtable)
                for offset, subtable in partitions.items()}
    return Node(naive_dt=test_points[split_ix], children=branches)
tree = make_tree(parent_table)
print('Generated decision tree with depth %d' % tree.max_depth())
# Recompute equivalency groups: for every pytz zone, record which canonical
# unique zone the tree resolves it to.
equivalencies = OrderedDict()
for tz in unique_timezones:
    equivalencies[tz] = []
for tzname in all_timezones:
    if tzname in equivalencies:
        continue
    equivalencies[tree.test(pytz.timezone(tzname))].append(tzname)
# Sort each alias group; drop canonical zones with no aliases.
for k, e in tuple(equivalencies.items()):
    if e:
        e.sort()
    else:
        del equivalencies[k]
# Pretty-printed tree for humans…
with open(os.path.join(dirname, 'js', 'tz-tree.json'), 'wt') as f:
    json.dump(tree.serialize(), f, indent=2)
    f.write('\n')
print('Wrote tz-tree.json')
# …and a compact version for shipping to the browser.
with open(os.path.join(dirname, 'js', 'tz-tree.min.json'), 'wt') as f:
    json.dump(tree.serialize(), f, indent=None, separators=(',', ':'))
print('Wrote tz-tree.min.json')
with open(os.path.join(dirname, 'equivalencies.json'), 'wt') as f:
    json.dump(equivalencies, f, indent=2)
    f.write('\n')
print('Wrote equivalencies.json')
|
from direct.controls import ControlManager
from direct.showbase.InputStateGlobal import inputState
#This is the new class for Toontown's ControlManager
#Had to override some functions in order to fix 'want-WASD'
class ToontownControlManager(ControlManager.ControlManager):
    """Toontown-specific ControlManager (Python 2 / Panda3D).

    Overrides the base class's input-token management so that the
    'want-WASD' setting maps movement onto WASD keys (with an A/D
    turn-vs-slide toggle) instead of the arrow keys.
    """
    # `base` is the global ShowBase/ToonBase instance provided by Panda3D
    # builtins at runtime.
    wantWASD = base.wantWASD#Instead of checking config.prc, get wantWASD from ToonBase

    def __init__(self, enable=True, passMessagesThrough = False):
        """Set up empty token bookkeeping and optionally enable controls."""
        self.passMessagesThrough = passMessagesThrough
        self.inputStateTokens = []
        # Tokens for the A/D keys, swapped between turn and slide bindings.
        self.WASDTurnTokens = []
        self.__WASDTurn = True
        self.controls = {}
        self.currentControls = None
        self.currentControlsName = None
        self.isEnabled = 0
        self.forceAvJumpToken = None
        self.inputToDisable = []
        # Tokens created by disableWASD() to force all movement keys to 0.
        self.forceTokens = None
        # Separate token lists so reload() can release one scheme's watchers
        # without touching the other's.
        self.istWASD = []
        self.istNormal = []
        if enable:
            self.enable()

    def enable(self):
        """Register inputState watchers for the active key scheme."""
        if self.isEnabled:
            assert self.notify.debug('already isEnabled')
            return
        self.isEnabled = 1
        # keep track of what we do on the inputState so we can undo it later on
        #self.inputStateTokens = []
        ist = self.inputStateTokens
        ist.append(inputState.watch("run", 'runningEvent', "running-on", "running-off"))
        ist.append(inputState.watch("forward", "force-forward", "force-forward-stop"))
        ist.append(inputState.watchWithModifiers("reverse", "mouse4", inputSource=inputState.Mouse))
        if self.wantWASD:
            # Mouse-look and forced turns stay active alongside the WASD keys.
            self.istWASD.append(inputState.watch("turnLeft", "mouse-look_left", "mouse-look_left-done"))
            self.istWASD.append(inputState.watch("turnLeft", "force-turnLeft", "force-turnLeft-stop"))
            self.istWASD.append(inputState.watch("turnRight", "mouse-look_right", "mouse-look_right-done"))
            self.istWASD.append(inputState.watch("turnRight", "force-turnRight", "force-turnRight-stop"))
            self.istWASD.append(inputState.watchWithModifiers("forward", "w", inputSource=inputState.WASD))
            self.istWASD.append(inputState.watchWithModifiers("reverse", "s", inputSource=inputState.WASD))
            # A/D bindings depend on the current turn-vs-slide mode.
            self.setWASDTurn(self.__WASDTurn)
        else:
            # Classic arrow-key scheme.
            self.istNormal.append(inputState.watchWithModifiers("forward", "arrow_up", inputSource=inputState.ArrowKeys))
            self.istNormal.append(inputState.watchWithModifiers("reverse", "arrow_down", inputSource=inputState.ArrowKeys))
            self.istNormal.append(inputState.watchWithModifiers("turnLeft", "arrow_left", inputSource=inputState.ArrowKeys))
            ist.append(inputState.watch("turnLeft", "mouse-look_left", "mouse-look_left-done"))
            ist.append(inputState.watch("turnLeft", "force-turnLeft", "force-turnLeft-stop"))
            self.istNormal.append(inputState.watchWithModifiers("turnRight", "arrow_right", inputSource=inputState.ArrowKeys))
            ist.append(inputState.watch("turnRight", "mouse-look_right", "mouse-look_right-done"))
            ist.append(inputState.watch("turnRight", "force-turnRight", "force-turnRight-stop"))
        # Jump controls
        if self.wantWASD:
            self.istWASD.append(inputState.watchWithModifiers("jump", "shift"))
        else:
            self.istNormal.append(inputState.watch("jump", "control", "control-up"))
        if self.currentControls:
            self.currentControls.enableAvatarControls()

    def setWASDTurn(self, turn):
        """Bind A/D to turning when `turn` is true, otherwise to sliding,
        carrying any currently-held key state over to the new bindings."""
        self.__WASDTurn = turn
        if not self.isEnabled:
            return
        # Snapshot current key state so a held key keeps working after rebind.
        turnLeftWASDSet = inputState.isSet("turnLeft", inputSource=inputState.WASD)
        turnRightWASDSet = inputState.isSet("turnRight", inputSource=inputState.WASD)
        slideLeftWASDSet = inputState.isSet("slideLeft", inputSource=inputState.WASD)
        slideRightWASDSet = inputState.isSet("slideRight", inputSource=inputState.WASD)
        for token in self.WASDTurnTokens:
            token.release()
        if turn:#If we want toons to be able to turn instead of sliding left to right
            self.WASDTurnTokens = (
                inputState.watchWithModifiers("turnLeft", "a", inputSource=inputState.WASD),
                inputState.watchWithModifiers("turnRight", "d", inputSource=inputState.WASD),
            )
            inputState.set("turnLeft", slideLeftWASDSet, inputSource=inputState.WASD)
            inputState.set("turnRight", slideRightWASDSet, inputSource=inputState.WASD)
            inputState.set("slideLeft", False, inputSource=inputState.WASD)
            inputState.set("slideRight", False, inputSource=inputState.WASD)
        else:
            self.WASDTurnTokens = (
                inputState.watchWithModifiers("slideLeft", "a", inputSource=inputState.WASD),
                inputState.watchWithModifiers("slideRight", "d", inputSource=inputState.WASD),
            )
            inputState.set("slideLeft", turnLeftWASDSet, inputSource=inputState.WASD)
            inputState.set("slideRight", turnRightWASDSet, inputSource=inputState.WASD)
            inputState.set("turnLeft", False, inputSource=inputState.WASD)
            inputState.set("turnRight", False, inputSource=inputState.WASD)

    def disable(self):
        """Release all registered tokens and disable avatar controls."""
        self.isEnabled = 0
        for token in self.inputStateTokens:
            token.release()
        self.inputStateTokens = []
        for token in self.WASDTurnTokens:
            token.release()
        self.WASDTurnTokens = []
        if self.currentControls:
            self.currentControls.disableAvatarControls()
        # NOTE(review): with passMessagesThrough, movement watchers are
        # re-registered even while "disabled" so key events keep flowing —
        # per the comment below, this is deliberate ("for not breaking
        # toontown"); confirm before changing.
        if self.passMessagesThrough: # for not breaking toontown
            if self.wantWASD:
                print ':(ToontownControlManager) WASD support was enabled.'
                self.istWASD.append(inputState.watchWithModifiers("forward", "w", inputSource=inputState.WASD))
                self.istWASD.append(inputState.watchWithModifiers("reverse", "s", inputSource=inputState.WASD))
                self.istWASD.append(inputState.watchWithModifiers("turnLeft", "a", inputSource=inputState.WASD))
                self.istWASD.append(inputState.watchWithModifiers("turnRight", "d", inputSource=inputState.WASD))
            else:
                print ':(ToontownControlManager) WASD support was disabled.'
                self.istNormal.append(inputState.watchWithModifiers("forward", "arrow_up", inputSource=inputState.ArrowKeys))
                self.istNormal.append(inputState.watchWithModifiers("reverse", "arrow_down", inputSource=inputState.ArrowKeys))
                self.istNormal.append(inputState.watchWithModifiers("turnLeft", "arrow_left", inputSource=inputState.ArrowKeys))
                self.istNormal.append(inputState.watchWithModifiers("turnRight", "arrow_right", inputSource=inputState.ArrowKeys))

    def disableWASD(self):#Disables WASD for when chat is open.
        """Force all movement keys to 0 so typing in chat doesn't move the toon."""
        if self.wantWASD:
            self.forceTokens=[#Forces all keys to return 0. This won't affect chat input.
                inputState.force(
                    "jump", 0, 'ControlManager.disableWASD'),
                inputState.force(
                    "forward", 0, 'ControlManager.disableWASD'),
                inputState.force(
                    "turnLeft", 0, 'ControlManager.disableWASD'),
                inputState.force(
                    "slideLeft", 0, 'ControlManager.disableWASD'),
                inputState.force(
                    "reverse", 0, 'ControlManager.disableWASD'),
                inputState.force(
                    "turnRight", 0, 'ControlManager.disableWASD'),
                inputState.force(
                    "slideRight", 0, 'ControlManager.disableWASD')
            ]
            print 'disableWASD()'

    def enableWASD(self):#Enables WASD after chat is closed.
        """Release the force-to-0 tokens installed by disableWASD()."""
        if self.wantWASD:
            if self.forceTokens:
                for token in self.forceTokens:#Release all the forced keys we added earlier.
                    token.release()
                self.forceTokens = []
            print 'enableWASD'

    def reload(self):#Called to reload the ControlManager ingame
        """Re-read wantWASD and rebuild all watchers for the new scheme."""
        self.wantWASD = base.wantWASD#Reload wantWASD if it was recently changed.
        if self.wantWASD:
            for token in self.istNormal:
                token.release()#Release arrow key input
            self.istNormal = []
            self.inputStateTokens = []
            self.disable()
            self.enable()
        else:
            for token in self.WASDTurnTokens:
                token.release()
            for token in self.istWASD:
                token.release()
            self.istWASD = []
            self.WASDTurnTokens = []
            self.disable()
            self.enable()
import time
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
from surprise import Dataset, Reader, SVD, KNNBasic
class BaselineMF:
    """Collaborative-filtering baseline over a flattened ratings dataset.

    Wraps a scikit-surprise algorithm (KNNBasic by default) behind
    fit/predict/score methods that consume batches from a PyTorch
    DataLoader (dict batches with 'rating', 'user_id', 'item_id' keys).
    """

    def __init__(self, cf_algo=None, logit=False):
        """
        Parameters
        ----------
        cf_algo: a scikit-surprise algorithm instance; defaults to KNNBasic(k=2)
        logit (bool): if True, ratings are logit-transformed before fitting
            and predictions are squashed back through a sigmoid
        """
        self.logit = logit
        self.question_truth_dict = {}
        self.average_true_rating = 0.5
        self.average_false_rating = 0.5
        # Default reduction='mean' is exactly what the deprecated
        # size_average=True requested.
        self.loss_fn = nn.MSELoss()
        if cf_algo is None:
            self.cf_algo = KNNBasic(k=2)
        else:
            self.cf_algo = cf_algo
        #self.svd = SVD(n_epochs=500, verbose=True, lr_all=0.001, n_factors=50)

    def dataloader_extract(self, sample):
        """Unpack one DataLoader batch dict into (ratings, user_ids, item_ids) Series."""
        ratings = pd.Series(np.array(list(sample['rating'])))
        user_ids = pd.Series(sample['user_id']).astype(str)
        item_ids = pd.Series(sample['item_id']).astype(str)
        return ratings, user_ids, item_ids

    def logit_fn(self, p, epsilon=1e-3):
        """Return the log-odds of p, clamping exact 0/1 entries to avoid infinities.

        Bug fix: the original looped `for item in p: item = epsilon`, which
        only rebinds the loop variable and never modifies p, so ratings of
        exactly 0 or 1 still produced +/-inf logits.
        """
        p = p.copy()
        p[p == 0] = epsilon
        p[p == 1] = 1 - epsilon
        return np.log(p / (1 - p))

    def sigmoid_fn(self, x):
        """Inverse of the (unclamped) logit transform."""
        return 1 / (1 + np.exp(-x))

    def fit(self, dataset, train_sampler):
        """Fit the wrapped CF algorithm on the sampled portion of `dataset`."""
        data_loader = DataLoader(dataset, batch_size=len(train_sampler), sampler=train_sampler)
        # Fix: iterator.next() is Python 2 only; use the builtin next().
        sample = next(iter(data_loader))
        ratings, user_ids, item_ids = self.dataloader_extract(sample)
        if self.logit:
            ratings = self.logit_fn(ratings)
        ratings_dict = {'itemID': item_ids,
                        'userID': user_ids,
                        'rating': ratings}
        df = pd.DataFrame(ratings_dict)
        reader = Reader(rating_scale=(0, 1))
        data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']], reader)
        trainset = data.build_full_trainset()
        # Fix: surprise renamed AlgoBase.train() to fit(); train() was
        # removed in surprise 1.1.
        self.cf_algo.fit(trainset)

    def predict(self, dataset, sampler, batch_size=64):
        """Predict a rating for every (user, item) pair selected by `sampler`."""
        # I'm not entirely sure that the build_full_testset
        # function works as I'd expect, so instead we loop
        # through all the test ids and predict one-at-a-time
        preds = []
        data_loader = DataLoader(dataset, batch_size=len(dataset), sampler=sampler)
        sample = next(iter(data_loader))
        ratings, user_ids, item_ids = self.dataloader_extract(sample)
        for user_id, item_id in zip(user_ids, item_ids):
            # Index 3 of the surprise Prediction tuple is the estimate.
            pred = self.cf_algo.predict(str(user_id), str(item_id))[3]
            if self.logit:
                pred = self.sigmoid_fn(pred)
            preds.append(pred)
        return preds

    def score(self, dataset, sampler, batch_size=64, only_slow=True):
        """Scores the baseline on predictions made on the dataset provided,
        sampled with the given sampler. If `only_slow` is true, then only
        the slow judgments in the sampled part of the dataset are scored"""
        predictions = self.predict(dataset, sampler, batch_size)
        data_loader = DataLoader(dataset, batch_size=len(dataset), sampler=sampler)
        testset = next(iter(data_loader))
        ratings, user_ids, item_ids = self.dataloader_extract(testset)
        user_ids = user_ids.astype(int)
        ratings = torch.Tensor(ratings)
        predictions = torch.Tensor(predictions)
        # Note that all baselines are passed flattened datasets, so we
        # have to work out which of the users correspond to the latest
        # times
        if only_slow:
            # Users with uid % 3 == 2 hold the "slow" judgments.
            long_time_uids = [i for i in np.unique(user_ids) if i % 3 == 2]
            new_ratings = []
            new_preds = []
            for index, rating in enumerate(ratings):
                if user_ids[index] in long_time_uids:
                    new_ratings.append(rating)
            for index, pred in enumerate(predictions):
                if user_ids[index] in long_time_uids:
                    new_preds.append(pred)
            loss = self.loss_fn(torch.Tensor(new_preds), torch.Tensor(new_ratings).cpu())
            return loss.cpu().data.item()
        else:
            loss = self.loss_fn(predictions, ratings.cpu())
            return loss.cpu().data.item()
|
#####
#
# This class is part of the Programming the Internet of Things project.
#
# It is provided as a simple shell to guide the student and assist with
# implementation for the Programming the Internet of Things exercises,
# and designed to be modified by the student as needed.
#
import logging
from programmingtheiot.common.IDataMessageListener import IDataMessageListener
from programmingtheiot.data.ActuatorData import ActuatorData
from programmingtheiot.cda.sim.HumidifierActuatorSimTask import HumidifierActuatorSimTask
from programmingtheiot.cda.sim.HvacActuatorSimTask import HvacActuatorSimTask
from programmingtheiot.data.SensorData import SensorData
class ActuatorAdapterManager(object):
    """
    Manages the actuator tasks (humidifier, HVAC, LED) using either emulators
    or simulators, and forwards actuator command responses to a registered
    IDataMessageListener.
    """

    def __init__(self, useEmulator: bool = False):
        """
        If useEmulator is True means Emulator will be used and if False Simulator will be used.
        """
        self.useEmulator = useEmulator
        self.dataMsgListener = None
        if self.useEmulator:
            logging.info("Emulators will be used")
            # Emulator classes are loaded lazily via __import__ so their
            # dependencies are only required when emulation is enabled.
            # Loading the Humidity Emulator.
            humidifierModule = __import__('programmingtheiot.cda.emulated.HumidifierEmulatorTask', fromlist = ['HumidifierEmulatorTask'])
            hueClazz = getattr(humidifierModule, 'HumidifierEmulatorTask')
            self.humidifierEmulator = hueClazz()
            # Loading the HVAC Emulator.
            hvacModule = __import__('programmingtheiot.cda.emulated.HvacEmulatorTask', fromlist = ['HvacEmulatorTask'])
            hvacAttribute = getattr(hvacModule, 'HvacEmulatorTask')
            self.hvacEmulator = hvacAttribute()
            # Loading the LED Emulator.
            ledModule = __import__('programmingtheiot.cda.emulated.LedDisplayEmulatorTask', fromlist = ['LedDisplayEmulatorTask'])
            ledAttribute = getattr(ledModule, 'LedDisplayEmulatorTask')
            self.ledEmulator = ledAttribute()
        else:
            logging.info("Simulators will be used")
            self.humidifierActuator = HumidifierActuatorSimTask()
            self.hvacActuator = HvacActuatorSimTask()

    def sendActuatorCommand(self, data: ActuatorData) -> bool:
        """
        Sends Humidifier or HVAC actuator commands based on the 'data' received,
        notifying the registered listener of non-response commands first.

        Returns True when a command was dispatched, False otherwise.
        """
        # Bug fix: the original dereferenced data.actuatorType below even when
        # data was None, raising AttributeError; bail out early instead.
        if data is None:
            logging.warning("Received no actuator command data. Ignoring.")
            return False
        if data.isResponseFlagEnabled() == False:
            # Bug fix: guard the listener - the original invoked it even when
            # none had been registered (AttributeError on None).
            if self.dataMsgListener:
                self.dataMsgListener.handleActuatorCommandResponse(data)
        if self.useEmulator == False:
            if data.actuatorType == ActuatorData.HUMIDIFIER_ACTUATOR_TYPE:
                logging.info("Humidifier actuator initiated...")
                self.humidifierActuator.updateActuator(data)
            else:
                logging.info("HVAC actuator initiated...")
                self.hvacActuator.updateActuator(data)
        else:
            if data.actuatorType == ActuatorData.HUMIDIFIER_ACTUATOR_TYPE:
                logging.info("Humidifier actuator initiated...")
                self.humidifierEmulator.updateActuator(data)
            else:
                logging.info("HVAC actuator initiated...")
                self.hvacEmulator.updateActuator(data)
        # Fix: the method is annotated -> bool but previously returned None.
        return True

    def setDataMessageListener(self, listener: IDataMessageListener) -> bool:
        """
        Registers the listener that receives actuator command responses.
        Returns True when a listener was registered, False otherwise.
        """
        if listener != None:
            self.dataMsgListener = listener
            return True
        # Fix: the method is annotated -> bool but previously returned None
        # (falsy, but implicit) when no listener was supplied.
        return False
from dotbimpy.file import *
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.