# Source: yocto/poky/bitbake/lib/bb/ui/knotty.py
# (repo: libreswitch/libreswitch, license: Apache-2.0)
#
# BitBake (No)TTY UI Implementation
#
# Handling output to TTYs or files (no TTY)
#
# Copyright (C) 2006-2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import division
import os
import sys
import xmlrpclib
import logging
import progressbar
import signal
import bb.msg
import time
import fcntl
import struct
import copy
import atexit
from bb.ui import uihelper
featureSet = [bb.cooker.CookerFeatures.SEND_SANITYEVENTS]
logger = logging.getLogger("BitBake")
interactive = sys.stdout.isatty()
class BBProgress(progressbar.ProgressBar):
def __init__(self, msg, maxval):
self.msg = msg
widgets = [progressbar.Percentage(), ' ', progressbar.Bar(), ' ',
progressbar.ETA()]
try:
self._resize_default = signal.getsignal(signal.SIGWINCH)
except:
self._resize_default = None
progressbar.ProgressBar.__init__(self, maxval, [self.msg + ": "] + widgets, fd=sys.stdout)
def _handle_resize(self, signum, frame):
progressbar.ProgressBar._handle_resize(self, signum, frame)
if self._resize_default:
self._resize_default(signum, frame)
def finish(self):
progressbar.ProgressBar.finish(self)
if self._resize_default:
signal.signal(signal.SIGWINCH, self._resize_default)
class NonInteractiveProgress(object):
fobj = sys.stdout
def __init__(self, msg, maxval):
self.msg = msg
self.maxval = maxval
def start(self):
self.fobj.write("%s..." % self.msg)
self.fobj.flush()
return self
def update(self, value):
pass
def finish(self):
self.fobj.write("done.\n")
self.fobj.flush()
def new_progress(msg, maxval):
if interactive:
return BBProgress(msg, maxval)
else:
return NonInteractiveProgress(msg, maxval)
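# Typical usage, mirroring the event handlers further down in this file
# (the numeric values here are illustrative only):
#   progress = new_progress("Parsing recipes", 100).start()
#   progress.update(42)
#   progress.finish()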
def pluralise(singular, plural, qty):
    if qty == 1:
return singular % qty
else:
return plural % qty
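# For example:
#   pluralise("%s task failed", "%s tasks failed", 1) == "1 task failed"
#   pluralise("%s task failed", "%s tasks failed", 3) == "3 tasks failed"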
class InteractConsoleLogFilter(logging.Filter):
def __init__(self, tf, format):
self.tf = tf
self.format = format
def filter(self, record):
if record.levelno == self.format.NOTE and (record.msg.startswith("Running") or record.msg.startswith("recipe ")):
return False
self.tf.clearFooter()
return True
class TerminalFilter(object):
rows = 25
columns = 80
def sigwinch_handle(self, signum, frame):
self.rows, self.columns = self.getTerminalColumns()
if self._sigwinch_default:
self._sigwinch_default(signum, frame)
def getTerminalColumns(self):
def ioctl_GWINSZ(fd):
try:
cr = struct.unpack('hh', fcntl.ioctl(fd, self.termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(sys.stdout.fileno())
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
                cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
cr = (25, 80)
return cr
def __init__(self, main, helper, console, errconsole, format):
self.main = main
self.helper = helper
self.cuu = None
self.stdinbackup = None
self.interactive = sys.stdout.isatty()
self.footer_present = False
self.lastpids = []
if not self.interactive:
return
try:
import curses
except ImportError:
sys.exit("FATAL: The knotty ui could not load the required curses python module.")
import termios
self.curses = curses
self.termios = termios
try:
fd = sys.stdin.fileno()
self.stdinbackup = termios.tcgetattr(fd)
new = copy.deepcopy(self.stdinbackup)
new[3] = new[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSADRAIN, new)
curses.setupterm()
if curses.tigetnum("colors") > 2:
format.enable_color()
self.ed = curses.tigetstr("ed")
if self.ed:
self.cuu = curses.tigetstr("cuu")
try:
self._sigwinch_default = signal.getsignal(signal.SIGWINCH)
signal.signal(signal.SIGWINCH, self.sigwinch_handle)
except:
pass
self.rows, self.columns = self.getTerminalColumns()
except:
self.cuu = None
if not self.cuu:
self.interactive = False
bb.note("Unable to use interactive mode for this terminal, using fallback")
return
console.addFilter(InteractConsoleLogFilter(self, format))
errconsole.addFilter(InteractConsoleLogFilter(self, format))
def clearFooter(self):
if self.footer_present:
lines = self.footer_present
sys.stdout.write(self.curses.tparm(self.cuu, lines))
sys.stdout.write(self.curses.tparm(self.ed))
self.footer_present = False
def updateFooter(self):
if not self.cuu:
return
activetasks = self.helper.running_tasks
failedtasks = self.helper.failed_tasks
runningpids = self.helper.running_pids
if self.footer_present and (self.lastcount == self.helper.tasknumber_current) and (self.lastpids == runningpids):
return
if self.footer_present:
self.clearFooter()
if (not self.helper.tasknumber_total or self.helper.tasknumber_current == self.helper.tasknumber_total) and not len(activetasks):
return
tasks = []
for t in runningpids:
tasks.append("%s (pid %s)" % (activetasks[t]["title"], t))
if self.main.shutdown:
content = "Waiting for %s running tasks to finish:" % len(activetasks)
elif not len(activetasks):
content = "No currently running tasks (%s of %s)" % (self.helper.tasknumber_current, self.helper.tasknumber_total)
else:
content = "Currently %s running tasks (%s of %s):" % (len(activetasks), self.helper.tasknumber_current, self.helper.tasknumber_total)
print(content)
lines = 1 + int(len(content) / (self.columns + 1))
for tasknum, task in enumerate(tasks[:(self.rows - 2)]):
content = "%s: %s" % (tasknum, task)
print(content)
lines = lines + 1 + int(len(content) / (self.columns + 1))
self.footer_present = lines
self.lastpids = runningpids[:]
self.lastcount = self.helper.tasknumber_current
def finish(self):
if self.stdinbackup:
fd = sys.stdin.fileno()
self.termios.tcsetattr(fd, self.termios.TCSADRAIN, self.stdinbackup)
def _log_settings_from_server(server):
# Get values of variables which control our output
includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"])
if error:
logger.error("Unable to get the value of BBINCLUDELOGS variable: %s" % error)
raise BaseException(error)
loglines, error = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"])
if error:
logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s" % error)
raise BaseException(error)
consolelogfile, error = server.runCommand(["getSetVariable", "BB_CONSOLELOG"])
if error:
logger.error("Unable to get the value of BB_CONSOLELOG variable: %s" % error)
raise BaseException(error)
return includelogs, loglines, consolelogfile
_evt_list = [ "bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.LogRecord",
"bb.build.TaskFailed", "bb.build.TaskBase", "bb.event.ParseStarted",
"bb.event.ParseProgress", "bb.event.ParseCompleted", "bb.event.CacheLoadStarted",
"bb.event.CacheLoadProgress", "bb.event.CacheLoadCompleted", "bb.command.CommandFailed",
"bb.command.CommandExit", "bb.command.CommandCompleted", "bb.cooker.CookerExit",
"bb.event.MultipleProviders", "bb.event.NoProvider", "bb.runqueue.sceneQueueTaskStarted",
"bb.runqueue.runQueueTaskStarted", "bb.runqueue.runQueueTaskFailed", "bb.runqueue.sceneQueueTaskFailed",
"bb.event.BuildBase", "bb.build.TaskStarted", "bb.build.TaskSucceeded", "bb.build.TaskFailedSilent"]
def main(server, eventHandler, params, tf = TerminalFilter):
includelogs, loglines, consolelogfile = _log_settings_from_server(server)
if sys.stdin.isatty() and sys.stdout.isatty():
log_exec_tty = True
else:
log_exec_tty = False
helper = uihelper.BBUIHelper()
console = logging.StreamHandler(sys.stdout)
errconsole = logging.StreamHandler(sys.stderr)
format_str = "%(levelname)s: %(message)s"
format = bb.msg.BBLogFormatter(format_str)
bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut)
bb.msg.addDefaultlogFilter(errconsole, bb.msg.BBLogFilterStdErr)
console.setFormatter(format)
errconsole.setFormatter(format)
logger.addHandler(console)
logger.addHandler(errconsole)
bb.utils.set_process_name("KnottyUI")
if params.options.remote_server and params.options.kill_server:
server.terminateServer()
return
if consolelogfile and not params.options.show_environment and not params.options.show_versions:
bb.utils.mkdirhier(os.path.dirname(consolelogfile))
conlogformat = bb.msg.BBLogFormatter(format_str)
consolelog = logging.FileHandler(consolelogfile)
bb.msg.addDefaultlogFilter(consolelog)
consolelog.setFormatter(conlogformat)
logger.addHandler(consolelog)
llevel, debug_domains = bb.msg.constructLogOptions()
server.runCommand(["setEventMask", server.getEventHandle(), llevel, debug_domains, _evt_list])
universe = False
if not params.observe_only:
params.updateFromServer(server)
params.updateToServer(server, os.environ.copy())
cmdline = params.parseActions()
if not cmdline:
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
return 1
if 'msg' in cmdline and cmdline['msg']:
logger.error(cmdline['msg'])
return 1
if cmdline['action'][0] == "buildTargets" and "universe" in cmdline['action'][1]:
universe = True
ret, error = server.runCommand(cmdline['action'])
if error:
logger.error("Command '%s' failed: %s" % (cmdline, error))
return 1
elif ret != True:
logger.error("Command '%s' failed: returned %s" % (cmdline, ret))
return 1
parseprogress = None
cacheprogress = None
main.shutdown = 0
interrupted = False
return_value = 0
errors = 0
warnings = 0
taskfailures = []
termfilter = tf(main, helper, console, errconsole, format)
atexit.register(termfilter.finish)
while True:
try:
event = eventHandler.waitEvent(0)
if event is None:
if main.shutdown > 1:
break
termfilter.updateFooter()
event = eventHandler.waitEvent(0.25)
if event is None:
continue
helper.eventHandler(event)
if isinstance(event, bb.runqueue.runQueueExitWait):
if not main.shutdown:
main.shutdown = 1
continue
if isinstance(event, bb.event.LogExecTTY):
if log_exec_tty:
tries = event.retries
while tries:
print("Trying to run: %s" % event.prog)
if os.system(event.prog) == 0:
break
time.sleep(event.sleep_delay)
tries -= 1
if tries:
continue
logger.warn(event.msg)
continue
if isinstance(event, logging.LogRecord):
if event.levelno >= format.ERROR:
errors = errors + 1
return_value = 1
elif event.levelno == format.WARNING:
warnings = warnings + 1
if event.taskpid != 0:
# For "normal" logging conditions, don't show note logs from tasks
# but do show them if the user has changed the default log level to
# include verbose/debug messages
if event.levelno <= format.NOTE and (event.levelno < llevel or (event.levelno == format.NOTE and llevel != format.VERBOSE)):
continue
# Prefix task messages with recipe/task
if event.taskpid in helper.running_tasks:
taskinfo = helper.running_tasks[event.taskpid]
event.msg = taskinfo['title'] + ': ' + event.msg
if hasattr(event, 'fn'):
event.msg = event.fn + ': ' + event.msg
logger.handle(event)
continue
if isinstance(event, bb.build.TaskFailedSilent):
logger.warn("Logfile for failed setscene task is %s" % event.logfile)
continue
if isinstance(event, bb.build.TaskFailed):
return_value = 1
logfile = event.logfile
if logfile and os.path.exists(logfile):
termfilter.clearFooter()
bb.error("Logfile of failure stored in: %s" % logfile)
if includelogs and not event.errprinted:
print("Log data follows:")
f = open(logfile, "r")
lines = []
while True:
l = f.readline()
if l == '':
break
l = l.rstrip()
if loglines:
lines.append(' | %s' % l)
if len(lines) > int(loglines):
lines.pop(0)
else:
print('| %s' % l)
f.close()
if lines:
for line in lines:
print(line)
if isinstance(event, bb.build.TaskBase):
logger.info(event._message)
continue
if isinstance(event, bb.event.ParseStarted):
if event.total == 0:
continue
parseprogress = new_progress("Parsing recipes", event.total).start()
continue
if isinstance(event, bb.event.ParseProgress):
parseprogress.update(event.current)
continue
if isinstance(event, bb.event.ParseCompleted):
if not parseprogress:
continue
parseprogress.finish()
print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
% ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)))
continue
if isinstance(event, bb.event.CacheLoadStarted):
cacheprogress = new_progress("Loading cache", event.total).start()
continue
if isinstance(event, bb.event.CacheLoadProgress):
cacheprogress.update(event.current)
continue
if isinstance(event, bb.event.CacheLoadCompleted):
cacheprogress.finish()
print("Loaded %d entries from dependency cache." % event.num_entries)
continue
if isinstance(event, bb.command.CommandFailed):
return_value = event.exitcode
if event.error:
errors = errors + 1
logger.error("Command execution failed: %s", event.error)
main.shutdown = 2
continue
if isinstance(event, bb.command.CommandExit):
if not return_value:
return_value = event.exitcode
continue
if isinstance(event, (bb.command.CommandCompleted, bb.cooker.CookerExit)):
main.shutdown = 2
continue
if isinstance(event, bb.event.MultipleProviders):
logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "",
event._item,
", ".join(event._candidates))
rtime = ""
if event._is_runtime:
rtime = "R"
logger.info("consider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, event._item))
continue
if isinstance(event, bb.event.NoProvider):
if event._runtime:
r = "R"
else:
r = ""
extra = ''
if not event._reasons:
if event._close_matches:
extra = ". Close matches:\n %s" % '\n '.join(event._close_matches)
# For universe builds, only show these as warnings, not errors
h = logger.warning
if not universe:
return_value = 1
errors = errors + 1
h = logger.error
if event._dependees:
h("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s", r, event._item, ", ".join(event._dependees), r, extra)
else:
h("Nothing %sPROVIDES '%s'%s", r, event._item, extra)
if event._reasons:
for reason in event._reasons:
h("%s", reason)
continue
if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
logger.info("Running setscene task %d of %d (%s)" % (event.stats.completed + event.stats.active + event.stats.failed + 1, event.stats.total, event.taskstring))
continue
if isinstance(event, bb.runqueue.runQueueTaskStarted):
if event.noexec:
tasktype = 'noexec task'
else:
tasktype = 'task'
logger.info("Running %s %s of %s (ID: %s, %s)",
tasktype,
event.stats.completed + event.stats.active +
event.stats.failed + 1,
event.stats.total, event.taskid, event.taskstring)
continue
if isinstance(event, bb.runqueue.runQueueTaskFailed):
return_value = 1
taskfailures.append(event.taskstring)
logger.error("Task %s (%s) failed with exit code '%s'",
event.taskid, event.taskstring, event.exitcode)
continue
if isinstance(event, bb.runqueue.sceneQueueTaskFailed):
logger.warn("Setscene task %s (%s) failed with exit code '%s' - real task will be run instead",
event.taskid, event.taskstring, event.exitcode)
continue
if isinstance(event, bb.event.DepTreeGenerated):
continue
# ignore
if isinstance(event, (bb.event.BuildBase,
bb.event.MetadataEvent,
bb.event.StampUpdate,
bb.event.ConfigParsed,
bb.event.RecipeParsed,
bb.event.RecipePreFinalise,
bb.runqueue.runQueueEvent,
bb.event.OperationStarted,
bb.event.OperationCompleted,
bb.event.OperationProgress,
bb.event.DiskFull)):
continue
logger.error("Unknown event: %s", event)
except EnvironmentError as ioerror:
termfilter.clearFooter()
# ignore interrupted io
if ioerror.args[0] == 4:
continue
sys.stderr.write(str(ioerror))
if not params.observe_only:
_, error = server.runCommand(["stateForceShutdown"])
main.shutdown = 2
except KeyboardInterrupt:
termfilter.clearFooter()
if params.observe_only:
print("\nKeyboard Interrupt, exiting observer...")
main.shutdown = 2
if not params.observe_only and main.shutdown == 1:
print("\nSecond Keyboard Interrupt, stopping...\n")
_, error = server.runCommand(["stateForceShutdown"])
if error:
logger.error("Unable to cleanly stop: %s" % error)
if not params.observe_only and main.shutdown == 0:
print("\nKeyboard Interrupt, closing down...\n")
interrupted = True
_, error = server.runCommand(["stateShutdown"])
if error:
logger.error("Unable to cleanly shutdown: %s" % error)
main.shutdown = main.shutdown + 1
pass
except Exception as e:
import traceback
sys.stderr.write(traceback.format_exc())
if not params.observe_only:
_, error = server.runCommand(["stateForceShutdown"])
main.shutdown = 2
return_value = 1
try:
summary = ""
if taskfailures:
summary += pluralise("\nSummary: %s task failed:",
"\nSummary: %s tasks failed:", len(taskfailures))
for failure in taskfailures:
summary += "\n %s" % failure
if warnings:
summary += pluralise("\nSummary: There was %s WARNING message shown.",
"\nSummary: There were %s WARNING messages shown.", warnings)
if return_value and errors:
summary += pluralise("\nSummary: There was %s ERROR message shown, returning a non-zero exit code.",
"\nSummary: There were %s ERROR messages shown, returning a non-zero exit code.", errors)
if summary:
print(summary)
if interrupted:
print("Execution was interrupted, returning a non-zero exit code.")
if return_value == 0:
return_value = 1
except IOError as e:
import errno
if e.errno == errno.EPIPE:
pass
return return_value

# Source: legacy/ad/feature_extractor/list_queries.py
# (repo: GaloisInc/adapt, license: BSD-3-Clause)
#! /usr/bin/env python3
import sys, os, csv, json, math, logging
sys.path.append(os.path.expanduser('~/adapt/tools'))
import gremlin_query
# variable bindings for gremlin queries
bindings = {
'ETYPE':'eventType',
'STYPE':'subjectType',
'SIZE':'size',
'STIME':'startedAtTime',
'DPORT':'dstPort',
'SPORT':'srcPort',
'DADDRESS':'dstAddress',
'PROCESS':0,
'EVENT':4,
'CHECK_FILE_ATTRIBUTES':3,
'CLOSE':5,
'CONNECT':6,
'EXECUTE':9,
'UNLINK':12,
'MODIFY_FILE_ATTRIBUTES':14,
'OPEN':16,
'READ':17,
'RENAME':20,
'WRITE':21,
'EXIT':36,
'F_A_E_I':'EDGE_FILE_AFFECTS_EVENT in',
'F_A_E_O':'EDGE_FILE_AFFECTS_EVENT out',
'E_A_F_I':'EDGE_EVENT_AFFECTS_FILE in',
'E_A_F_O':'EDGE_EVENT_AFFECTS_FILE out',
'N_A_E_I':'EDGE_NETFLOW_AFFECTS_EVENT in',
'N_A_E_O':'EDGE_NETFLOW_AFFECTS_EVENT out',
'E_A_N_I':'EDGE_EVENT_AFFECTS_NETFLOW in',
'E_A_N_O':'EDGE_EVENT_AFFECTS_NETFLOW out',
'E_G_B_S_I':'EDGE_EVENT_ISGENERATEDBY_SUBJECT in',
'E_G_B_S_O':'EDGE_EVENT_ISGENERATEDBY_SUBJECT out'
}
"""
fields:
view_type : string, specifies the name of the view
node_ids_query : string, query to get ids of the prospective nodes
    features_queries : map<string, string>, set of feature names and
        their corresponding gremlin queries to compute those features.
Each query must be applicable to the individual nodes
returned by node_ids_query above
"""
class ListQueries:
def __init__(self, vt, nq, fq):
self.view_type = vt
self.node_ids_query = nq
self.features_queries = fq
def list_view(self):
# extract features
keys = sorted(self.features_queries.keys())
QUERY = self.node_ids_query + ";if(IDS!=[]){"
for i in range(0,len(keys)):
if type(self.features_queries[keys[i]]) == type(dict()):
QUERY += "yf{}=".format(i) + self.features_queries[keys[i]]['first'] + ";"
if 'second' in self.features_queries[keys[i]].keys():
QUERY += "ys{}=".format(i) + self.features_queries[keys[i]]['second'] + ";"
if 'third' in self.features_queries[keys[i]].keys():
QUERY += "yt{}=".format(i) + self.features_queries[keys[i]]['third'] + ";"
else:
QUERY += "x{}=".format(i) + self.features_queries[keys[i]] + ";"
QUERY += "[IDS"
for i in range(0,len(keys)):
if type(self.features_queries[keys[i]]) == type(dict()):
QUERY += ",[yf{}.toList()".format(i)
if 'second' in self.features_queries[keys[i]].keys():
QUERY += ",ys{}.toList()".format(i)
if 'third' in self.features_queries[keys[i]].keys():
QUERY += ",yt{}.toList()".format(i)
QUERY += "]"
else:
QUERY += ",x{}.toList()".format(i)
QUERY += "]}else [];"
queries = QUERY.split(';')
print("VIEW " + self.view_type + "\n")
for q in queries:
print("{0}".format(q))
return True
if __name__ == '__main__':
in_json = sys.argv[1]
with open(in_json) as f:
views = json.loads(f.read())
for view_type in sorted(views.keys()):
view_data = views[view_type]
view = ListQueries(view_type, view_data['instance_set'], view_data['feature_set'])
view.list_view()

# Source: Lib/datetime.py
# (repo: Kshitijkrishnadas/haribol, license: bzip2-1.0.6)
"""Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
import sys
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
# -1 is a placeholder for indexing purposes.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes.
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
_DAYS_BEFORE_MONTH.append(dbm)
dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
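# For example, _is_leap(1900) evaluates to False (century rule) while
# _is_leap(2000) evaluates to True (divisible by 400).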
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
"year, month -> number of days in year preceding first day of month."
assert 1 <= month <= 12, 'month must be in 1..12'
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
def _ymd2ord(year, month, day):
"year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
assert 1 <= month <= 12, 'month must be in 1..12'
dim = _days_in_month(year, month)
assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
return (_days_before_year(year) +
_days_before_month(year, month) +
day)
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # " " " " 100 "
_DI4Y = _days_before_year(5) # " " " " 4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
# repeats exactly every 400 years. The basic strategy is to find the
# closest 400-year boundary at or before n, then work with the offset
# from that boundary to n. Life is much clearer if we subtract 1 from
# n first -- then the values of n at 400-year boundaries are exactly
# those divisible by _DI400Y:
#
# D M Y n n-1
# -- --- ---- ---------- ----------------
# 31 Dec -400 -_DI400Y -_DI400Y -1
# 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
# ...
# 30 Dec 000 -1 -2
# 31 Dec 000 0 -1
# 1 Jan 001 1 0 400-year boundary
# 2 Jan 001 2 1
# 3 Jan 001 3 2
# ...
# 31 Dec 400 _DI400Y _DI400Y -1
# 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
n -= 1
n400, n = divmod(n, _DI400Y)
year = n400 * 400 + 1 # ..., -399, 1, 401, ...
# Now n is the (non-negative) offset, in days, from January 1 of year, to
# the desired date. Now compute how many 100-year cycles precede n.
# Note that it's possible for n100 to equal 4! In that case 4 full
# 100-year cycles precede the desired day, which implies the desired
# day is December 31 at the end of a 400-year cycle.
n100, n = divmod(n, _DI100Y)
# Now compute how many 4-year cycles precede it.
n4, n = divmod(n, _DI4Y)
# And now how many single years. Again n1 can be 4, and again meaning
# that the desired day is December 31 at the end of the 4-year cycle.
n1, n = divmod(n, 365)
year += n100 * 100 + n4 * 4 + n1
if n1 == 4 or n100 == 4:
assert n == 0
return year-1, 12, 31
# Now the year is correct, and n is the offset from January 1. We find
# the month via an estimate that's either exact or one too large.
leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
assert leapyear == _is_leap(year)
month = (n + 50) >> 5
preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
if preceding > n: # estimate is too large
month -= 1
preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
n -= preceding
assert 0 <= n < _days_in_month(year, month)
# Now the year and month are correct, and n is the offset from the
# start of that month: we're done!
return year, month, n+1
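# A quick round-trip check of the two conversions above, traced by hand:
#   _ymd2ord(1, 1, 1) == 1          and _ord2ymd(1) == (1, 1, 1)
#   _ymd2ord(2000, 3, 1) == 730180  and _ord2ymd(730180) == (2000, 3, 1)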
# Month and day names. For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us, timespec='auto'):
specs = {
'hours': '{:02d}',
'minutes': '{:02d}:{:02d}',
'seconds': '{:02d}:{:02d}:{:02d}',
'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}',
'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}'
}
if timespec == 'auto':
# Skip trailing microseconds when us==0.
timespec = 'microseconds' if us else 'seconds'
elif timespec == 'milliseconds':
us //= 1000
try:
fmt = specs[timespec]
except KeyError:
raise ValueError('Unknown timespec value')
else:
return fmt.format(hh, mm, ss, us)
def _format_offset(off):
s = ''
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
mm, ss = divmod(mm, timedelta(minutes=1))
s += "%s%02d:%02d" % (sign, hh, mm)
if ss or ss.microseconds:
s += ":%02d" % ss.seconds
if ss.microseconds:
s += '.%06d' % ss.microseconds
return s
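# For example, tracing the helper above:
#   _format_offset(timedelta(hours=5, minutes=30)) == '+05:30'
#   _format_offset(timedelta(hours=-3)) == '-03:00'
#   _format_offset(None) == ''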
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
# Don't call utcoffset() or tzname() unless actually needed.
freplace = None # the string to use for %f
zreplace = None # the string to use for %z
Zreplace = None # the string to use for %Z
# Scan format for %z and %Z escapes, replacing as needed.
newformat = []
push = newformat.append
i, n = 0, len(format)
while i < n:
ch = format[i]
i += 1
if ch == '%':
if i < n:
ch = format[i]
i += 1
if ch == 'f':
if freplace is None:
freplace = '%06d' % getattr(object,
'microsecond', 0)
newformat.append(freplace)
elif ch == 'z':
if zreplace is None:
zreplace = ""
if hasattr(object, "utcoffset"):
offset = object.utcoffset()
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, rest = divmod(offset, timedelta(hours=1))
m, rest = divmod(rest, timedelta(minutes=1))
s = rest.seconds
u = offset.microseconds
if u:
zreplace = '%c%02d%02d%02d.%06d' % (sign, h, m, s, u)
elif s:
zreplace = '%c%02d%02d%02d' % (sign, h, m, s)
else:
zreplace = '%c%02d%02d' % (sign, h, m)
assert '%' not in zreplace
newformat.append(zreplace)
elif ch == 'Z':
if Zreplace is None:
Zreplace = ""
if hasattr(object, "tzname"):
s = object.tzname()
if s is not None:
# strftime is going to have at this: escape %
Zreplace = s.replace('%', '%%')
newformat.append(Zreplace)
else:
push('%')
push(ch)
else:
push('%')
else:
push(ch)
newformat = "".join(newformat)
return _time.strftime(newformat, timetuple)
# Helpers for parsing the result of isoformat()
def _parse_isoformat_date(dtstr):
# It is assumed that this function will only be called with a
# string of length exactly 10, and (though this is not used) ASCII-only
year = int(dtstr[0:4])
if dtstr[4] != '-':
raise ValueError('Invalid date separator: %s' % dtstr[4])
month = int(dtstr[5:7])
if dtstr[7] != '-':
raise ValueError('Invalid date separator')
day = int(dtstr[8:10])
return [year, month, day]
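# For example, _parse_isoformat_date('2019-12-04') == [2019, 12, 4].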
def _parse_hh_mm_ss_ff(tstr):
# Parses things of the form HH[:MM[:SS[.fff[fff]]]]
len_str = len(tstr)
time_comps = [0, 0, 0, 0]
pos = 0
for comp in range(0, 3):
if (len_str - pos) < 2:
raise ValueError('Incomplete time component')
time_comps[comp] = int(tstr[pos:pos+2])
pos += 2
next_char = tstr[pos:pos+1]
if not next_char or comp >= 2:
break
if next_char != ':':
raise ValueError('Invalid time separator: %c' % next_char)
pos += 1
if pos < len_str:
if tstr[pos] != '.':
raise ValueError('Invalid microsecond component')
else:
pos += 1
len_remainder = len_str - pos
if len_remainder not in (3, 6):
raise ValueError('Invalid microsecond component')
time_comps[3] = int(tstr[pos:])
if len_remainder == 3:
time_comps[3] *= 1000
return time_comps
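# Worked examples of the parser above (fractions are scaled to microseconds):
#   _parse_hh_mm_ss_ff('12')              == [12, 0, 0, 0]
#   _parse_hh_mm_ss_ff('12:30:45.123')    == [12, 30, 45, 123000]
#   _parse_hh_mm_ss_ff('12:30:45.123456') == [12, 30, 45, 123456]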
def _parse_isoformat_time(tstr):
# Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]]
len_str = len(tstr)
if len_str < 2:
raise ValueError('Isoformat time too short')
# This is equivalent to re.search('[+-]', tstr), but faster
tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1)
timestr = tstr[:tz_pos-1] if tz_pos > 0 else tstr
time_comps = _parse_hh_mm_ss_ff(timestr)
tzi = None
if tz_pos > 0:
tzstr = tstr[tz_pos:]
# Valid time zone strings are:
# HH:MM len: 5
# HH:MM:SS len: 8
# HH:MM:SS.ffffff len: 15
if len(tzstr) not in (5, 8, 15):
raise ValueError('Malformed time zone string')
tz_comps = _parse_hh_mm_ss_ff(tzstr)
if all(x == 0 for x in tz_comps):
tzi = timezone.utc
else:
tzsign = -1 if tstr[tz_pos - 1] == '-' else 1
td = timedelta(hours=tz_comps[0], minutes=tz_comps[1],
seconds=tz_comps[2], microseconds=tz_comps[3])
tzi = timezone(tzsign * td)
time_comps.append(tzi)
return time_comps
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be strictly between "
"-timedelta(hours=24) and timedelta(hours=24)" %
(name, offset))
def _check_int_field(value):
if isinstance(value, int):
return value
if isinstance(value, float):
raise TypeError('integer argument expected, got float')
try:
value = value.__index__()
except AttributeError:
pass
else:
if not isinstance(value, int):
raise TypeError('__index__ returned non-int (type %s)' %
type(value).__name__)
return value
orig = value
try:
value = value.__int__()
except AttributeError:
pass
else:
if not isinstance(value, int):
raise TypeError('__int__ returned non-int (type %s)' %
type(value).__name__)
import warnings
warnings.warn("an integer is required (got type %s)" %
type(orig).__name__,
DeprecationWarning,
stacklevel=2)
return value
raise TypeError('an integer is required (got type %s)' %
type(value).__name__)
def _check_date_fields(year, month, day):
year = _check_int_field(year)
month = _check_int_field(month)
day = _check_int_field(day)
if not MINYEAR <= year <= MAXYEAR:
raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
if not 1 <= month <= 12:
raise ValueError('month must be in 1..12', month)
dim = _days_in_month(year, month)
if not 1 <= day <= dim:
raise ValueError('day must be in 1..%d' % dim, day)
return year, month, day
def _check_time_fields(hour, minute, second, microsecond, fold):
hour = _check_int_field(hour)
minute = _check_int_field(minute)
second = _check_int_field(second)
microsecond = _check_int_field(microsecond)
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
if fold not in (0, 1):
raise ValueError('fold must be either 0 or 1', fold)
return hour, minute, second, microsecond, fold
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
def _divide_and_round(a, b):
"""divide a by b and round result to the nearest integer
When the ratio is exactly half-way between two integers,
the even integer is returned.
"""
# Based on the reference implementation for divmod_near
# in Objects/longobject.c.
q, r = divmod(a, b)
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
# positive, 2 * r < b if b negative.
r *= 2
greater_than_half = r > b if b > 0 else r < b
if greater_than_half or r == b and q % 2 == 1:
q += 1
return q
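# Worked examples of the half-to-even rounding described above:
#   _divide_and_round(7, 2) == 4     # 3.5 rounds to the even neighbour 4
#   _divide_and_round(5, 2) == 2     # 2.5 rounds to the even neighbour 2
#   _divide_and_round(-7, 2) == -4   # -3.5 rounds to -4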
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds = round(microseconds + usdouble)
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
else:
microseconds = int(microseconds)
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
microseconds = round(microseconds + usdouble)
assert isinstance(s, int)
assert isinstance(microseconds, int)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
seconds, us = divmod(microseconds, 1000000)
s += seconds
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
self._hashcode = -1
return self
def __repr__(self):
args = []
if self._days:
args.append("days=%d" % self._days)
if self._seconds:
args.append("seconds=%d" % self._seconds)
if self._microseconds:
args.append("microseconds=%d" % self._microseconds)
if not args:
args.append('0')
return "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
', '.join(args))
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds) * 10**6 +
self.microseconds) / 10**6
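    # For example, timedelta(days=1, seconds=30).total_seconds() == 86430.0
    # and timedelta(milliseconds=500).total_seconds() == 0.5.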
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
usec = self._to_microseconds()
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(usec * a, b))
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, _divide_and_round(usec, other))
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(b * usec, a))
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
return NotImplemented
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day', '_hashcode'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if (month is None and
isinstance(year, (bytes, str)) and len(year) == 4 and
1 <= ord(year[2:3]) <= 12):
# Pickle support
if isinstance(year, str):
try:
year = year.encode('latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a date object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(year)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hashcode = -1
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Construct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
@classmethod
def fromisoformat(cls, date_string):
"""Construct a date from the output of date.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
try:
assert len(date_string) == 10
return cls(*_parse_isoformat_date(date_string))
except Exception:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
@classmethod
def fromisocalendar(cls, year, week, day):
"""Construct a date from the ISO year, week number and weekday.
This is the inverse of the date.isocalendar() function"""
# Year is bounded this way because 9999-12-31 is (9999, 52, 5)
if not MINYEAR <= year <= MAXYEAR:
raise ValueError(f"Year is out of range: {year}")
if not 0 < week < 53:
out_of_range = True
if week == 53:
# ISO years have 53 weeks in them on years starting with a
# Thursday and leap years starting on a Wednesday
first_weekday = _ymd2ord(year, 1, 1) % 7
if (first_weekday == 4 or (first_weekday == 3 and
_is_leap(year))):
out_of_range = False
if out_of_range:
raise ValueError(f"Invalid week: {week}")
if not 0 < day < 8:
raise ValueError(f"Invalid weekday: {day} (range is [1, 7])")
# Now compute the offset from (Y, 1, 1) in days:
day_offset = (week - 1) * 7 + (day - 1)
# Calculate the ordinal day for monday, week 1
day_1 = _isoweek1monday(year)
ord_day = day_1 + day_offset
return cls(*_ord2ymd(ord_day))
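    # For example, 2004-01-01 fell on a Thursday, so ISO week 1 of 2004 starts
    # on Monday 2003-12-29 and date.fromisocalendar(2004, 1, 1) ==
    # date(2003, 12, 29).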
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
# __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
return type(self)(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return type(self).fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
(used with permission)
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo:
"""Abstract base class for time zone info classes.
    Subclasses must override the tzname(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> timedelta, positive for east of UTC, negative for west of UTC"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset as timedelta, positive for east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
# Pickle support.
def __reduce__(self):
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
_tzinfo_class = tzinfo
class time:
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo, fold
"""
__slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold'
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0):
"""Constructor.
Arguments:
hour, minute (required)
second, microsecond (default to zero)
tzinfo (default to None)
fold (keyword only, default to zero)
"""
if (isinstance(hour, (bytes, str)) and len(hour) == 6 and
ord(hour[0:1])&0x7F < 24):
# Pickle support
if isinstance(hour, str):
try:
hour = hour.encode('latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a time object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(hour, minute or None)
self._hashcode = -1
return self
hour, minute, second, microsecond, fold = _check_time_fields(
hour, minute, second, microsecond, fold)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
self._fold = fold
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@property
def fold(self):
return self._fold
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) == 0
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
return NotImplemented
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware times")
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
if self._hashcode == -1:
if self.fold:
t = self.replace(fold=0)
else:
t = self
tzoff = t.utcoffset()
if not tzoff: # zero or None
self._hashcode = hash(t._getstate()[0])
else:
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
if 0 <= h < 24:
self._hashcode = hash(time(h, m, self.second, self.microsecond))
else:
self._hashcode = hash((h, m, self.second, self.microsecond))
return self._hashcode
# Conversion to string
def _tzstr(self):
"""Return formatted timezone offset (+xx:xx) or an empty string."""
off = self.utcoffset()
return _format_offset(off)
def __repr__(self):
"""Convert to formal string, for repr()."""
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
s= "%s.%s(%d, %d%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
if self._fold:
assert s[-1:] == ")"
s = s[:-1] + ", fold=1)"
return s
def isoformat(self, timespec='auto'):
"""Return the time formatted according to ISO.
The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
part is omitted if self.microsecond == 0.
The optional argument timespec specifies the number of additional
terms of the time to include. Valid options are 'auto', 'hours',
'minutes', 'seconds', 'milliseconds' and 'microseconds'.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond, timespec)
tz = self._tzstr()
if tz:
s += tz
return s
__str__ = isoformat
@classmethod
def fromisoformat(cls, time_string):
"""Construct a time from the output of isoformat()."""
if not isinstance(time_string, str):
raise TypeError('fromisoformat: argument must be str')
try:
return cls(*_parse_isoformat_time(time_string))
except Exception:
raise ValueError(f'Invalid isoformat string: {time_string!r}')
def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1,
self._hour, self._minute, self._second,
0, 1, -1)
return _wrap_strftime(self, fmt, timetuple)
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset as timedelta, positive east of UTC
(negative west of UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (as timedelta
positive eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
_check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True, *, fold=None):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self._fold
return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold)
# Pickle support.
def _getstate(self, protocol=3):
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
h = self._hour
if self._fold and protocol > 3:
h += 128
basestate = bytes([h, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
h, self._minute, self._second, us1, us2, us3 = string
if h > 127:
self._fold = 1
self._hour = h - 128
else:
self._fold = 0
self._hour = h
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce_ex__(self, protocol):
return (self.__class__, self._getstate(protocol))
def __reduce__(self):
return self.__reduce_ex__(2)
_time_class = time # so functions w/ args named "time" can get at the class
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
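# Usage sketch (editor's addition; the output shown assumes this pure-Python
# implementation matches the stdlib datetime module's behaviour):
#
#     >>> t = time(13, 30, 15, 500)
#     >>> t.isoformat()
#     '13:30:15.000500'
#     >>> t.isoformat(timespec='minutes')
#     '13:30'
#     >>> t.replace(second=0, microsecond=0).isoformat()
#     '13:30:00'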
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + time.__slots__
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None, *, fold=0):
if (isinstance(year, (bytes, str)) and len(year) == 10 and
1 <= ord(year[2:3])&0x7F <= 12):
# Pickle support
if isinstance(year, str):
try:
year = bytes(year, 'latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a datetime object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(year, month)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
hour, minute, second, microsecond, fold = _check_time_fields(
hour, minute, second, microsecond, fold)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
self._fold = fold
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@property
def fold(self):
return self._fold
@classmethod
def _fromtimestamp(cls, t, utc, tz):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
frac, t = _math.modf(t)
us = round(frac * 1e6)
if us >= 1000000:
t += 1
us -= 1000000
elif us < 0:
t -= 1
us += 1000000
converter = _time.gmtime if utc else _time.localtime
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is None:
# As of version 2015f max fold in IANA database is
# 23 hours at 1969-09-30 13:00:00 in Kwajalein.
# Let's probe 24 hours in the past to detect a transition:
max_fold_seconds = 24 * 3600
# On Windows localtime_s throws an OSError for negative values,
# thus we can't perform fold detection for values of time less
# than the max time fold. See comments in _datetimemodule's
# version of this method for more details.
if t < max_fold_seconds and sys.platform.startswith("win"):
return result
y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6]
probe1 = cls(y, m, d, hh, mm, ss, us, tz)
trans = result - probe1 - timedelta(0, max_fold_seconds)
if trans.days < 0:
y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6]
probe2 = cls(y, m, d, hh, mm, ss, us, tz)
if probe2 == result:
result._fold = 1
else:
result = tz.fromutc(result)
return result
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
return cls._fromtimestamp(t, tz is not None, tz)
@classmethod
def utcfromtimestamp(cls, t):
"""Construct a naive UTC datetime from a POSIX timestamp."""
return cls._fromtimestamp(t, True, None)
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time, tzinfo=True):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
if tzinfo is True:
tzinfo = time.tzinfo
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
tzinfo, fold=time.fold)
@classmethod
def fromisoformat(cls, date_string):
"""Construct a datetime from the output of datetime.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
# Split this at the separator
dstr = date_string[0:10]
tstr = date_string[11:]
try:
date_components = _parse_isoformat_date(dstr)
except ValueError:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
if tstr:
try:
time_components = _parse_isoformat_time(tstr)
except ValueError:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
else:
time_components = [0, 0, 0, 0, None]
return cls(*(date_components + time_components))
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def _mktime(self):
"""Return integer POSIX timestamp."""
epoch = datetime(1970, 1, 1)
max_fold_seconds = 24 * 3600
t = (self - epoch) // timedelta(0, 1)
def local(u):
y, m, d, hh, mm, ss = _time.localtime(u)[:6]
return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)
# Our goal is to solve t = local(u) for u.
a = local(t) - t
u1 = t - a
t1 = local(u1)
if t1 == t:
# We found one solution, but it may not be the one we need.
# Look for an earlier solution (if `fold` is 0), or a
# later one (if `fold` is 1).
u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold]
b = local(u2) - u2
if a == b:
return u1
else:
b = t1 - u1
assert a != b
u2 = t - b
t2 = local(u2)
if t2 == t:
return u2
if t1 == t:
return u1
# We have found both offsets a and b, but neither t - a nor t - b is
# a solution. This means t is in the gap.
return (max, min)[self.fold](u1, u2)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
s = self._mktime()
return s + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo, fold=self.fold)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True,
*, fold=None):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self.fold
return type(self)(year, month, day, hour, minute, second,
microsecond, tzinfo, fold=fold)
def _local_timezone(self):
if self.tzinfo is None:
ts = self._mktime()
else:
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
# Extract TZ data
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
return timezone(timedelta(seconds=gmtoff), zone)
def astimezone(self, tz=None):
if tz is None:
tz = self._local_timezone()
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
mytz = self._local_timezone()
myoffset = mytz.utcoffset(self)
else:
myoffset = mytz.utcoffset(self)
if myoffset is None:
mytz = self.replace(tzinfo=None)._local_timezone()
myoffset = mytz.utcoffset(self)
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T', timespec='auto'):
"""Return the time formatted according to ISO.
The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
By default, the fractional part is omitted if self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
The optional argument timespec specifies the number of additional
terms of the time to include. Valid options are 'auto', 'hours',
'minutes', 'seconds', 'milliseconds' and 'microseconds'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond, timespec))
off = self.utcoffset()
tz = _format_offset(off)
if tz:
s += tz
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
", ".join(map(str, L)))
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
if self._fold:
assert s[-1:] == ")"
s = s[:-1] + ", fold=1)"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset as timedelta positive east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (as timedelta
positive eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
# Assume that allow_mixed means that we are called from __eq__
if allow_mixed:
if myoff != self.replace(fold=not self.fold).utcoffset():
return 2
if otoff != other.replace(fold=not other.fold).utcoffset():
return 2
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return type(self).combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
def __hash__(self):
if self._hashcode == -1:
if self.fold:
t = self.replace(fold=0)
else:
t = self
tzoff = t.utcoffset()
if tzoff is None:
self._hashcode = hash(t._getstate()[0])
else:
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
return self._hashcode
# Pickle support.
def _getstate(self, protocol=3):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
m = self._month
if self._fold and protocol > 3:
m += 128
basestate = bytes([yhi, ylo, m, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
(yhi, ylo, m, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
if m > 127:
self._fold = 1
self._month = m - 128
else:
self._fold = 0
self._month = m
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce_ex__(self, protocol):
return (self.__class__, self._getstate(protocol))
def __reduce__(self):
return self.__reduce_ex__(2)
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
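# Usage sketch (editor's addition, mirroring the stdlib datetime API):
#
#     >>> dt = datetime.combine(date(2020, 1, 2), time(3, 4, 5))
#     >>> dt.isoformat(sep=' ')
#     '2020-01-02 03:04:05'
#     >>> datetime.fromisoformat('2020-01-02T03:04:05') == dt
#     True
#     >>> (dt - datetime(2020, 1, 1)).days
#     1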
def _isoweek1monday(year):
# Helper to calculate the day number of the Monday starting week 1
# XXX This could be done more efficiently
THURSDAY = 3
firstday = _ymd2ord(year, 1, 1)
firstweekday = (firstday + 6) % 7 # See weekday() above
week1monday = firstday - firstweekday
if firstweekday > THURSDAY:
week1monday += 7
return week1monday
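# Worked example (editor's addition): for 2021, January 1 falls on a Friday,
# so firstweekday == 4 > THURSDAY and the Monday starting ISO week 1 is
# pushed forward to January 4, 2021 -- matching the ISO 8601 rule that week 1
# is the first week containing a Thursday.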
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta "
"strictly between -timedelta(hours=24) and "
"timedelta(hours=24).")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if isinstance(other, timezone):
return self._offset == other._offset
return NotImplemented
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s.%s(%r)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._offset)
return "%s.%s(%r, %r)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo "
"is not self")
return dt + self._offset
raise TypeError("fromutc() argument must be a datetime instance"
" or None")
_maxoffset = timedelta(hours=24, microseconds=-1)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
if not delta:
return 'UTC'
if delta < timedelta(0):
sign = '-'
delta = -delta
else:
sign = '+'
hours, rest = divmod(delta, timedelta(hours=1))
minutes, rest = divmod(rest, timedelta(minutes=1))
seconds = rest.seconds
microseconds = rest.microseconds
if microseconds:
return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
f'.{microseconds:06d}')
if seconds:
return f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
return f'UTC{sign}{hours:02d}:{minutes:02d}'
timezone.utc = timezone._create(timedelta(0))
# bpo-37642: These attributes are rounded to the nearest minute for backwards
# compatibility, even though the constructor will accept a wider range of
# values. This may change in the future.
timezone.min = timezone._create(-timedelta(hours=23, minutes=59))
timezone.max = timezone._create(timedelta(hours=23, minutes=59))
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
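# Usage sketch for the fixed-offset timezone class (editor's addition):
#
#     >>> est = timezone(timedelta(hours=-5), 'EST')
#     >>> est.utcoffset(None) == timedelta(hours=-5)
#     True
#     >>> est.tzname(None)
#     'EST'
#     >>> timezone(timedelta(hours=5, minutes=30)).tzname(None)
#     'UTC+05:30'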
# Some time zone algebra. For a datetime x, let
# x.n = x stripped of its timezone -- its naive time.
# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
# return None
# x.d = x.dst(), and assuming that doesn't raise an exception or
# return None
# x.s = x's standard offset, x.o - x.d
#
# Now some derived rules, where k is a duration (timedelta).
#
# 1. x.o = x.s + x.d
# This follows from the definition of x.s.
#
# 2. If x and y have the same tzinfo member, x.s = y.s.
# This is actually a requirement, an assumption we need to make about
# sane tzinfo classes.
#
# 3. The naive UTC time corresponding to x is x.n - x.o.
# This is again a requirement for a sane tzinfo class.
#
# 4. (x+k).s = x.s
# This follows from #2, and that datetime+timedelta preserves tzinfo.
#
# 5. (x+k).n = x.n + k
# Again follows from how arithmetic is defined.
#
# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
# (meaning that the various tzinfo methods exist, and don't blow up or return
# None when called).
#
# The function wants to return a datetime y with timezone tz, equivalent to x.
# x is already in UTC.
#
# By #3, we want
#
# y.n - y.o = x.n [1]
#
# The algorithm starts by attaching tz to x.n, and calling that y. So
# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
# becomes true; in effect, we want to solve [2] for k:
#
# (y+k).n - (y+k).o = x.n [2]
#
# By #1, this is the same as
#
# (y+k).n - ((y+k).s + (y+k).d) = x.n [3]
#
# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
# Substituting that into [3],
#
# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
# k - (y+k).s - (y+k).d = 0; rearranging,
# k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
# k = y.s - (y+k).d
#
# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
# approximate k by ignoring the (y+k).d term at first. Note that k can't be
# very large, since all offset-returning methods return a duration of magnitude
# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
# be 0, so ignoring it has no consequence then.
#
# In any case, the new value is
#
# z = y + y.s [4]
#
# It's helpful to step back and look at [4] from a higher level: it's simply
# mapping from UTC to tz's standard time.
#
# At this point, if
#
# z.n - z.o = x.n [5]
#
# we have an equivalent time, and are almost done. The insecurity here is
# at the start of daylight time. Picture US Eastern for concreteness. The wall
# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
# sense then. The docs ask that an Eastern tzinfo class consider such a time to
# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
# on the day DST starts. We want to return the 1:MM EST spelling because that's
# the only spelling that makes sense on the local wall clock.
#
# In fact, if [5] holds at this point, we do have the standard-time spelling,
# but that takes a bit of proof. We first prove a stronger result. What's the
# difference between the LHS and RHS of [5]? Let
#
# diff = x.n - (z.n - z.o) [6]
#
# Now
# z.n = by [4]
# (y + y.s).n = by #5
# y.n + y.s = since y.n = x.n
# x.n + y.s = since z and y have the same tzinfo member,
# y.s = z.s by #2
# x.n + z.s
#
# Plugging that back into [6] gives
#
# diff =
# x.n - ((x.n + z.s) - z.o) = expanding
# x.n - x.n - z.s + z.o = cancelling
# - z.s + z.o = by #2
# z.d
#
# So diff = z.d.
#
# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
# spelling we wanted in the endcase described above. We're done. Contrarily,
# if z.d = 0, then we have a UTC equivalent, and are also done.
#
# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
# add to z (in effect, z is in tz's standard time, and we need to shift the
# local clock into tz's daylight time).
#
# Let
#
# z' = z + z.d = z + diff [7]
#
# and we can again ask whether
#
# z'.n - z'.o = x.n [8]
#
# If so, we're done. If not, the tzinfo class is insane, according to the
# assumptions we've made. This also requires a bit of proof. As before, let's
# compute the difference between the LHS and RHS of [8] (and skipping some of
# the justifications for the kinds of substitutions we've done several times
# already):
#
# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
# x.n - (z.n + diff - z'.o) = replacing diff via [6]
# x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
# - z.n + z.n - z.o + z'.o = cancel z.n
# - z.o + z'.o = #1 twice
# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
# z'.d - z.d
#
# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
# return z', not bothering to compute z'.d.
#
# How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
# would have to change the result dst() returns: we start in DST, and moving
# a little further into it takes us out of DST.
#
# There isn't a sane case where this can happen. The closest it gets is at
# the end of DST, where there's an hour in UTC with no spelling in a hybrid
# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
# standard time. Since that's what the local clock *does*, we want to map both
# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
# in local time, but so it goes -- it's the way the local clock works.
#
# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
# (correctly) concludes that z' is not UTC-equivalent to x.
#
# Because we know z.d said z was in daylight time (else [5] would have held and
# we would have stopped then), and we know z.d != z'.d (else [8] would have held
# and we have stopped then), and there are only 2 possible values dst() can
# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
# but the reasoning doesn't depend on the example -- it depends on there being
# two possible dst() outcomes, one zero and the other non-zero). Therefore
# z' must be in standard time, and is the spelling we want in this case.
#
# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
# concerned (because it takes z' as being in standard time rather than the
# daylight time we intend here), but returning it gives the real-life "local
# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
# tz.
#
# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
# the 1:MM standard time spelling we want.
#
# So how can this break? One of the assumptions must be violated. Two
# possibilities:
#
# 1) [2] effectively says that y.s is invariant across all y belong to a given
# time zone. This isn't true if, for political reasons or continental drift,
# a region decides to change its base offset from UTC.
#
# 2) There may be versions of "double daylight" time where the tail end of
# the analysis gives up a step too early. I haven't thought about that
# enough to say.
#
# In any case, it's clear that the default fromutc() is strong enough to handle
# "almost all" time zones: so long as the standard offset is invariant, it
# doesn't matter if daylight time transition points change from year to year, or
# if daylight time is skipped in some years; it doesn't matter how large or
# small dst() may get within its bounds; and it doesn't even matter if some
# perverse time zone returns a negative dst(). So a breaking case must be
# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
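# Editor's illustrative sketch of the algorithm above (not part of the
# original module).  Take a hypothetical US-Eastern-style hybrid tzinfo and
# x = 7:MM UTC on a mid-summer day, so x.o = 0 while Eastern has o = -4h,
# d = 1h, s = o - d = -5h:
#
#     y = x with tz attached        # y.n = x.n
#     z = y + y.s = 2:MM            # step [4]: map UTC into standard time
#     z.n - z.o = 2:MM + 4h = 6:MM  # [5] fails, diff = z.d = 1h
#     z' = z + z.d = 3:MM           # step [7]: shift into daylight time
#
# z'.n - z'.o = 3:MM + 4h = 7:MM = x.n, so [8] holds and 3:MM EDT is returned.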
try:
from _datetime import *
except ImportError:
pass
else:
# Clean up unused names
del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
_DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
_check_date_fields, _check_int_field, _check_time_fields,
_check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
_date_class, _days_before_month, _days_before_year, _days_in_month,
_format_time, _format_offset, _is_leap, _isoweek1monday, _math,
_ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
_divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
_parse_hh_mm_ss_ff)
# XXX Since import * above excludes names that start with _,
# docstring does not get overwritten. In the future, it may be
# appropriate to maintain a single module level docstring and
# remove the following line.
from _datetime import __doc__
| 35.019437
| 94
| 0.565346
|
fde224c37bad686fd6ce41d369572344e37a4e0e
| 187
|
py
|
Python
|
math/linear-algebra/np-1.py
|
admariner/playground
|
02a3104472c8fa3589fe87f7265e70c61d5728c7
|
[
"MIT"
] | 3
|
2021-06-12T04:42:32.000Z
|
2021-06-24T13:57:38.000Z
|
math/linear-algebra/np-1.py
|
admariner/playground
|
02a3104472c8fa3589fe87f7265e70c61d5728c7
|
[
"MIT"
] | null | null | null |
math/linear-algebra/np-1.py
|
admariner/playground
|
02a3104472c8fa3589fe87f7265e70c61d5728c7
|
[
"MIT"
] | 1
|
2021-08-19T14:57:17.000Z
|
2021-08-19T14:57:17.000Z
|
import numpy as np
x = [5.0, 1.0, 3.0]
a = np.array(x)
print(a)
print(a.shape)
print(a.dtype)
b = np.empty([2, 2])
print(b)
c = np.zeros([3, 3])
print(c)
d = np.ones([5, 1])
print(d)
| 11
| 20
| 0.566845
|
48df2c06d143977a6b45468bb6132a16c76a187a
| 9,925
|
py
|
Python
|
pyqtgraph/graphicsItems/ViewBox/ViewBoxMenu.py
|
hyperiongeo/pyqtgraph
|
7f7c6217cc2fae5fdfad6d3c27739461abd32c84
|
[
"MIT"
] | null | null | null |
pyqtgraph/graphicsItems/ViewBox/ViewBoxMenu.py
|
hyperiongeo/pyqtgraph
|
7f7c6217cc2fae5fdfad6d3c27739461abd32c84
|
[
"MIT"
] | null | null | null |
pyqtgraph/graphicsItems/ViewBox/ViewBoxMenu.py
|
hyperiongeo/pyqtgraph
|
7f7c6217cc2fae5fdfad6d3c27739461abd32c84
|
[
"MIT"
] | null | null | null |
import importlib
from ...Qt import QT_LIB, QtCore, QtGui, QtWidgets
from ...WidgetGroup import WidgetGroup
ui_template = importlib.import_module(
f'.axisCtrlTemplate_{QT_LIB.lower()}', package=__package__)
import weakref
translate = QtCore.QCoreApplication.translate
class ViewBoxMenu(QtWidgets.QMenu):
def __init__(self, view):
QtWidgets.QMenu.__init__(self)
self.view = weakref.ref(view) ## keep weakref to view to avoid circular reference (don't know why, but this prevents the ViewBox from being collected)
self.valid = False ## tells us whether the ui needs to be updated
self.viewMap = weakref.WeakValueDictionary() ## weakrefs to all views listed in the link combos
self.setTitle(translate("ViewBox", "ViewBox options"))
self.viewAll = QtGui.QAction(translate("ViewBox", "View All"), self)
self.viewAll.triggered.connect(self.autoRange)
self.addAction(self.viewAll)
self.axes = []
self.ctrl = []
self.widgetGroups = []
self.dv = QtGui.QDoubleValidator(self)
for axis in 'XY':
m = QtWidgets.QMenu()
m.setTitle(f"{axis} {translate('ViewBox', 'axis')}")
w = QtWidgets.QWidget()
ui = ui_template.Ui_Form()
ui.setupUi(w)
a = QtWidgets.QWidgetAction(self)
a.setDefaultWidget(w)
m.addAction(a)
self.addMenu(m)
self.axes.append(m)
self.ctrl.append(ui)
wg = WidgetGroup(w)
self.widgetGroups.append(w)
connects = [
(ui.mouseCheck.toggled, 'MouseToggled'),
(ui.manualRadio.clicked, 'ManualClicked'),
(ui.minText.editingFinished, 'RangeTextChanged'),
(ui.maxText.editingFinished, 'RangeTextChanged'),
(ui.autoRadio.clicked, 'AutoClicked'),
(ui.autoPercentSpin.valueChanged, 'AutoSpinChanged'),
(ui.linkCombo.currentIndexChanged, 'LinkComboChanged'),
(ui.autoPanCheck.toggled, 'AutoPanToggled'),
(ui.visibleOnlyCheck.toggled, 'VisibleOnlyToggled')
]
for sig, fn in connects:
sig.connect(getattr(self, axis.lower()+fn))
self.ctrl[0].invertCheck.toggled.connect(self.xInvertToggled)
self.ctrl[1].invertCheck.toggled.connect(self.yInvertToggled)
## exporting is handled by GraphicsScene now
#self.export = QtWidgets.QMenu("Export")
#self.setExportMethods(view.exportMethods)
#self.addMenu(self.export)
self.leftMenu = QtWidgets.QMenu(translate("ViewBox", "Mouse Mode"))
group = QtGui.QActionGroup(self)
# This does not work! QAction _must_ be initialized with a permanent
# object as the parent or else it may be collected prematurely.
#pan = self.leftMenu.addAction("3 button", self.set3ButtonMode)
#zoom = self.leftMenu.addAction("1 button", self.set1ButtonMode)
pan = QtGui.QAction(translate("ViewBox", "3 button"), self.leftMenu)
zoom = QtGui.QAction(translate("ViewBox", "1 button"), self.leftMenu)
self.leftMenu.addAction(pan)
self.leftMenu.addAction(zoom)
pan.triggered.connect(self.set3ButtonMode)
zoom.triggered.connect(self.set1ButtonMode)
pan.setCheckable(True)
zoom.setCheckable(True)
pan.setActionGroup(group)
zoom.setActionGroup(group)
self.mouseModes = [pan, zoom]
self.addMenu(self.leftMenu)
self.view().sigStateChanged.connect(self.viewStateChanged)
self.updateState()
def setExportMethods(self, methods):
self.exportMethods = methods
self.export.clear()
for opt, fn in methods.items():
self.export.addAction(opt, self.exportMethod)
def viewStateChanged(self):
self.valid = False
if self.ctrl[0].minText.isVisible() or self.ctrl[1].minText.isVisible():
self.updateState()
def updateState(self):
## Something about the viewbox has changed; update the menu GUI
state = self.view().getState(copy=False)
if state['mouseMode'] == ViewBox.PanMode:
self.mouseModes[0].setChecked(True)
else:
self.mouseModes[1].setChecked(True)
for i in [0,1]: # x, y
tr = state['targetRange'][i]
self.ctrl[i].minText.setText("%0.5g" % tr[0])
self.ctrl[i].maxText.setText("%0.5g" % tr[1])
if state['autoRange'][i] is not False:
self.ctrl[i].autoRadio.setChecked(True)
if state['autoRange'][i] is not True:
self.ctrl[i].autoPercentSpin.setValue(state['autoRange'][i]*100)
else:
self.ctrl[i].manualRadio.setChecked(True)
self.ctrl[i].mouseCheck.setChecked(state['mouseEnabled'][i])
## Update combo to show currently linked view
c = self.ctrl[i].linkCombo
c.blockSignals(True)
try:
view = state['linkedViews'][i] ## will always be string or None
if view is None:
view = ''
ind = c.findText(view)
if ind == -1:
ind = 0
c.setCurrentIndex(ind)
finally:
c.blockSignals(False)
self.ctrl[i].autoPanCheck.setChecked(state['autoPan'][i])
self.ctrl[i].visibleOnlyCheck.setChecked(state['autoVisibleOnly'][i])
xy = ['x', 'y'][i]
self.ctrl[i].invertCheck.setChecked(state.get(xy+'Inverted', False))
self.valid = True
def popup(self, *args):
if not self.valid:
self.updateState()
QtWidgets.QMenu.popup(self, *args)
def autoRange(self):
self.view().autoRange() ## don't let signal call this directly--it'll add an unwanted argument
def xMouseToggled(self, b):
self.view().setMouseEnabled(x=b)
def xManualClicked(self):
self.view().enableAutoRange(ViewBox.XAxis, False)
def xRangeTextChanged(self):
self.ctrl[0].manualRadio.setChecked(True)
self.view().setXRange(*self._validateRangeText(0), padding=0)
def xAutoClicked(self):
val = self.ctrl[0].autoPercentSpin.value() * 0.01
self.view().enableAutoRange(ViewBox.XAxis, val)
def xAutoSpinChanged(self, val):
self.ctrl[0].autoRadio.setChecked(True)
self.view().enableAutoRange(ViewBox.XAxis, val*0.01)
def xLinkComboChanged(self, ind):
self.view().setXLink(str(self.ctrl[0].linkCombo.currentText()))
def xAutoPanToggled(self, b):
self.view().setAutoPan(x=b)
def xVisibleOnlyToggled(self, b):
self.view().setAutoVisible(x=b)
def yMouseToggled(self, b):
self.view().setMouseEnabled(y=b)
def yManualClicked(self):
self.view().enableAutoRange(ViewBox.YAxis, False)
def yRangeTextChanged(self):
self.ctrl[1].manualRadio.setChecked(True)
self.view().setYRange(*self._validateRangeText(1), padding=0)
def yAutoClicked(self):
val = self.ctrl[1].autoPercentSpin.value() * 0.01
self.view().enableAutoRange(ViewBox.YAxis, val)
def yAutoSpinChanged(self, val):
self.ctrl[1].autoRadio.setChecked(True)
self.view().enableAutoRange(ViewBox.YAxis, val*0.01)
def yLinkComboChanged(self, ind):
self.view().setYLink(str(self.ctrl[1].linkCombo.currentText()))
def yAutoPanToggled(self, b):
self.view().setAutoPan(y=b)
def yVisibleOnlyToggled(self, b):
self.view().setAutoVisible(y=b)
def yInvertToggled(self, b):
self.view().invertY(b)
def xInvertToggled(self, b):
self.view().invertX(b)
def exportMethod(self):
act = self.sender()
self.exportMethods[str(act.text())]()
def set3ButtonMode(self):
self.view().setLeftButtonAction('pan')
def set1ButtonMode(self):
self.view().setLeftButtonAction('rect')
def setViewList(self, views):
names = ['']
self.viewMap.clear()
## generate list of views to show in the link combo
for v in views:
name = v.name
if name is None: ## unnamed views do not show up in the view list (although they are linkable)
continue
names.append(name)
self.viewMap[name] = v
for i in [0,1]:
c = self.ctrl[i].linkCombo
current = c.currentText()
c.blockSignals(True)
changed = True
try:
c.clear()
for name in names:
c.addItem(name)
if name == current:
changed = False
c.setCurrentIndex(c.count()-1)
finally:
c.blockSignals(False)
if changed:
c.setCurrentIndex(0)
c.currentIndexChanged.emit(c.currentIndex())
def _validateRangeText(self, axis):
"""Validate range text inputs. Return current value(s) if invalid."""
inputs = (self.ctrl[axis].minText.text(),
self.ctrl[axis].maxText.text())
vals = self.view().viewRange()[axis]
for i, text in enumerate(inputs):
try:
vals[i] = float(text)
except ValueError:
# could not convert string to float
pass
return vals
from .ViewBox import ViewBox
| 36.488971
| 159
| 0.575718
|
7a6893c1042bae71b9e36028980afb1556a13cd9
| 3,154
|
py
|
Python
|
zerver/management/commands/set_message_flags.py
|
alexandraciobica/zulip
|
f3753504469070bfccc73f22f933c87bee7d1852
|
[
"Apache-2.0"
] | 1
|
2019-10-01T14:04:07.000Z
|
2019-10-01T14:04:07.000Z
|
zerver/management/commands/set_message_flags.py
|
alexandraciobica/zulip
|
f3753504469070bfccc73f22f933c87bee7d1852
|
[
"Apache-2.0"
] | 3
|
2020-06-05T22:30:24.000Z
|
2022-02-10T19:04:47.000Z
|
zerver/management/commands/set_message_flags.py
|
alexandraciobica/zulip
|
f3753504469070bfccc73f22f933c87bee7d1852
|
[
"Apache-2.0"
] | 1
|
2020-01-06T15:12:36.000Z
|
2020-01-06T15:12:36.000Z
|
import logging
import sys
from typing import Any, Iterable
from django.core.management.base import CommandParser
from django.db import models
from zerver.lib import utils
from zerver.lib.management import ZulipBaseCommand, CommandError
from zerver.models import UserMessage
class Command(ZulipBaseCommand):
help = """Sets user message flags. Used internally by actions.py. Marks all
Expects a comma-delimited list of user message ids via stdin, and an EOF to terminate."""
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument('-l', '--for-real',
dest='for_real',
action='store_true',
default=False,
help="Actually change message flags. Default is a dry run.")
parser.add_argument('-f', '--flag',
dest='flag',
type=str,
help="The flag to add of remove")
parser.add_argument('-o', '--op',
dest='op',
type=str,
help="The operation to do: 'add' or 'remove'")
parser.add_argument('-u', '--until',
dest='all_until',
type=str,
help="Mark all messages <= specific usermessage id")
parser.add_argument('-m', '--email',
dest='email',
type=str,
help="Email to set messages for")
self.add_realm_args(parser)
def handle(self, *args: Any, **options: Any) -> None:
if not options["flag"] or not options["op"] or not options["email"]:
raise CommandError("Please specify an operation, a flag and an email")
op = options['op']
flag = getattr(UserMessage.flags, options['flag'])
all_until = options['all_until']
email = options['email']
realm = self.get_realm(options)
user_profile = self.get_user(email, realm)
if all_until:
filt = models.Q(id__lte=all_until)
else:
filt = models.Q(message__id__in=[mid.strip() for mid in sys.stdin.read().split(',')])
mids = [m.id for m in
UserMessage.objects.filter(filt, user_profile=user_profile).order_by('-id')]
if options["for_real"]:
sys.stdin.close()
sys.stdout.close()
sys.stderr.close()
def do_update(batch: Iterable[int]) -> None:
msgs = UserMessage.objects.filter(id__in=batch)
if op == 'add':
msgs.update(flags=models.F('flags').bitor(flag))
elif op == 'remove':
msgs.update(flags=models.F('flags').bitand(~flag))
if not options["for_real"]:
logging.info("Updating %s by %s %s" % (mids, op, flag))
logging.info("Dry run completed. Run with --for-real to change message flags.")
raise CommandError
utils.run_in_batches(mids, 400, do_update, sleep_time=3)
exit(0)
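# Hypothetical invocation (editor's sketch; the ids and email are made up, and
# the realm-selection argument added by add_realm_args() is omitted):
#
#   echo "101,102,103" | ./manage.py set_message_flags -f read -o add \
#       -m user@example.com --for-real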
| 38.463415
| 97
| 0.541852
|
badf717febf46b89151210694acf8739d9a7e8d8
| 1,294
|
py
|
Python
|
polls/tests.py
|
longthanhtran/easy-django
|
b300090bcb530ee04264406bcc1586975e9fdeda
|
[
"MIT"
] | null | null | null |
polls/tests.py
|
longthanhtran/easy-django
|
b300090bcb530ee04264406bcc1586975e9fdeda
|
[
"MIT"
] | null | null | null |
polls/tests.py
|
longthanhtran/easy-django
|
b300090bcb530ee04264406bcc1586975e9fdeda
|
[
"MIT"
] | null | null | null |
import datetime
from django.utils import timezone
from django.test import TestCase
from .models import Question
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertEqual(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
old_question = Question(pub_date=time)
self.assertEqual(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for questions whose
pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertEqual(recent_question.was_published_recently(), True)
| 35.944444
| 73
| 0.702473
|
351960ad99bf66061ebbb104bda66ea4d51f12d1
| 1,298
|
py
|
Python
|
management/commands/createspider.py
|
Zadigo/zineb
|
2addaa337600afec3c9696d77e0ccf3c5edc29da
|
[
"MIT"
] | 3
|
2021-10-17T20:37:40.000Z
|
2022-03-17T10:29:14.000Z
|
management/commands/createspider.py
|
Zadigo/zineb
|
2addaa337600afec3c9696d77e0ccf3c5edc29da
|
[
"MIT"
] | 4
|
2021-09-02T13:26:11.000Z
|
2022-03-16T12:26:36.000Z
|
management/commands/createspider.py
|
Zadigo/zineb
|
2addaa337600afec3c9696d77e0ccf3c5edc29da
|
[
"MIT"
] | null | null | null |
import os
from zineb import global_logger
from zineb.management.base import RequiresProjectError
from zineb.management.base import BaseCommand
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('name', type=str, help='Name of your spider')
parser.add_argument('--type', type=str, help='The type of spider to create', default='http', choices=['http', 'file'])
def execute(self, namespace):
_, settings = self.preconfigure_project()
project_path = settings.PROJECT_PATH
if project_path is None:
raise RequiresProjectError()
spider_type = 'Zineb'
if namespace.type == 'file':
spider_type = 'FileCrawler'
path = os.path.join(project_path, 'spiders.py')
with open(path, mode='rb+') as f:
content = f.read()
base = f"""\n
\n
class {namespace.name}({spider_type}):
start_urls = []
def start(self, response, request, **kwargs):
pass
"""
f.write(bytes(base.encode('utf-8')))
global_logger.logger.info((f"{namespace.name} was succesfully created. "
"Do not forget to register the spider in order to run it."))
| 34.157895
| 126
| 0.595532
|
4e120a95ea2cd65f2243cd2b3995d9532b9c48fe
| 2,076
|
py
|
Python
|
073_setMatrixZeroes.py
|
stuti-rastogi/leetcode-python-solutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | 4
|
2018-07-24T08:36:42.000Z
|
2019-08-25T17:48:47.000Z
|
073_setMatrixZeroes.py
|
stuti-rastogi/leetcodesolutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | null | null | null |
073_setMatrixZeroes.py
|
stuti-rastogi/leetcodesolutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | null | null | null |
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
if not matrix:
return
# use first row as "cols" array, use first column as "rows" array
# to do that, first store what needs to be done of first row/col
# i.e., do they contain zeroes
firstRowZeros = False
firstColZeros = False
m = len(matrix)
n = len(matrix[0])
for i in range(m):
if matrix[i][0] == 0:
firstColZeros = True
break
for j in range(n):
if matrix[0][j] == 0:
firstRowZeros = True
break
for i in range(1,m):
for j in range(1,n):
if matrix[i][j] == 0:
matrix[0][j] = 0
matrix[i][0] = 0
for i in range(1,m):
if matrix[i][0] == 0:
for j in range(1,n):
matrix[i][j] = 0
for j in range(1,n):
if matrix[0][j] == 0:
for i in range(1,m):
matrix[i][j] = 0
if firstRowZeros:
for j in range(n):
matrix[0][j] = 0
if firstColZeros:
for i in range(m):
matrix[i][0] = 0
return
############ O(m+n) space ############
# if not matrix:
# return
# m = len(matrix)
# n = len(matrix[0])
# # O(m+n) space
# rows = [False] * m
# cols = [False] * n
# for i in range(m):
# for j in range(n):
# if matrix[i][j] == 0:
# rows[i] = True
# cols[j] = True
# for i in range(m):
# if rows[i]:
# for j in range(n):
# matrix[i][j] = 0
# for j in range(n):
# if cols[j]:
# for i in range(m):
# matrix[i][j] = 0
# return
| 26.278481
| 73
| 0.391618
|
2889750ed71139faf081513559426248a6cf8f61
| 8,773
|
py
|
Python
|
ctl_adm.py
|
saydulk/admin4
|
dc99fa7c8a1e0417b131b7627a748e5a9b12c1bb
|
[
"Apache-2.0"
] | null | null | null |
ctl_adm.py
|
saydulk/admin4
|
dc99fa7c8a1e0417b131b7627a748e5a9b12c1bb
|
[
"Apache-2.0"
] | null | null | null |
ctl_adm.py
|
saydulk/admin4
|
dc99fa7c8a1e0417b131b7627a748e5a9b12c1bb
|
[
"Apache-2.0"
] | 14
|
2017-01-12T11:13:49.000Z
|
2019-04-19T10:02:50.000Z
|
# The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
import wx
try:
import adm
import logger
except:
print "ctl_adm: XRCED mode"
adm=None
class ComboBox(wx.ComboBox):
def __init__(self, parentWin, id=-1, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ComboBox.__init__(self, parentWin, id, "", pos, size, style=style | wx.CB_DROPDOWN|wx.CB_READONLY)
self.keys={}
def InsertKey(self, pos, key, val):
wid=wx.ComboBox.Insert(self, val, pos, key)
self.keys[key] = wid
return wid
def AppendKey(self, key, val):
wid=wx.ComboBox.Append(self, val, key)
self.keys[key] = wid
return wid
def Append(self, stuff):
"""
Append(stuff)
stuff may be
- a dictionary
- a list of (key,val) tuples
- a (key,val) tuple
- a String
"""
wid=None
if isinstance(stuff, dict):
for key, val in stuff.items():
wid=self.AppendKey(key, val)
elif isinstance(stuff, list):
for data in stuff:
if isinstance(data, (tuple, list)):
wid=self.AppendKey(data[0], data[1])
elif isinstance(data, (str,unicode)):
wid=wx.ComboBox.Append(self, data)
self.SetClientData(wid, None)
else:
logger.debug("unknown type to append to combobox: %s %s", type(data), data)
elif isinstance(stuff, tuple):
wid=self.AppendKey(stuff[0], stuff[1])
elif isinstance(stuff, (str,unicode)):
wid=wx.ComboBox.Append(self, stuff)
self.SetClientData(wid, None)
else:
logger.debug("unknown type to append to combobox: %s %s", type(stuff), stuff)
return wid
def SetKeySelection(self, key):
id=self.keys.get(key)
if id != None:
return self.SetSelection(id)
return -1
def GetKeySelection(self):
id=self.GetSelection()
if id >= 0:
return self.GetClientData(id)
return None
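# Usage sketch (editor's addition; `combo` stands for any ComboBox instance):
#
#   combo.Append({'key1': 'Label 1', 'key2': 'Label 2'})   # dict of key -> label
#   combo.Append([('a', 'Alpha'), ('b', 'Beta')])          # list of (key, label) tuples
#   combo.Append("plain label")                            # bare string, no key
#   combo.SetKeySelection('a')                             # select by key
#   combo.GetKeySelection()                                # -> 'a'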
class ListView(wx.ListView):
MARGIN=10
ICONWITDH=20
dlgConstant=None
def __init__(self, parentWin, defaultImageName="", id=-1, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.LC_REPORT):
unused=style
style=wx.LC_REPORT
wx.ListView.__init__(self, parentWin, id, pos, size, style)
if adm:
self.SetImageList(adm.images, wx.IMAGE_LIST_SMALL)
self.defaultImageId=adm.images.GetId(defaultImageName)
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.getToolTipTextProc=None
self.getToolTipCol=None
self.colInfos=[]
def ClearAll(self):
super(ListView, self).ClearAll()
self.coldef=[]
def GetToolTipText(self, id):
return self.GetItemText(id, self.getToolTipCol)
def GetSelection(self):
lst=[]
item=self.GetFirstSelected()
while item >= 0:
lst.append(item)
item=self.GetNextSelected(item)
return lst
def GetFocusText(self):
item=self.GetFocusedItem()
if item < 0: return None
return self.GetItemText(item, 0)
def SetSelectFocus(self, sel):
if sel == None: return
if not isinstance(sel, int):
sel=self.FindItem(0, sel)
if sel >= 0:
self.Focus(sel)
self.Select(sel)
def GetSelectionKeys(self):
lst=[]
item=self.GetFirstSelected()
while item >= 0:
lst.append(self.GetItemText(item, 0))
item=self.GetNextSelected(item)
return lst
def RegisterToolTipProc(self, proc):
if isinstance(proc, int):
self.getToolTipCol=proc
self.getToolTipTextProc=self.GetToolTipText
else:
self.getToolTipTextProc=proc
def convert(self, x):
if isinstance(x, (str, unicode)):
w,_h=self.GetTextExtent(x)
return w
if x < 0:
w,_h=self.GetSize()
for i in range(self.GetColumnCount()):
w -= self.GetColumnWidth(i)
if w<20:
w=20
return w
if not self.dlgConstant:
w,_h=self.GetTextExtent('Mg')
self.dlgConstant=w/2.
return int(float(x)*self.dlgConstant+.9)
class __ColumnExtractor:
def __init__(self, proc, colname):
def string(val):
if val == None:
return ""
return str(val)
self.colname=colname
if proc:
self.proc=proc
else:
self.proc=string
def GetVal(self, row):
if self.colname:
val=row[self.colname]
return self.proc(val)
return self.proc(row)
def AddExtractorInfo(self, colname=None, proc=None):
self.colInfos.append(ListView.__ColumnExtractor(proc, colname))
def AddColumnInfo(self, text, size=-1, colname=None, format=wx.LIST_FORMAT_LEFT, proc=None):
self.AddColumn(text, size, format)
self.AddExtractorInfo(colname, proc)
def AddColumn(self, text, size=-1, format=wx.LIST_FORMAT_LEFT):
if size in [None, -1, wx.LIST_AUTOSIZE]:
# size=wx.LIST_AUTOSIZE
size=self.GetClientSize().GetWidth();
for i in range(self.GetColumnCount()):
size -= self.GetColumnWidth(i)
elif size > 0:
size=self.convert(size) + self.MARGIN
if not self.GetColumnCount():
        size += self.ICONWIDTH
return self.InsertColumn(self.GetColumnCount(), text, format, size);
def CreateColumns(self, left, right=None, leftSize=-1):
if right != None:
if leftSize < 0:
leftSize=rightSize=self.GetClientSize().GetWidth()/2;
self.InsertColumn(0, left, wx.LIST_FORMAT_LEFT, leftSize);
        self.InsertColumn(1, right, wx.LIST_FORMAT_LEFT, rightSize - self.ICONWIDTH);
else:
self.AddColumn(left, leftSize)
self.AddColumn(right, -1)
else:
self.AddColumn(left, -1)
def AppendRow(self, values, icon=-1):
vals=[]
for colInfo in self.colInfos:
vals.append(colInfo.GetVal(values))
_row=self.AppendItem(icon, vals)
def UpdateRow(self, row, values, icon=-1):
vals=[]
for colInfo in self.colInfos:
vals.append(colInfo.GetVal(values))
for col in range(1, self.GetColumnCount()):
self.SetStringItem(row, col, vals[col])
self.SetItemImage(row, icon)
def Fill(self, valueList, idCol=0):
"""
Fill(values, idCol=0)
Updates Listview contents with the rows list.
    Each row contains a tuple of (columnValList, iconId) or only the columnValList
idCol identifies the column index in the columnValList which contains the key
"""
curRows = self.GetKeys()
for values in valueList:
if isinstance(values, tuple):
icon=values[1]
values=values[0]
else:
icon=-1
key=str(values[idCol])
if key in curRows:
curRows.remove(key)
row=self.FindItem(-1, key)
self.UpdateRow(row, values, icon)
else:
self.AppendRow(values, icon)
for key in curRows:
row=self.FindItem(-1, key)
if row >= 0:
self.DeleteItem(row)
def InsertItem(self, row, icon, vals):
if icon < 0:
icon=self.defaultImageId
if isinstance(vals, tuple):
vals=list(vals)
elif not isinstance(vals, list):
vals=[vals]
if row < 0:
row=self.GetItemCount()
row=self.InsertStringItem(row, unicode(vals[0]), icon)
for col in range(1, len(vals)):
val=vals[col]
if val == None:
val=""
val=unicode(val)
self.SetStringItem(row, col, val);
return row
def AppendItem(self, icon, vals):
return self.InsertItem(self.GetItemCount(), icon, vals)
def GetKeys(self):
l=[]
for i in range(self.GetItemCount()):
l.append(self.GetItemText(i, 0))
return l
def GetValue(self):
l=[]
for i in range(self.GetItemCount()):
l.append(self.GetItemTuple(i))
return l
def SetItem(self, row, val, image=None):
if isinstance(val, tuple):
val=list(val)
for col in range(len(val)):
self.SetStringItem(row, col, unicode(val[col]))
if image != None:
self.SetItemImage(row, image)
def GetItemTuple(self, row):
if row < 0 or row >= self.GetItemCount():
return None
l=[]
for col in range(self.GetColumnCount()):
l.append(self.GetItemText(row, col))
return tuple(l)
def GetItemText(self, row, col):
if row < 0 or row >= self.GetItemCount():
return None
if col < 0 or col >= self.GetColumnCount():
return None
return self.GetItem(row, col).GetText()
def GetColname(self, col):
return self.GetColumn(col).GetText()
def OnMouseMove(self, ev):
if self.getToolTipTextProc:
id, unused_flags=self.HitTest(ev.GetPosition())
if id < 0:
self.SetToolTipString("")
else:
self.SetToolTipString(self.getToolTipTextProc(id))
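# Usage sketch (illustrative only): defining keyed columns and filling rows.
# Assumes the surrounding application has set up adm.images so the default
# icons resolve; the column names and row dicts below are made up.
def _listview_usage_example(parentWin):
    lv = ListView(parentWin)
    lv.AddColumnInfo("Name", 20, colname="name")    # width in dialog units
    lv.AddColumnInfo("State", -1, colname="state")  # -1: use the remaining width
    rows = [{"name": "node1", "state": "up"},
            {"name": "node2", "state": "down"}]
    # Fill() keys rows on the idCol value: existing rows are updated in place,
    # new keys are appended and rows whose key disappeared are deleted.
    lv.Fill(rows, idCol="name")
    return lv.GetKeys()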
xmlControlList={ 'whListView': ListView,
'whComboBox': ComboBox,
}
| 26.345345
| 125
| 0.631141
|
3725ceb5b572ebe8b8ea97d8fd5c3c6538df7c55
| 8,171
|
py
|
Python
|
stock-tools/quote_change.py
|
Muyoo/blueberry-tools
|
cac16cfd39c51d22efd6c8c3d40c22c3b84cb100
|
[
"Apache-2.0"
] | null | null | null |
stock-tools/quote_change.py
|
Muyoo/blueberry-tools
|
cac16cfd39c51d22efd6c8c3d40c22c3b84cb100
|
[
"Apache-2.0"
] | null | null | null |
stock-tools/quote_change.py
|
Muyoo/blueberry-tools
|
cac16cfd39c51d22efd6c8c3d40c22c3b84cb100
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf8
import psycopg2 as pg
import json
import pdb
import sys
import time
import requests
import re
from datetime import datetime
STOCK_LIST_SQL = '''
SELECT code, name FROM stock_code
WHERE (code LIKE '6%' OR code LIKE '00%')
-- WHERE code = '002498'
AND name NOT LIKE '*ST%'
'''
CHANGE_SQL_FMT = '''
SELECT date_trunc('day', record_time) AS "day", value
FROM stock_k_data
WHERE record_time >= to_timestamp(1262275200)
AND metric = 'pct_chg' AND tag_stock_code(tags) = '%s'
ORDER BY "day" ASC
'''
PCT_CHANGE_THRESHOLD = 9.0
PRICE_THRESHOLD = 40.0
PROBABILITY_THRESHOLD = 0.85
STATISTIC_BASIC_COUNT = 15
def load_from_csv(filename):
print('Loading stocks history k-data...')
records_dic = {}
with open(filename, 'r') as reader:
title = reader.readline().strip().split(',')[1:]
pct_chg_idx = title.index('pct_chg')
open_idx = title.index('open')
code_idx = title.index('ts_code')
        # the header row has already been consumed by readline() above,
        # so every remaining line is a data row
        for line in reader:
items = line.strip().split(',')[1:]
pct_chg = float(items[pct_chg_idx])
code = items[code_idx]
open_price = float(items[open_idx])
if code not in records_dic:
records_dic[code] = []
records_dic[code].append((pct_chg, open_price))
    print('Completed loading history k-data')
return records_dic
def d3_d4_change(filename, output_filename):
records_dic = load_from_csv(filename)
values_list = []
for code, records in records_dic.items():
valid_stock_code = (code.startswith('6') and not code.startswith('68')) or code.startswith('00') #or code.startswith('30')
if not valid_stock_code:
continue
total = 0
d3 = 0
d4 = 0
i = 0
length = len(records)
for change, open_price in records:
if change < PCT_CHANGE_THRESHOLD:
i += 1
continue
pre_change, _ = records[i - 1]
if pre_change <= 0:
i += 1
continue
total += 1
if i + 1 < length:
d_3_change, _ = records[i+1]
if d_3_change > 0:
d3 += 1
else:
break
if i + 2 < length:
d_4_change, _ = records[i+2]
if d_4_change > 0:
d4 += 1
else:
break
i += 1
values_list.append((code, total, d3, d4))
sorted_list = sorted(
values_list, key=lambda x: x[2]/x[1] if x[1] != 0 else 0, reverse=True)
with open(output_filename, 'w') as writer:
for val in sorted_list:
if val[1] == 0:
continue
writer.write('%s %s %s %s %s\n' %
(val[0], val[2]/val[1], val[1], val[2], val[3]))
return records_dic
# 3-day trade rule
# Denominator: limit-up day D where the D-1 change is in (0, 0.1) and the D+1 change is in (-0.1, 0.1)
# Numerator:   limit-up day D where the D-1 change is in (0, 0.1) and the D+1 change is in (0, 0.1)
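# A tiny worked example of the rule above (made-up numbers).  It mirrors the
# counting in d3_d4_change() for a single stock, without the early-exit breaks:
def _d3_d4_example():
    # (pct_chg, open_price) per day; only day 2 qualifies: its change 9.5
    # exceeds PCT_CHANGE_THRESHOLD and the previous day closed up (+1.2).
    records = [(1.2, 10.0), (9.5, 10.1), (0.8, 11.1), (2.1, 11.2), (-0.4, 11.4)]
    total = d3 = d4 = 0
    for i, (change, _) in enumerate(records):
        if change < PCT_CHANGE_THRESHOLD or records[i - 1][0] <= 0:
            continue
        total += 1
        if i + 1 < len(records) and records[i + 1][0] > 0:
            d3 += 1
        if i + 2 < len(records) and records[i + 2][0] > 0:
            d4 += 1
    return total, d3, d4  # -> (1, 1, 1): D+1 and D+2 both closed up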
# def d3_d4_change_from_db():
# conn = pg.connect('dbname=blueberry user=postgres password=123456')
# cursor = conn.cursor()
# cursor.execute(STOCK_LIST_SQL)
# stats_dic = {}
# stock_stats_list = []
# for stock_info in cursor.fetchall():
# code, name = stock_info
# change_sql = CHANGE_SQL_FMT % code
# cursor.execute(change_sql)
# key = '%s-%s' % stock_info
# stats_dic[key] = {'total': 0, 'd_3': 0, 'd_4': 0}
# change_value_list = cursor.fetchall()
# length = len(change_value_list)
# i = 0
# for record in change_value_list:
# day, change = record
# if change < 9.9:
# i += 1
# continue
# pre_day, pre_change = change_value_list[i - 1]
# if pre_change <= 0:
# i += 1
# continue
# stats_dic[key]['total'] += 1
# if i + 1 < length:
# d_3_day, d_3_change = change_value_list[i+1]
# if d_3_change > 0:
# stats_dic[key]['d_3'] += 1
# else:
# break
# if i + 2 < length:
# d_4_day, d_4_change = change_value_list[i+2]
# if d_4_change > 0:
# stats_dic[key]['d_4'] += 1
# else:
# break
# i += 1
# cursor.close()
# conn.close()
# print(json.dumps(stats_dic))
# total = 0
# d_3 = 0
# d_4 = 0
# for key, stats in stats_dic.items():
# total += stats['total']
# d_3 += stats['d_3']
# d_4 += stats['d_4']
# print(total, d_3, d_4)
# From 2000-01-01 to 2020-07-10: Total = 55869, d_3 = 37984, d_4 = 30125
def filter_change_stocks(pct_chg_file, output_filename):
writer = open(output_filename, 'w')
with open(pct_chg_file, 'r') as reader:
for line in reader:
items = line.strip().split()
code, probability, total, d3, d4 = items
if float(probability) > PROBABILITY_THRESHOLD and int(total) >= STATISTIC_BASIC_COUNT:
writer.write('%s\n' % code)
writer.close()
def pick_up_stocks(records_dic, filtered_change_filename):
print('To pick up stocks from: ')
filtered_codes_set = set([line.strip()
for line in open(filtered_change_filename, 'r')])
writer = open('picked_up.stocks', 'w')
for code, records in records_dic.items():
if code not in filtered_codes_set:
continue
last_n_days = records[-2:]
print('\t', code, last_n_days)
pre_pct_change, _ = last_n_days[0]
pct_change, open_price = last_n_days[1]
if pre_pct_change > 0.0 and pct_change >= PCT_CHANGE_THRESHOLD:
if open_price <= PRICE_THRESHOLD:
writer.write('%s\n' % code)
writer.close()
output_filename = './pct_chg_sorted.stats'
filtered_change_filename = './filtered_change.stats'
def run_train(kdata_filename):
pct_change_dic = d3_d4_change(kdata_filename, output_filename)
filter_change_stocks(output_filename, filtered_change_filename)
pick_up_stocks(pct_change_dic, filtered_change_filename)
# candidates = [code.strip() for code in open(filter_change_stocks, 'r')]
def run_monitor():
regx_pattern = re.compile('"(.*)"')
url_fmt = 'http://hq.sinajs.cn/list=%s'
candidates = set([])
with open(filtered_change_filename, 'r') as reader:
for line in reader:
code, exchange = line.strip().split('.')
candidates.add('%s%s' % (exchange.lower(), code))
while True:
hour = datetime.now().strftime('%H')
if hour > '16':
            print('Market is closed. Exiting.')
break
for stock in candidates:
stock_url = url_fmt % stock
data = requests.get(stock_url).text.strip()
data = regx_pattern.search(data).groups()[0]
name, open_price, pre_close_price, current_price = data.split(',')[
:4]
if float(pre_close_price) <= 0 or float(open_price) <= 0:
continue
pre_change = (float(pre_close_price) -
float(open_price)) / float(pre_close_price)
if pre_change > 0:
current_change = 100 * \
(float(current_price) - float(open_price)) / float(open_price)
if current_change > 7:
print(name, stock, current_price, current_change)
time.sleep(5)
if __name__ == '__main__':
if len(sys.argv) < 2:
print(
'Usage: \n\t1. quote_change.py train k-data.csv\n\t2. quote_change.py monitor')
exit(1)
mode = sys.argv[1]
if mode == 'train':
run_train(sys.argv[2])
elif mode == 'monitor':
run_monitor()
else:
print(
'Usage: \n\t1. quote_change.py train k-data.csv\n\t2. quote_change.py monitor')
| 29.712727
| 130
| 0.546445
|
0659aac7eb3787313a2eda638e09e305c6fab4de
| 12,437
|
py
|
Python
|
tests/test_dplyr_slice.py
|
stjordanis/datar
|
4e2b5db026ad35918954576badef9951928c0cb1
|
[
"MIT"
] | null | null | null |
tests/test_dplyr_slice.py
|
stjordanis/datar
|
4e2b5db026ad35918954576badef9951928c0cb1
|
[
"MIT"
] | null | null | null |
tests/test_dplyr_slice.py
|
stjordanis/datar
|
4e2b5db026ad35918954576badef9951928c0cb1
|
[
"MIT"
] | null | null | null |
# tests grabbed from:
# https://github.com/tidyverse/dplyr/blob/master/tests/testthat/test-slice.r
from pandas.testing import assert_frame_equal
from pipda.context import ContextError
import pytest
from datar.core.grouped import DataFrameRowwise
from datar.all import *
from datar.datasets import mtcars
from datar.dplyr.dslice import _n_from_prop
def test_empty_slice_returns_input():
df = tibble(x=[1,2,3])
assert slice(df).equals(df)
def test_slice_handles_numeric_input():
g = mtcars >> arrange(f.cyl) >> group_by(f.cyl)
res = g >> slice(1)
assert nrow(res) == 3
exp = g >> filter(row_number() == 1)
assert_frame_equal(res, exp)
res1 = mtcars >> slice(1)
res2 = mtcars >> filter(row_number() == 1)
assert_frame_equal(res1, res2)
def test_slice_silently_ignores_out_of_range_values():
res1 = slice(mtcars, c(2, 100))
res2 = slice(mtcars, 2)
assert_frame_equal(res1, res2)
g = group_by(mtcars, f.cyl)
res1 = slice(g, c(2, 100))
res2 = slice(g, 2)
assert_frame_equal(res1, res2)
def test_slice_works_with_negative_indices():
res = slice(mtcars, ~f[:2])
exp = tail(mtcars, -2)
assert_frame_equal(res, exp)
def test_slice_works_with_grouped_data():
g = mtcars >> arrange(f.cyl) >> group_by(f.cyl)
res = slice(g, f[:2])
exp = filter(g, row_number() < 3)
assert_frame_equal(res, exp)
res = slice(g, ~f[:2])
exp = filter(g, row_number() >= 3)
assert res.equals(exp)
g = group_by(tibble(x=c(1,1,2,2,2)), f.x)
out = group_keys(slice(g, 3, _preserve=True)) >> pull(f.x, to='list')
assert out == [1,2]
out = group_keys(slice(g, 3, _preserve=False)) >> pull(f.x, to='list')
assert out == [2]
def test_slice_gives_correct_rows():
a = tibble(value=[f"row{i}" for i in range(1,11)])
out = slice(a, c(1,2,3)) >> pull(f.value, to='list')
assert out == ['row1', 'row2', 'row3']
out = slice(a, c(4,6,9)) >> pull(f.value, to='list')
assert out == ['row4', 'row6', 'row9']
a = tibble(
value=[f"row{i}" for i in range(1,11)],
group=rep([1,2], each=5)
) >> group_by(f.group)
out = slice(a, f[1:3]) >> pull(f.value, to='list')
assert out == [f'row{i}' for i in [1,2,3, 6,7,8]]
out = slice(a, c(2,4)) >> pull(f.value, to='list')
assert out == [f'row{i}' for i in [2,4,7,9]]
def test_slice_handles_na():
df = tibble(x=[1,2,3])
assert nrow(slice(df, NA)) == 0
assert nrow(slice(df, c(1, NA))) == 1
out = df >> slice(c(~c(1), NA)) >> nrow()
assert out == 2
df = tibble(x=[1,2,3,4], g=rep([1,2], 2)) >> group_by(f.g)
assert nrow(slice(df, c(1, NA))) == 2
out = df >> slice(c(~c(1), NA)) >> nrow()
assert out == 2
def test_slice_handles_logical_NA():
df = tibble(x=[1,2,3])
assert nrow(slice(df, NA)) == 0
def test_slice_handles_empty_df():
df = tibble(x=[])
res = df >> slice(f[:3])
assert nrow(res) == 0
assert names(res) == ["x"]
def test_slice_works_fine_if_n_gt_nrow():
by_slice = mtcars >> arrange(f.cyl) >> group_by(f.cyl)
slice_res = by_slice >> slice(8)
filter_res = by_slice >> group_by(f.cyl) >> filter(row_number() == 8)
assert slice_res.equals(filter_res)
def test_slice_strips_grouped_indices():
res = mtcars >> group_by(f.cyl) >> slice(1) >> mutate(mpgplus=f.mpg+1)
assert nrow(res) == 3
assert group_rows(res) == [[0], [1], [2]]
def test_slice_works_with_0col_dfs():
out = tibble(a=[1,2,3]) >> select(~f.a) >> slice(1) >> nrow()
assert out == 1
def test_slice_correctly_computes_positive_indices_from_negative_indices():
x = tibble(y=range(1,11))
# negative in dplyr meaning exclusive
assert slice(x, ~f[10:30]).equals(tibble(y=range(1,10)))
def test_slice_accepts_star_args():
out1 = slice(mtcars, 1, 2)
out2 = slice(mtcars, [1,2])
assert out1.equals(out2)
out3 = slice(mtcars, 1, n())
out4 = slice(mtcars, c(1, nrow(mtcars)))
assert out3.equals(out4)
g = mtcars >> group_by(f.cyl)
out5 = slice(g, 1, n())
out6 = slice(g, c(1, n()))
assert out5.equals(out6)
def test_slice_does_not_evaluate_the_expression_in_empty_groups():
res = mtcars >> \
group_by(f.cyl) >> \
filter(f.cyl==6) >> \
slice(f[:2])
assert nrow(res) == 2
# sample_n is Superseded in favor of slice_sample
# res = mtcars >> \
# group_by(f.cyl) >> \
# filter(f.cyl==6) >> \
# sample_n(size=3)
# assert nrow(res) == 3
def test_slice_handles_df_columns():
df = tibble(x=[1,2], y=tibble(a=[1,2], b=[3,4]), z=tibble(A=[1,2], B=[3,4]))
out = slice(df, 1)
assert out.equals(df.iloc[[0], :])
gdf = group_by(df, f.x)
assert slice(gdf, 1).equals(gdf)
# TODO: group_by a stacked df is not supported yet
gdf = group_by(df, f['y$a'], f['y$b'])
assert slice(gdf, 1).equals(gdf)
gdf = group_by(df, f['z$A'], f['z$B'])
assert slice(gdf, 1).equals(gdf)
# # Slice variants ----------------------------------------------------------
def test_functions_silently_truncate_results():
df = tibble(x=range(1,6))
out = df >> slice_head(n=6) >> nrow()
assert out == 5
out = df >> slice_tail(n=6) >> nrow()
assert out == 5
out = df >> slice_sample(n=6) >> nrow()
assert out == 5
out = df >> slice_min(f.x, n=6) >> nrow()
assert out == 5
out = df >> slice_max(f.x, n=6) >> nrow()
assert out == 5
def test_proportion_computed_correctly():
df = tibble(x=range(1,11))
out = df >> slice_head(prop=.11) >> nrow()
assert out == 1
out = df >> slice_tail(prop=.11) >> nrow()
assert out == 1
out = df >> slice_sample(prop=.11) >> nrow()
assert out == 1
out = df >> slice_min(f.x, prop=.11) >> nrow()
assert out == 1
out = df >> slice_max(f.x, prop=.11) >> nrow()
assert out == 1
out = df >> slice_max(f.x, prop=.11, with_ties=False) >> nrow()
assert out == 1
out = df >> slice_min(f.x, prop=.11, with_ties=False) >> nrow()
assert out == 1
def test_min_and_max_return_ties_by_default():
df = tibble(x=c(1,1,1,2,2))
out = df >> slice_min(f.x) >> nrow()
assert out == 3
out = df >> slice_max(f.x) >> nrow()
assert out == 2
out = df >> slice_min(f.x, with_ties=False) >> nrow()
assert out == 1
out = df >> slice_max(f.x, with_ties=False) >> nrow()
assert out == 1
def test_min_and_max_reorder_results():
df = tibble(id=range(1,5), x=c(2,3,1,2))
out = df >> slice_min(f.x, n=2) >> pull(f.id, to='list')
assert out == [3,1,4]
out = df >> slice_min(f.x, n=2, with_ties=False) >> pull(f.id, to='list')
assert out == [3,1]
out = df >> slice_max(f.x, n=2) >> pull(f.id, to='list')
assert out == [2,1,4]
out = df >> slice_max(f.x, n=2, with_ties=False) >> pull(f.id, to='list')
assert out == [2,1]
def test_min_and_max_ignore_nas():
df = tibble(
id=range(1,5),
x=c(2,NA,1,2),
y=[NA]*4
)
out = df >> slice_min(f.x, n=2) >> pull(f.id, to='list')
assert out == [3,1,4]
out = df >> slice_min(f.y, n=2) >> nrow()
assert out == 0
out = df >> slice_max(f.x, n=2) >> pull(f.id, to='list')
assert out == [1,4]
out = df >> slice_max(f.y, n=2) >> nrow()
assert out == 0
def test_arguments_to_sample_are_passed_along():
df = tibble(x=range(1,101), wt=c(1, rep(0, 99)))
out = df >> slice_sample(n=1, weight_by=f.wt) >> pull(f.x, to='list')
assert out == [1]
out = df >> slice_sample(n=2, weight_by=f.wt, replace=True) >> pull(f.x, to='list')
assert out == [1,1]
def test_slice_any_checks_for_empty_args_kwargs():
df = tibble(x=range(1,11))
# python recognize n=5
# with pytest.raises(ValueError):
# slice_head(df, 5)
# with pytest.raises(ValueError):
# slice_tail(df, 5)
with pytest.raises(TypeError):
df >> slice_min(n=5)
with pytest.raises(TypeError):
df >> slice_max(n=5)
# with pytest.raises(ValueError):
# slice_sample(df, 5)
def test_slice_any_checks_for_constant_n_and_prop():
df = tibble(x=range(1,11))
with pytest.raises(ContextError):
slice_head(df, n=f.x) # ok with n()
with pytest.raises(ContextError):
slice_head(df, prop=f.x)
with pytest.raises(ContextError):
slice_tail(df, n=f.x)
with pytest.raises(ContextError):
slice_tail(df, prop=f.x)
with pytest.raises(ContextError):
slice_min(df, f.x, n=f.x)
with pytest.raises(ContextError):
slice_min(df, f.x, prop=f.x)
with pytest.raises(ContextError):
slice_max(df, f.x, n=f.x)
with pytest.raises(ContextError):
slice_max(df, f.x, prop=f.x)
with pytest.raises(ContextError):
slice_sample(df, n=f.x)
with pytest.raises(ContextError):
slice_sample(df, prop=f.x)
def test_slice_sample_dose_not_error_on_0rows():
df = tibble(dummy=[], weight=[])
res = slice_sample(df, prop=.5, weight_by=f.weight)
assert nrow(res) == 0
# # Errors ------------------------------------------------------------------
def test_rename_errors_with_invalid_grouped_df():
df = tibble(x=[1,2,3])
# Incompatible type
with pytest.raises(TypeError):
slice(df, object())
with pytest.raises(TypeError):
slice(df, {'a': 1})
# Mix of positive and negative integers
with pytest.raises(ValueError):
mtcars >> slice(c(~c(1), 2))
with pytest.raises(ValueError):
mtcars >> slice(c(f[2:4], ~c(1)))
# n and prop are carefully validated
# with pytest.raises(ValueError):
# _n_from_prop(10, n=1, prop=1)
with pytest.raises(TypeError):
_n_from_prop(10, n="a")
with pytest.raises(TypeError):
_n_from_prop(10, prop="a")
with pytest.raises(ValueError):
_n_from_prop(10, n=-1)
with pytest.raises(ValueError):
_n_from_prop(10, prop=-1)
with pytest.raises(TypeError):
_n_from_prop(10, n=n())
with pytest.raises(TypeError):
_n_from_prop(10, prop=n())
## tests for datar
def test_mixed_rows():
df = tibble(x=range(5))
# order kept
# 0 1 2 3 4
# -3 -1
# 3 # 1-based
out = slice(df, c(-c(1,3), 4)) >> pull(f.x, to='list')
assert out == [2, 4, 3]
# 0 1 2 3 4
# -2 -1
# 4
out = slice(df, c(-f[:2], 4)) >> pull(f.x, to='list')
assert out == [3, 4]
# 0 1 2 3 4
# 1 3
# -1
out = slice(df, c(~c(1,3), ~c(-1))) >> pull(f.x, to='list')
assert out == [1, 3]
out = df >> slice(c(~f[3:], ~c(1))) >> pull(f.x, to='list')
assert out == [1]
def test_slice_sample_n_defaults_to_1():
df = tibble(
g = rep([1,2], each=3),
x = seq(1,6)
)
out = df >> slice_sample(n=None)
assert dim(out) == (1, 2)
def test_slicex_on_grouped_data():
gf = tibble(
g = rep([1,2], each=3),
x = seq(1,6)
) >> group_by(f.g)
out = gf >> slice_min(f.x)
assert out.equals(tibble(g=[1,2], x=[1,4]))
out = gf >> slice_max(f.x)
assert out.equals(tibble(g=[1,2], x=[3,6]))
out = gf >> slice_sample()
assert dim(out) == (2, 2)
def test_n_from_prop():
assert _n_from_prop(1, prop=.5) == 0
assert _n_from_prop(2, prop=.5) == 1
assert _n_from_prop(4, prop=.5) == 2
# slice_head/tail on grouped data
def test_slice_head_tail_on_grouped_data():
df = tibble(g=[1,1,1,2,2,2], x=[1,2,3,4,5,6]) >> group_by(f.g)
out = slice_head(df, 1) >> ungroup()
assert_frame_equal(out, tibble(g=[1,2], x=[1,4]))
out = slice_tail(df, 1) >> ungroup()
assert_frame_equal(out, tibble(g=[1,2], x=[3,6]))
def test_slice_family_on_rowwise_df():
df = tibble(x=f[1:6]) >> rowwise()
out = df >> slice([1,2,3])
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_head(n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_tail(n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_min(f.x, n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_max(f.x, n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
out = df >> slice_sample(n=3)
assert isinstance(out, DataFrameRowwise)
assert nrow(out) == 3
| 30.55774
| 87
| 0.58133
|
de4fc6cd225b5238ec6567c4899b02feb68e9cda
| 8,915
|
py
|
Python
|
gammapy/cube/make.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/cube/make.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/cube/make.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
from astropy.nddata.utils import NoOverlapError
from astropy.coordinates import Angle
from ..maps import Map, WcsGeom
from .counts import fill_map_counts
from .exposure import make_map_exposure_true_energy, _map_spectrum_weight
from .background import make_map_background_irf
__all__ = ["MapMaker", "MapMakerObs"]
log = logging.getLogger(__name__)
class MapMaker:
"""Make maps from IACT observations.
Parameters
----------
geom : `~gammapy.maps.WcsGeom`
Reference image geometry in reco energy
offset_max : `~astropy.coordinates.Angle`
Maximum offset angle
geom_true : `~gammapy.maps.WcsGeom`
Reference image geometry in true energy, used for exposure maps and PSF.
If none, the same as geom is assumed
exclusion_mask : `~gammapy.maps.Map`
Exclusion mask
"""
def __init__(self, geom, offset_max, geom_true=None, exclusion_mask=None):
if not isinstance(geom, WcsGeom):
raise ValueError("MapMaker only works with WcsGeom")
if geom.is_image:
raise ValueError("MapMaker only works with geom with an energy axis")
self.geom = geom
self.geom_true = geom_true if geom_true else geom
self.offset_max = Angle(offset_max)
self.maps = {}
# Some background estimation methods need an exclusion mask.
if exclusion_mask is not None:
self.maps["exclusion"] = exclusion_mask
def run(self, observations, selection=None):
"""
Run MapMaker for a list of observations to create
stacked counts, exposure and background maps
Parameters
        ----------
observations : `~gammapy.data.Observations`
Observations to process
selection : list
List of str, selecting which maps to make.
Available: 'counts', 'exposure', 'background'
By default, all maps are made.
Returns
        -------
maps: dict of stacked counts, background and exposure maps.
"""
selection = _check_selection(selection)
# Initialise zero-filled maps
for name in selection:
if name == "exposure":
self.maps[name] = Map.from_geom(self.geom_true, unit="m2 s")
else:
self.maps[name] = Map.from_geom(self.geom, unit="")
for obs in observations:
try:
self._process_obs(obs, selection)
except NoOverlapError:
log.info(
"Skipping observation {}, no overlap with map.".format(obs.obs_id)
)
continue
return self.maps
def _process_obs(self, obs, selection):
# Compute cutout geometry and slices to stack results back later
cutout_map = Map.from_geom(self.geom).cutout(
position=obs.pointing_radec, width=2 * self.offset_max, mode="trim"
)
cutout_map_etrue = Map.from_geom(self.geom_true).cutout(
position=obs.pointing_radec, width=2 * self.offset_max, mode="trim"
)
log.info("Processing observation: OBS_ID = {}".format(obs.obs_id))
# Compute field of view mask on the cutout
coords = cutout_map.geom.get_coord()
offset = coords.skycoord.separation(obs.pointing_radec)
fov_mask = offset >= self.offset_max
# Compute field of view mask on the cutout in true energy
coords_etrue = cutout_map_etrue.geom.get_coord()
offset_etrue = coords_etrue.skycoord.separation(obs.pointing_radec)
fov_mask_etrue = offset_etrue >= self.offset_max
# Only if there is an exclusion mask, make a cutout
# Exclusion mask only on the background, so only in reco-energy
exclusion_mask = self.maps.get("exclusion", None)
if exclusion_mask is not None:
exclusion_mask = exclusion_mask.cutout(
position=obs.pointing_radec, width=2 * self.offset_max, mode="trim"
)
# Make maps for this observation
maps_obs = MapMakerObs(
observation=obs,
geom=cutout_map.geom,
geom_true=cutout_map_etrue.geom,
fov_mask=fov_mask,
fov_mask_etrue=fov_mask_etrue,
exclusion_mask=exclusion_mask,
).run(selection)
# Stack observation maps to total
for name in selection:
data = maps_obs[name].quantity.to_value(self.maps[name].unit)
if name == "exposure":
self.maps[name].fill_by_coord(coords_etrue, data)
else:
self.maps[name].fill_by_coord(coords, data)
def make_images(self, spectrum=None, keepdims=False):
"""Create images by summing over the energy axis.
Exposure is weighted with an assumed spectrum,
resulting in a weighted mean exposure image.
Parameters
----------
spectrum : `~gammapy.spectrum.models.SpectralModel`
Spectral model to compute the weights.
Default is power-law with spectral index of 2.
keepdims : bool, optional
If this is set to True, the energy axes is kept with a single bin.
If False, the energy axes is removed
Returns
-------
images : dict of `~gammapy.maps.Map`
"""
images = {}
for name, map in self.maps.items():
if name == "exposure":
map = _map_spectrum_weight(map, spectrum)
images[name] = map.sum_over_axes(keepdims=keepdims)
return images
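# Hedged usage sketch of the class above; the geometry, observation list and
# the "2 deg" maximum offset are placeholders supplied by the caller.
def _example_make_maps(observations, geom, exclusion_mask=None):
    """Stack counts/exposure/background maps for a set of observations and
    reduce them to 2D images; ``geom`` must be a `~gammapy.maps.WcsGeom`
    with an energy axis."""
    maker = MapMaker(geom, offset_max="2 deg", exclusion_mask=exclusion_mask)
    maps = maker.run(observations, selection=["counts", "exposure", "background"])
    images = maker.make_images(keepdims=False)
    return maps, images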
class MapMakerObs:
"""Make maps for a single IACT observation.
Parameters
----------
observation : `~gammapy.data.DataStoreObservation`
Observation
geom : `~gammapy.maps.WcsGeom`
Reference image geometry
geom_true : `~gammapy.maps.WcsGeom`
Reference image geometry in true energy, used for exposure maps and PSF.
If none, the same as geom is assumed
fov_mask : `~numpy.ndarray`
Mask to select pixels in field of view
exclusion_mask : `~gammapy.maps.Map`
Exclusion mask (used by some background estimators)
"""
def __init__(
self,
observation,
geom,
geom_true=None,
fov_mask=None,
fov_mask_etrue=None,
exclusion_mask=None,
):
self.observation = observation
self.geom = geom
self.geom_true = geom_true if geom_true else geom
self.fov_mask = fov_mask
self.fov_mask_etrue = fov_mask_etrue
self.exclusion_mask = exclusion_mask
self.maps = {}
def run(self, selection=None):
"""Make maps.
Returns dict with keys "counts", "exposure" and "background".
Parameters
----------
selection : list
List of str, selecting which maps to make.
Available: 'counts', 'exposure', 'background'
By default, all maps are made.
"""
selection = _check_selection(selection)
for name in selection:
getattr(self, "_make_" + name)()
return self.maps
def _make_counts(self):
counts = Map.from_geom(self.geom)
fill_map_counts(counts, self.observation.events)
if self.fov_mask is not None:
counts.data[..., self.fov_mask] = 0
self.maps["counts"] = counts
def _make_exposure(self):
exposure = make_map_exposure_true_energy(
pointing=self.observation.pointing_radec,
livetime=self.observation.observation_live_time_duration,
aeff=self.observation.aeff,
geom=self.geom_true,
)
if self.fov_mask_etrue is not None:
exposure.data[..., self.fov_mask_etrue] = 0
self.maps["exposure"] = exposure
def _make_background(self):
background = make_map_background_irf(
pointing=self.observation.pointing_radec,
ontime=self.observation.observation_time_duration,
bkg=self.observation.bkg,
geom=self.geom,
)
if self.fov_mask is not None:
background.data[..., self.fov_mask] = 0
# TODO: decide what background modeling options to support
# Extra things like FOV norm scale or ring would go here.
self.maps["background"] = background
def _check_selection(selection):
"""Handle default and validation of selection"""
available = ["counts", "exposure", "background"]
if selection is None:
selection = available
if not isinstance(selection, list):
raise TypeError("Selection must be a list of str")
for name in selection:
if name not in available:
raise ValueError("Selection not available: {!r}".format(name))
return selection
| 33.641509
| 86
| 0.61963
|
f7e6e9e294e27bf952bce884d4c77681a9103fc7
| 313
|
py
|
Python
|
tests/conftest.py
|
predakanga/resolve
|
80400b4b132297c04bd09a1448b0c4d78ba4ed1f
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
predakanga/resolve
|
80400b4b132297c04bd09a1448b0c4d78ba4ed1f
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
predakanga/resolve
|
80400b4b132297c04bd09a1448b0c4d78ba4ed1f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dummy conftest.py for resolve.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
from __future__ import print_function, absolute_import, division
import pytest
| 24.076923
| 64
| 0.702875
|
23394f026cfdb39e4a6ac25e7cd5cf8a8f379462
| 1,821
|
py
|
Python
|
paddlex/tools/split.py
|
yaoshanliang/PaddleX
|
fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842
|
[
"Apache-2.0"
] | 3,655
|
2020-03-28T09:19:50.000Z
|
2022-03-31T13:28:39.000Z
|
paddlex/tools/split.py
|
yaoshanliang/PaddleX
|
fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842
|
[
"Apache-2.0"
] | 829
|
2020-03-28T04:03:18.000Z
|
2022-03-31T14:34:30.000Z
|
paddlex/tools/split.py
|
yaoshanliang/PaddleX
|
fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842
|
[
"Apache-2.0"
] | 738
|
2020-03-28T03:56:46.000Z
|
2022-03-31T13:11:03.000Z
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dataset_split.coco_split import split_coco_dataset
from .dataset_split.voc_split import split_voc_dataset
from .dataset_split.imagenet_split import split_imagenet_dataset
from .dataset_split.seg_split import split_seg_dataset
def dataset_split(dataset_dir, dataset_format, val_value, test_value,
save_dir):
if dataset_format == "coco":
train_num, val_num, test_num = split_coco_dataset(
dataset_dir, val_value, test_value, save_dir)
elif dataset_format == "voc":
train_num, val_num, test_num = split_voc_dataset(
dataset_dir, val_value, test_value, save_dir)
elif dataset_format == "seg":
train_num, val_num, test_num = split_seg_dataset(
dataset_dir, val_value, test_value, save_dir)
elif dataset_format == "imagenet":
train_num, val_num, test_num = split_imagenet_dataset(
dataset_dir, val_value, test_value, save_dir)
print("Dataset Split Done.")
print("Train samples: {}".format(train_num))
print("Eval samples: {}".format(val_num))
print("Test samples: {}".format(test_num))
print("Split files saved in {}".format(save_dir))
| 43.357143
| 74
| 0.727622
|
435be635d2b6912b837bf2ee60756870e38d5a7f
| 1,423
|
py
|
Python
|
src/robotican_demos_upgrade/script/robot_navigation_client.py
|
aosbgu/ROSPlan-ExperimentPDDL
|
09de0ba980362606dd1269c6689cb59d6f8776c6
|
[
"MIT"
] | null | null | null |
src/robotican_demos_upgrade/script/robot_navigation_client.py
|
aosbgu/ROSPlan-ExperimentPDDL
|
09de0ba980362606dd1269c6689cb59d6f8776c6
|
[
"MIT"
] | null | null | null |
src/robotican_demos_upgrade/script/robot_navigation_client.py
|
aosbgu/ROSPlan-ExperimentPDDL
|
09de0ba980362606dd1269c6689cb59d6f8776c6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import sys
import rospy
from robotican_demos_upgrade.srv import *
def push_button_client():
rospy.wait_for_service('robot_navigation')
try:
robot_navigation_req = rospy.ServiceProxy('robot_navigation', robot_navigation)
resp1 = robot_navigation_req("corridor", "armadillo", "location1", "location2", "floor")
print("responding!")
return resp1.response
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
if __name__ == "__main__":
resp = push_button_client()
print('The response is: ', resp)
if(resp == "success" or resp == "failure"):
print("pushed the button successfully!!")
sys.exit()
'''
#!/usr/bin/env python
from __future__ import print_function
import sys
import rospy
from robotican_demos_upgrade.srv import *
def push_button():
rospy.wait_for_service('push_button')
try:
push_button_req = rospy.ServiceProxy('push_button', push_button)
resp1.response = push_button_req("push_button", "armadillo", "elevator", "floor", "button")
print(resp1.response)
return resp1.response
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
if __name__ == "__main__":
push_button()
#if(resp == "success" or resp == "failure"):
# sys.exit()
'''
| 27.901961
| 107
| 0.662684
|
0b1510496a86788d974a0c79fb2762ea250a12f3
| 1,777
|
py
|
Python
|
discussion/models.py
|
ThusharaX/mumbleapi
|
8435fe9d86869cce81961f42c9860fa3810c171b
|
[
"Apache-2.0"
] | 187
|
2021-04-24T14:49:44.000Z
|
2022-03-31T14:25:22.000Z
|
discussion/models.py
|
shukl08vk/mumbleapi
|
101825d8aecba7eac4e31046e7b4b15b36c55f77
|
[
"Apache-2.0"
] | 119
|
2021-04-24T18:08:43.000Z
|
2022-01-09T00:57:19.000Z
|
discussion/models.py
|
shukl08vk/mumbleapi
|
101825d8aecba7eac4e31046e7b4b15b36c55f77
|
[
"Apache-2.0"
] | 174
|
2021-04-24T15:57:23.000Z
|
2022-03-11T02:09:04.000Z
|
from django.db import models
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
from users.models import TopicTag
import uuid
class Discussion(models.Model):
id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)
user = models.ForeignKey(User,on_delete=models.SET_NULL, null=True, blank=True)
headline = models.CharField(max_length=500, default="no headline")
content = RichTextField(max_length=10000)
# discussion tags from user model
tags = models.ManyToManyField(TopicTag, related_name='discussion_tags', blank=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.headline)
class DiscussionComment(models.Model):
id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)
discussion = models.ForeignKey(Discussion,on_delete=models.CASCADE)
user = models.ForeignKey(User,on_delete=models.SET_NULL, null=True, blank=True)
content = models.TextField(max_length=1000)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.user.username)
class DiscussionVote(models.Model):
id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)
user = models.ForeignKey(User,on_delete=models.SET_NULL, null=True, blank=True)
discussion = models.ForeignKey(Discussion, on_delete=models.CASCADE)
comment = models.ForeignKey(DiscussionComment, on_delete=models.SET_NULL,null=True, blank=True)
value = models.IntegerField(blank=True, null=True, default=0)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"{self.discussion} - count - {self.value}"
| 42.309524
| 99
| 0.756331
|
2530823d17bc4ee1f2546447684b700eb78f99d2
| 20,659
|
py
|
Python
|
swapnil_tamrakar_lab_6_creating_validation_data.py
|
swappy208/Artificial-Intelligence-Computational-Model
|
fbbbf13fe3912243645f22cc9724721ca3c23162
|
[
"Apache-2.0"
] | null | null | null |
swapnil_tamrakar_lab_6_creating_validation_data.py
|
swappy208/Artificial-Intelligence-Computational-Model
|
fbbbf13fe3912243645f22cc9724721ca3c23162
|
[
"Apache-2.0"
] | null | null | null |
swapnil_tamrakar_lab_6_creating_validation_data.py
|
swappy208/Artificial-Intelligence-Computational-Model
|
fbbbf13fe3912243645f22cc9724721ca3c23162
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Swapnil Tamrakar Lab 6: Creating Validation Data.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1qM6t-O6UEv6QrXCm17CgnIBYIw5XrZqY
#### Copyright 2017 Google LLC.
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""# Lab 6: Creating Validation Data
**Learning Objectives:**
* Generate a train and validation data set for housing data that we will use to predict the median housing price, at the granularity of city blocks.
* Debug issues in the creation of the train and validation splits.
* Select the best single feature to use to train a linear model to predict the median housing price.
* Test that the prediction loss on the validation data accurately reflects the trained model's loss on unseen test data.
### Standard Set-up
We begin with the standard set-up. In this lab we use a data set based on 1990 census data from California. Since this data set has a header row, we don't need to provide the column names.
"""
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io, estimator
# This line increases the amount of logging when there is an error. You can
# remove it if you want less logging.
tf.logging.set_verbosity(tf.logging.ERROR)
# Set the output display to have two digits for decimal places, for display
# readability only and limit it to printing 15 rows.
pd.options.display.float_format = '{:.2f}'.format
pd.options.display.max_rows = 15
"""Read the data set."""
california_housing_dataframe = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep=",")
california_housing_dataframe.describe()
"""##Prepare Features
As our learning models get more sophisticated, we will want to do some computation on the features and even generate new features from the existing features. We see examples of this in later labs. For now this method will just make a copy of the portion of the dataframe we plan to use, and re-scale the median-house value (to make it a bit easier to work with).
"""
def prepare_features(dataframe):
"""Prepares the features for the provided dataset.
Args:
dataframe: A Pandas DataFrame containing the data set.
Returns:
A new DataFrame that contains the features to be used for the model.
"""
processed_features = dataframe.copy()
# Modifying median_house_value to be in scale of $1000. So a value of 14.0
# will correspond to $14,000. This will make it a bit easier to work with.
processed_features["median_house_value"] /= 1000.0
return processed_features
"""## Define Standard Functions to Train and Evaluate a Linear Regression Model
As part of this lab you will train linear regression model to predict the median home price from the median family income. We copy all of the functions needed to do this from the previous labs so you have them available to use later in this lab.
### Compute Loss
Here is a simple method to compute the loss on the given input function and targets.
"""
def compute_loss(model, input_fn, targets):
""" Computes the loss (RMSE) for linear regression.
Args:
model: the trained model to use for making the predictions
input_fn: the input_fn to use to make the predictions
targets: a list of the target values being predicted that must be the
same length as predictions.
Returns:
The RMSE for the provided predictions and targets.
"""
predictions = list(model.predict(input_fn=input_fn))
return math.sqrt(metrics.mean_squared_error(predictions, targets))
"""### Setting Up the Feature Columns and Input Function for TensorFlow
We create a list of the categorical and numerical features that we will use for training our model. Recall that it's okay if one of these lists is empty. In this lab in addition to having a training set, we introduce a validation set that will be used to select features and tune the hyperparameters used for training. There's also a test data set representing the unseen data that we want the model to generalize to perform well. To be able to train or just evaluate a model for these data sets, we define `train_input_fn` to use the training data, `eval_input_fn` to use the validation data, and `test_input_fn` to use the test data.
"""
CATEGORICAL_COLUMNS = []
NUMERICAL_COLUMNS = ["latitude", "longitude", "housing_median_age",
"total_rooms", "total_bedrooms", "population",
"households", "median_income", "median_house_value"]
def input_fn(dataframe):
"""Constructs a dictionary for the feature columns.
Args:
dataframe: The Pandas DataFrame to use for the input.
Returns:
The feature columns and the associated labels for the provided input.
"""
# Creates a dictionary mapping each numeric feature column name (k) to
# the values of that column stored in a constant Tensor.
numerical_cols = {k: tf.constant(dataframe[k].values)
for k in NUMERICAL_COLUMNS}
# Creates a dictionary mapping each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {k: tf.SparseTensor(
indices=[[i, 0] for i in range(dataframe[k].size)],
values=dataframe[k].values,
dense_shape=[dataframe[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(numerical_cols.items() + categorical_cols.items())
# Converts the label column into a constant Tensor.
label = tf.constant(dataframe[LABEL].values)
# Returns the feature columns and the label.
return feature_cols, label
def train_input_fn():
return input_fn(training_examples)
def eval_input_fn():
return input_fn(validation_examples)
def test_input_fn():
return input_fn(test_examples)
"""### Functions to help visualize our results
As in past labs, we define functions to generate a calibration plot and learning curve with the change that the learning curve will include both the training loss and validation loss in order to help visually see when we are starting to overfit the data.
"""
def make_calibration_plot(predictions, targets):
""" Creates a calibration plot.
Args:
predictions: a list of values predicted by the model.
targets: a list of the target values being predicted that must be the
same length as predictions.
"""
calibration_data = pd.DataFrame()
calibration_data["predictions"] = pd.Series(predictions)
calibration_data["targets"] = pd.Series(targets)
calibration_data.describe()
min_val = calibration_data["predictions"].min()
max_val = calibration_data["predictions"].max()
plt.ylabel("target")
plt.xlabel("prediction")
plt.scatter(predictions, targets, color='black')
plt.plot([min_val, max_val], [min_val, max_val])
def plot_learning_curve(training_losses, validation_losses):
""" Plot the learning curve.
Args:
training_loses: a list of losses to plot.
validation_losses: a list of validation losses to plot.
"""
plt.ylabel('Loss')
plt.xlabel('Training Steps')
plt.plot(training_losses, label="training")
plt.plot(validation_losses, label="validation")
plt.legend(loc=1)
"""### Defining the features
This data set only has numerical features. As a starting point we will introduce one `real_valued_column` for each feature we want to use in predicting the `median_house_value`. In the below code, we have set this to `households`, however, you are encouraged to switch this to a feature you think might be more relevant to predict the median house value.
"""
NUMERICAL_FEATURES = ["households"]
LABEL = "median_house_value"
def construct_feature_columns():
"""Construct TensorFlow Feature Columns for the given features.
Returns:
A set of feature columns.
"""
feature_set = set([tf.contrib.layers.real_valued_column(feature)
for feature in NUMERICAL_FEATURES])
return feature_set
"""### Functions for defining the linear regression model and training it
We slightly modify our function to train a model to also store the validation loss so that we can include it on our learning curve. We use a calibration plot as a way of visualizing the model.
"""
def define_linear_regression_model(learning_rate):
""" Defines a linear regression model of one feature to predict the target.
Args:
learning_rate: A `float`, the learning rate.
Returns:
A linear regressor created with the given parameters.
"""
linear_regressor = tf.contrib.learn.LinearRegressor(
feature_columns=construct_feature_columns(),
optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate),
gradient_clip_norm=5.0
)
return linear_regressor
def train_model(linear_regressor, steps):
"""Trains a linear regression model.
Args:
linear_regressor: The regressor to train.
steps: A non-zero `int`, the total number of training steps.
Returns:
The trained regressor.
"""
# In order to see how the model evolves as we train it, we divide the
# steps into periods and show the model after each period.
periods = 10
steps_per_period = steps / periods
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics. We store the training and validation losses so we can
# generate a learning curve.
print "Training model..."
training_losses = []
validation_losses = []
for period in range (0, periods):
# Call fit to train the regressor for steps_per_period steps.
linear_regressor.fit(input_fn=train_input_fn, steps=steps_per_period)
# Compute the loss between the predictions and the correct labels, append
# the training and validation loss to the list of losses used to generate
# the learning curve after training is complete and print the current
# training loss.
training_loss = compute_loss(linear_regressor, train_input_fn,
training_examples[LABEL])
validation_loss = compute_loss(linear_regressor, eval_input_fn,
validation_examples[LABEL])
training_losses.append(training_loss)
validation_losses.append(validation_loss)
print " Training loss after period %02d : %0.3f" % (period, training_loss)
# Now that training is done print the final training and validation losses.
print "Final Training Loss (RMSE): %0.3f" % training_loss
print "Final Validation Loss (RMSE): %0.3f" % validation_loss
# Generate a figure with the learning curve on the left and a
# calibration plot on the right.
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.title("Learning Curve (RMSE vs time)")
plot_learning_curve(training_losses, validation_losses)
plt.subplot(1, 2, 2)
plt.tight_layout(pad=1.1, w_pad=3.0, h_pad=3.0)
plt.title("Calibration Plot on Validation Data")
validation_predictions = np.array(list(linear_regressor.predict(
input_fn=eval_input_fn)))
make_calibration_plot(validation_predictions, validation_examples[LABEL])
return linear_regressor
"""##Divide the provided data for training our model into a training and validation set
Our goal for training a model is to make predictions on new unseen data. As the model gets larger (in terms of the number of weights we are learning), it is possible to start memorizing the training data and overfitting noise that might be in that data. When overfitting occurs, our model will make poor predictions on new data, which defeats our purpose. Thus we need a mechanism to recognize when overfitting occurs. A common way to do this is to set aside some of the training data as a validation set, using the rest as our training set.
For the ***training set***, we'll choose the first 14000 examples, out of the total of 17000.
"""
training_examples = prepare_features(california_housing_dataframe.head(14000))
training_examples.describe()
"""For the ***validation set***, we'll choose the last 3000 examples, out of the total of 17000."""
validation_examples = prepare_features(california_housing_dataframe.tail(3000))
validation_examples.describe()
"""### Examine the data
Let's take a close look at two features in particular: **`latitude`** and **`longitude`**. These are geographical coordinates of the city block in question.
This might make a nice visualization — let's plot `latitude` and `longitude`, and use color to show the `median_house_value`.
"""
plt.figure(figsize=(13, 8))
ax = plt.subplot(1, 2, 1)
ax.set_title("Training Data")
ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(training_examples["longitude"],
training_examples["latitude"],
cmap="coolwarm",
c=training_examples["median_house_value"] / training_examples["median_house_value"].max())
ax = plt.subplot(1, 2, 2)
ax.set_title("Validation Data")
ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(validation_examples["longitude"],
validation_examples["latitude"],
cmap="coolwarm",
c=validation_examples["median_house_value"] / validation_examples["median_house_value"].max())
_ = plt.plot()
"""## Task 1: Train a Model (1 point)
Pick the single feature that you think would lead to the best model to predict the `median_house_value`. Adjust the learning_rate and steps to train a good model. HINT: you should be able to get the RMSE down to around 80. Using `households` is not a good choice! Look at what other features are available.
"""
NUMERICAL_FEATURES = ["median_income"]
LABEL = "median_house_value"
LEARNING_RATE = 0.4
STEPS = 50
linear_regressor = define_linear_regression_model(learning_rate = LEARNING_RATE)
linear_regressor = train_model(linear_regressor, steps=STEPS)
"""### Load the Provided Test Data
This data set (as with many) comes with a provided test data set that is representative and is used to evaluate the final performance. For this data set, the provided test data is located [here](https://storage.googleapis.com/ml_universities/california_housing_test.csv).
The purpose of having validation data is to notice overfitting and other problems. Remember our key goal is to train a model that will make good predictions on **new unseen data**. Remember that the test data should only be used at the end to see how your final model is performing. It should not be used in helping select which features to use or to select hyperparameter values.
"""
california_housing_test_data = pd.read_csv(
"https://storage.googleapis.com/ml_universities/california_housing_test.csv",
sep=",")
test_examples = prepare_features(california_housing_test_data)
"""## Task 2: Measure the Test Error (1/2 point)
Modify the codebox below to measure the test error. Look at the training error, validation error, and test error for the model you trained in Task 1. You should only do this after you have picked your hyperparameters.
"""
# put your code here
print "Training Error : ",compute_loss(linear_regressor, train_input_fn, training_examples[LABEL])
print "Validation Error : ",compute_loss(linear_regressor, eval_input_fn, validation_examples[LABEL])
print "Test Error : ",compute_loss(linear_regressor, test_input_fn, test_examples[LABEL])
"""## Task 3: Recognize the Problem in the Splitting of the Data (1 point)
There's something that we forgot to do above and it is going to cause a problem. You need to figure out what that is and fix it before you can move on. Below is some guidance to help you recognize there is a problem and from there you should be able to think through the issue and figure out the cause and what you can do to correct it.
We should see something that resembles a map of California, with red showing up in expensive areas like the San Francisco and Los Angeles. The training set sort of does, but the validation data does not. Answer the following questions directly in the comment area of the codebox below.
A) Do you see any other differences in the distributions of features or targets
between the training and validation data?
Answer:
The longitude distributions of the training and validation data differ a lot, as the validation data contains longitudes unseen in the training data and vice versa.
B) Why is this happening?
Answer:
It is most likely because the data is sorted by latitude and longitude, and so the validation data contains data that isn't close to the training data at all.
This is reflected in the latitude/longitude plot, as the training data only contains data to the right of -122 and the validation data contains data to the left of -122.
C) How is this problem reflected when you look at the relationship between the
training error, validation error and test error?
This is reflected as a large gap between the RMSE on the training data and on the validation data, while the training and test errors are much closer together.
This clearly shows that the problem comes from the validation data being entirely new data from different regions than the training data.
## Task 4: Fix the Problem (1 point)
Make the changes here to how the training and validation examples are created and call prepare features again. (1 point)
"""
## Add what you need to this code block to fix the issue you have seen ##
california_housing_dataframe = california_housing_dataframe.sample(frac=1).reset_index(drop=True)
## Regenerate the training and validation examples -- put your changes here
training_examples = prepare_features(california_housing_dataframe.head(14000))
validation_examples = prepare_features(california_housing_dataframe.tail(3000))
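# A quick sanity check (not part of the lab tasks): after shuffling, the
# longitude distributions of the two splits should cover roughly the same range.
print training_examples["longitude"].describe()
print validation_examples["longitude"].describe()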
"""####Here we again view the map view of the data to confirm if the issue has been resolved. Does this look different than above? If not, you still have not fixed the issue. It should be very clear once you have."""
plt.figure(figsize=(13, 8))
ax = plt.subplot(1, 2, 1)
ax.set_title("Training Data")
ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(training_examples["longitude"],
training_examples["latitude"],
cmap="coolwarm",
c=training_examples["median_house_value"] / training_examples["median_house_value"].max())
ax = plt.subplot(1, 2, 2)
ax.set_title("Validation Data")
ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(validation_examples["longitude"],
validation_examples["latitude"],
cmap="coolwarm",
c=validation_examples["median_house_value"] / validation_examples["median_house_value"].max())
_ = plt.plot()
"""## Task 5: Re-train a Model (1 point)
You may need to adjust your hyperparameter but use the same feature. Again, only use the training and validation data to do this portion.
"""
NUMERICAL_FEATURES = ["median_income"]
LABEL = "median_house_value"
LEARNING_RATE = 0.4
STEPS = 50
linear_regressor = define_linear_regression_model(learning_rate = LEARNING_RATE)
linear_regressor = train_model(linear_regressor, steps=STEPS)
"""## Task 6: Compute the Test Error for Your New Model (1/2 point)
Compute the test error and then answer the question below.
"""
# Fill this in
print "Training Error : ",compute_loss(linear_regressor, train_input_fn, training_examples[LABEL])
print "Validation Error : ",compute_loss(linear_regressor, eval_input_fn, validation_examples[LABEL])
print "Test Error : ",compute_loss(linear_regressor, test_input_fn, test_examples[LABEL])
"""
How do the training error, validation error and test error now compare to each
other?
ANSWER:
The training error, validation error and test error are almost the same.
"""
| 43.676533 | 636 | 0.750956 |
c26abfbb42761a46a0e7e008187ed8590aa2e158 | 170,846 | py | Python | bottle.py | sailfish009/bottle | a6384a93a8fff4c1be44fd808f71bad7728a4c74 | ["MIT"] | null | null | null | bottle.py | sailfish009/bottle | a6384a93a8fff4c1be44fd808f71bad7728a4c74 | ["MIT"] | null | null | null | bottle.py | sailfish009/bottle | a6384a93a8fff4c1be44fd808f71bad7728a4c74 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2009-2018, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ######################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to _main() is at the end of the file.
def _cli_parse(args): # pragma: no coverage
from argparse import ArgumentParser
parser = ArgumentParser(prog=args[0], usage="%(prog)s [options] package.module:app")
opt = parser.add_argument
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
opt('app', help='WSGI app entry point.', nargs='?')
cli_args = parser.parse_args(args[1:])
return cli_args, parser
def _cli_patch(cli_args): # pragma: no coverage
parsed_args, _ = _cli_parse(cli_args)
opts = parsed_args
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ##########################################
###############################################################################
import base64, calendar, cgi, email.utils, functools, hmac, imp, itertools,\
mimetypes, os, re, tempfile, threading, time, warnings, weakref, hashlib
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
try:
from ujson import dumps as json_dumps, loads as json_lds
except ImportError:
from json import dumps as json_dumps, loads as json_lds
# inspect.getargspec is deprecated in Python 3 and removed in newer releases;
# use the Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
except ImportError:
try:
from inspect import getfullargspec
def getargspec(func):
spec = getfullargspec(func)
kwargs = makelist(spec[0]) + makelist(spec.kwonlyargs)
return kwargs, spec[1], spec[2], spec[3]
except ImportError:
from inspect import getargspec
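# A quick illustration of the unified getargspec() contract provided above
# (illustrative only, not part of the original file):
#     def f(a, b=1, *args, **kwargs): pass
#     getargspec(f)  ->  (['a', 'b'], 'args', 'kwargs', (1,))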
py = sys.version_info
py3k = py.major > 2
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie, Morsel, CookieError
from collections.abc import MutableMapping as DictMixin
import pickle
from io import BytesIO
import configparser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie, Morsel, CookieError
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
import ConfigParser as configparser
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
exec(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
if isinstance(s, unicode):
return s.encode(enc)
return b'' if s is None else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
return unicode("" if s is None else s)
tonat = touni if py3k else tob
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(major, minor, cause, fix):
text = "Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\n"\
"Cause: %s\n"\
"Fix: %s\n" % (major, minor, cause, fix)
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
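# DictProperty is what lets request/response attributes live inside a backing
# dict such as the WSGI environ. A hedged sketch of the idea (names below are
# illustrative, not from this file):
#     class Example(object):
#         def __init__(self):
#             self.storage = {}                 # the dict-like backing attribute
#         @DictProperty('storage', 'answer', read_only=True)
#         def answer(self):
#             return 42                         # computed once, cached in self.storage
#     Example().answer  ->  42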
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
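# cached_property in short: the first access runs the wrapped function and stores
# the result in the instance __dict__, so later accesses bypass the descriptor
# entirely; deleting the attribute re-enables computation. Illustrative sketch
# (expensive_render is a made-up placeholder):
#     class Page(object):
#         @cached_property
#         def body(self):
#             return expensive_render()         # runs only once per instance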
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events #######################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
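# For example (illustrative), _re_flatten(r'/(?P<id>\d+)/(x|y)') returns
# r'/(?:\d+)/(?:x|y)': both named and plain groups become non-capturing, while
# escaped parentheses are left untouched.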
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
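    # A hedged example of a custom filter (the 'list' name and delimiter handling
    # are illustrative, not part of this file):
    #     def list_filter(config):
    #         delimiter = config or ','
    #         regexp = r'\d+(?:%s\d+)*' % re.escape(delimiter)
    #         def to_python(match):
    #             return [int(x) for x in match.split(delimiter)]
    #         def to_url(numbers):
    #             return delimiter.join(str(n) for n in numbers)
    #         return regexp, to_python, to_url
    #     app.router.add_filter('list', list_filter)
    #     # -> a route rule like '/ids/<ids:list>' now yields a list of ints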
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>])+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if g[2] is not None:
depr(0, 13, "Use of old route syntax.",
"Use <name> instead of :name in routes.")
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError as E:
raise RouteBuildError('Missing URL argument: %r' % E.args[0])
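    # Illustrative use of build(): given router.add('/wiki/<page>', 'GET', target,
    # name='wiki'), router.build('wiki', page='Main') returns '/wiki/Main'; any
    # keyword arguments that are not wildcards become the query string.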
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turning an URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = app.config._make_overlay()
self.config.load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
depr(0, 13, "Route.get_config() is deprectated.",
"The Route.config property already includes values from the"
" application config for missing keys. Access it directly.")
return self.config.get(key, default)
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
@lazy_attribute
def _global_config(cls):
cfg = ConfigDict()
cfg.meta_set('catchall', 'validate', bool)
return cfg
def __init__(self, **kwargs):
#: A :class:`ConfigDict` for app specific configuration.
self.config = self._global_config._make_overlay()
self.config._add_change_listener(
functools.partial(self.trigger_hook, 'config'))
self.config.update({
"catchall": True
})
if kwargs.get('catchall') is False:
depr(0, 13, "Bottle(catchall) keyword argument.",
"The 'catchall' setting is now part of the app "
"configuration. Fix: `app.config['catchall'] = False`")
self.config['catchall'] = False
if kwargs.get('autojson') is False:
depr(0, 13, "Bottle(autojson) keyword argument.",
"The 'autojson' setting is now part of the app "
"configuration. Fix: `app.config['json.enable'] = False`")
self.config['json.disable'] = True
self._mounts = []
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = {'after_request'}
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
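    # Typical hook usage (illustrative sketch, not from this file):
    #     @app.hook('before_request')
    #     def strip_trailing_slash():
    #         request.environ['PATH_INFO'] = request.environ['PATH_INFO'].rstrip('/')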
def _mount_wsgi(self, prefix, app, **options):
segments = [p for p in prefix.split('/') if p]
if not segments:
raise ValueError('WSGI applications cannot be mounted to "/".')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
if py3k:
# Errors here mean that the mounted WSGI app did not
# follow PEP-3333 (which requires latin1) or used a
# pre-encoding other than utf8 :/
status = status.encode('latin1').decode('utf8')
headerlist = [(k, v.encode('latin1').decode('utf8'))
for (k, v) in headerlist]
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def _mount_app(self, prefix, app, **options):
if app in self._mounts or '_mount.app' in app.config:
depr(0, 13, "Application mounted multiple times. Falling back to WSGI mount.",
"Clone application before mounting to a different location.")
return self._mount_wsgi(prefix, app, **options)
if options:
depr(0, 13, "Unsupported mount options. Falling back to WSGI mount.",
"Do not specify any route options when mounting bottle application.")
return self._mount_wsgi(prefix, app, **options)
if not prefix.endswith("/"):
depr(0, 13, "Prefix must end in '/'. Falling back to WSGI mount.",
"Consider adding an explicit redirect from '/prefix' to '/prefix/' in the parent application.")
return self._mount_wsgi(prefix, app, **options)
self._mounts.append(app)
app.config['_mount.prefix'] = prefix
app.config['_mount.app'] = self
for route in app.routes:
route.rule = prefix + route.rule.lstrip('/')
self.add_route(route)
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
parent_app.mount('/prefix/', child_app)
:param prefix: path prefix or `mount-point`.
:param app: an instance of :class:`Bottle` or a WSGI application.
Plugins from the parent application are not applied to the routes
of the mounted child application. If you need plugins in the child
application, install them separately.
While it is possible to use path wildcards within the prefix path
            (:class:`Bottle` children only), it is highly discouraged.
The prefix path must end with a slash. If you want to access the
root of the child application via `/prefix` in addition to
`/prefix/`, consider adding a route with a 307 redirect to the
parent application.
"""
if not prefix.startswith('/'):
raise ValueError("Prefix must start with '/'")
if isinstance(app, Bottle):
return self._mount_app(prefix, app, **options)
else:
return self._mount_wsgi(prefix, app, **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route`, urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500, callback=None):
""" Register an output handler for a HTTP error code. Can
be used as a decorator or called directly ::
def error_handler_500(error):
return 'error_handler_500'
app.error(code=500, callback=error_handler_500)
@app.error(404)
def error_handler_404(error):
return 'error_handler_404'
"""
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
self.error_handler[int(code)] = callback
return callback
return decorator(callback) if callback else decorator
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res, template_settings=dict(name='__ERROR_PAGE_TEMPLATE')))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
while True: # Remove in 0.14 together with RouteReset
out = None
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
out = route.call(**args)
break
except HTTPResponse as E:
out = E
break
except RouteReset:
depr(0, 13, "RouteReset exception deprecated",
"Call route.call() after route.reset() and "
"return the result.")
route.reset()
continue
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
try:
self.trigger_hook('after_request')
except HTTPResponse as E:
out = E
out.apply(response)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
environ['wsgi.errors'].flush()
out = HTTPError(500, "Internal Server Error", E, stacktrace)
out.apply(response)
return out
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse as E:
first = E
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as error:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', error, format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._wsgi_status_line(), response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(E)), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
environ['wsgi.errors'].flush()
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None, digestmod=hashlib.sha256):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret:
# See BaseResponse.set_cookie for details on signed cookies.
if value and value.startswith('!') and '?' in value:
sig, msg = map(tob, value[1:].split('?', 1))
hash = hmac.new(tob(secret), msg, digestmod=digestmod).digest()
if _lscmp(sig, base64.b64encode(hash)):
dst = pickle.loads(base64.b64decode(msg))
if dst and dst[0] == key:
return dst[1]
return default
return value or default
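    # Illustrative round trip for signed cookies (the 'account' key is made up):
    #     response.set_cookie('account', {'id': 42}, secret='s3cret')
    #     ... on a later request ...
    #     request.get_cookie('account', secret='s3cret')  ->  {'id': 42}
    # With a wrong or missing secret the default value is returned instead.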
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters". They
            are not to be confused with "URL wildcards", which are provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
forms.recode_unicode = self.POST.recode_unicode
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
files.recode_unicode = self.POST.recode_unicode
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json`` or
``application/json-rpc``, this property holds the parsed content
of the request body. Only requests smaller than :attr:`MEMFILE_MAX`
are processed to avoid memory exhaustion.
Invalid JSON raises a 400 error response.
"""
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype in ('application/json', 'application/json-rpc'):
b = self._get_body_string(self.MEMFILE_MAX)
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
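    # The chunked parser above expects the standard wire format, e.g.
    # (illustrative): b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n" yields b"Wiki"
    # then b"pedia". Each chunk is "<hex size>[;extensions]\r\n<data>\r\n",
    # terminated by a zero-sized chunk.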
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self, maxread):
""" Read body into a string. Raise HTTPError(413) on requests that are
            too large. """
if self.content_length > maxread:
raise HTTPError(413, 'Request entity too large')
data = self.body.read(maxread + 1)
if len(data) > maxread:
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
body = tonat(self._get_body_string(self.MEMFILE_MAX), 'latin1')
for key, value in _parse_qsl(body):
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py3k:
args['encoding'] = 'utf8'
post.recode_unicode = False
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and tailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
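    # path_shift example (illustrative): with SCRIPT_NAME '/a' and PATH_INFO
    # '/b/c', request.path_shift(1) moves one segment so that SCRIPT_NAME becomes
    # '/a/b' and PATH_INFO becomes '/c'; a negative shift reverses the direction.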
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only works
            if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
def __delattr__(self, name):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(key):
if '\n' in key or '\r' in key or '\0' in key:
raise ValueError("Header names must not contain control characters: %r" % key)
return key.title().replace('_', '-')
def _hval(value):
value = tonat(value)
if '\n' in value or '\r' in value or '\0' in value:
raise ValueError("Header value must not contain control characters: %r" % value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=None, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.get_header(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj[self.name] = self.writer(value) if self.writer else value
def __delete__(self, obj):
del obj[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: frozenset(('Content-Type', 'Content-Length')),
304: frozenset(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
cookies = copy._cookies = SimpleCookie()
for k,v in self._cookies.items():
cookies[k] = v.value
cookies[k].update(v) # also copy cookie attributes
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
if '\n' in status or '\r' in status or '\0' in status:
raise ValueError('Status line must not include control chars.')
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
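    # Setting the status (illustrative): both forms below update status_code and
    # status_line consistently.
    #     response.status = 404                     ->  '404 Not Found'
    #     response.status = '404 Brain not found'   ->  code 404, custom phrase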
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [_hval(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [_hval(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(_hval(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
def _wsgi_status_line(self):
""" WSGI conform status line (latin1-encodeable) """
if py3k:
return self._status_line.encode('utf8').decode('latin1')
return self._status_line
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', _hval(c.OutputString())))
if py3k:
out = [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, digestmod=hashlib.sha256, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param maxage: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this
              cookie (default: off, requires Python 2.6 or newer).
:param samesite: Control or disable third-party use for this cookie.
Possible values: `lax`, `strict` or `none` (default).
If neither `expires` nor `maxage` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Pickle is a potentially dangerous format. If an attacker
gains access to the secret key, he could forge cookies that execute
code on server side if unpickled. Using pickle is discouraged and
support for it will be removed in later versions of bottle.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information at client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
# Monkey-patch Cookie lib to support 'SameSite' parameter
# https://tools.ietf.org/html/draft-west-first-party-cookies-07#section-4.1
if py < (3, 8, 0):
Morsel._reserved.setdefault('samesite', 'SameSite')
if secret:
if not isinstance(value, basestring):
depr(0, 13, "Pickling of arbitrary objects into cookies is "
"deprecated.", "Only store strings in cookies. "
"JSON strings are fine, too.")
encoded = base64.b64encode(pickle.dumps([name, value], -1))
sig = base64.b64encode(hmac.new(tob(secret), encoded,
digestmod=digestmod).digest())
value = touni(tob('!') + sig + tob('?') + encoded)
elif not isinstance(value, basestring):
raise TypeError('Secret key required for non-string cookies.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key in ('max_age', 'maxage'): # 'maxage' variant added in 0.13
key = 'max-age'
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
value = http_date(value)
if key in ('same_site', 'samesite'): # 'samesite' variant added in 0.13
key, value = 'samesite', (value or "none").lower()
if value not in ('lax', 'strict', 'none'):
raise CookieError("Invalid value for SameSite")
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key] = value
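    # Hedged usage sketch (not part of the upstream source; names are examples
    # only): a plain cookie and a signed cookie. The secret only signs the
    # value, it does not encrypt or hide it from the client.
    #
    #   response.set_cookie('lang', 'en', path='/', max_age=3600)
    #   response.set_cookie('account', 'alice', secret='change-me', httponly=True)
    #   request.get_cookie('account', secret='change-me')  # 'alice' if the signature matches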
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def setup(self, app):
app.config._define('json.enable', default=True, validate=bool,
help="Enable or disable automatic dict->json filter.")
app.config._define('json.ascii', default=False, validate=bool,
help="Use only 7-bit ASCII characters in output.")
app.config._define('json.indent', default=True, validate=bool,
help="Add whitespace to make json more readable.")
app.config._define('json.dump_func', default=None,
help="If defined, use this function to transform"
" dict into json. The other options no longer"
" apply.")
def apply(self, callback, route):
dumps = self.json_dumps
if not self.json_dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPResponse as resp:
rv = resp
if isinstance(rv, dict):
                # Attempt to serialize; raises an exception on failure.
json_response = dumps(rv)
                # Set the content type only if serialization succeeded.
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
                   into a specific type. Exceptions are suppressed and result in
                   the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
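# Hedged example (illustrative only, not part of the upstream source):
#
#   d = MultiDict(a=1)
#   d['a'] = 2                     # appends instead of overwriting
#   d['a']                         # -> 2 (the newest value)
#   d.getall('a')                  # -> [1, 2]
#   d.get('a', type=str, index=0)  # -> '1' (oldest value, cast to str)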
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
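# Hedged example (illustrative only): attribute access on a FormsDict never
# raises and falls back to an empty string, which keeps form handling terse.
#
#   form = FormsDict(name='Ben')
#   form.name        # -> 'Ben'
#   form.missing     # -> '' (no AttributeError)
#   form['missing']  # -> KeyError (plain item access stays strict)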
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(_hval(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in (_hkey(n) for n in names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
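# Hedged example (illustrative only): reading header fields straight from a
# raw WSGI environ dict.
#
#   env = {'HTTP_USER_AGENT': 'curl/8.0', 'CONTENT_TYPE': 'text/plain'}
#   headers = WSGIHeaderDict(env)
#   headers['User-Agent']   # -> 'curl/8.0'
#   sorted(headers.keys())  # -> ['Content-Type', 'User-Agent']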
_UNSET = object()
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, overlays and more.
This dict-like class is heavily optimized for read access. All read-only
methods as well as item access should be as fast as the built-in dict.
"""
__slots__ = ('_meta', '_change_listener', '_overlays', '_virtual_keys', '_source', '__weakref__')
def __init__(self):
self._meta = {}
self._change_listener = []
#: Weak references of overlays that need to be kept in sync.
self._overlays = []
#: Config that is the source for this overlay.
self._source = None
#: Keys of values copied from the source (values we do not own)
self._virtual_keys = set()
def load_module(self, path, squash=True):
"""Load values from a Python module.
           Example module ``config.py``::
DEBUG = True
SQLITE = {
"db": ":memory:"
}
>>> c = ConfigDict()
>>> c.load_module('config')
           {'DEBUG': True, 'SQLITE.db': ':memory:'}
>>> c.load_module("config", False)
           {'DEBUG': True, 'SQLITE': {'db': ':memory:'}}
:param squash: If true (default), dictionary values are assumed to
represent namespaces (see :meth:`load_dict`).
"""
config_obj = load(path)
obj = {key: getattr(config_obj, key) for key in dir(config_obj)
if key.isupper()}
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename, **options):
""" Load values from an ``*.ini`` style config file.
A configuration file consists of sections, each led by a
``[section]`` header, followed by key/value entries separated by
either ``=`` or ``:``. Section names and keys are case-insensitive.
Leading and trailing whitespace is removed from keys and values.
Values can be omitted, in which case the key/value delimiter may
also be left out. Values can also span multiple lines, as long as
            they are indented deeper than the first line of the value. Comments
            are prefixed by ``#`` or ``;`` and may only appear on their own on
an otherwise empty line.
Both section and key names may contain dots (``.``) as namespace
separators. The actual configuration parameter name is constructed
by joining section name and key name together and converting to
lower case.
The special sections ``bottle`` and ``ROOT`` refer to the root
namespace and the ``DEFAULT`` section defines default values for all
other sections.
With Python 3, extended string interpolation is enabled.
:param filename: The path of a config file, or a list of paths.
:param options: All keyword parameters are passed to the underlying
:class:`python:configparser.ConfigParser` constructor call.
"""
options.setdefault('allow_no_value', True)
if py3k:
options.setdefault('interpolation',
configparser.ExtendedInterpolation())
conf = configparser.ConfigParser(**options)
conf.read(filename)
for section in conf.sections():
for key in conf.options(section):
value = conf.get(section, key)
if section not in ['bottle', 'ROOT']:
key = section + '.' + key
self[key.lower()] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
>>> c = ConfigDict()
>>> c.update('some.namespace', key='value')
"""
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
self._virtual_keys.discard(key)
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
if key in self._virtual_keys:
raise KeyError("Virtual keys cannot be deleted: %s" % key)
if self._source and key in self._source:
# Not virtual, but present in source -> Restore virtual value
dict.__delitem__(self, key)
self._set_virtual(key, self._source[key])
else: # not virtual, not present in source. This is OUR value
self._on_change(key, None)
dict.__delitem__(self, key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _set_virtual(self, key, value):
""" Recursively set or update virtual keys. Do nothing if non-virtual
value is present. """
if key in self and key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
self._virtual_keys.add(key)
if key in self and self[key] is not value:
self._on_change(key, value)
dict.__setitem__(self, key, value)
for overlay in self._iter_overlays():
overlay._set_virtual(key, value)
def _delete_virtual(self, key):
""" Recursively delete virtual entry. Do nothing if key is not virtual.
"""
if key not in self._virtual_keys:
return # Do nothing for non-virtual keys.
if key in self:
self._on_change(key, None)
dict.__delitem__(self, key)
self._virtual_keys.discard(key)
for overlay in self._iter_overlays():
overlay._delete_virtual(key)
def _on_change(self, key, value):
for cb in self._change_listener:
if cb(self, key, value):
return True
def _add_change_listener(self, func):
self._change_listener.append(func)
return func
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
def _define(self, key, default=_UNSET, help=_UNSET, validate=_UNSET):
""" (Unstable) Shortcut for plugins to define own config parameters. """
if default is not _UNSET:
self.setdefault(key, default)
if help is not _UNSET:
self.meta_set(key, 'help', help)
if validate is not _UNSET:
self.meta_set(key, 'validate', validate)
def _iter_overlays(self):
for ref in self._overlays:
overlay = ref()
if overlay is not None:
yield overlay
def _make_overlay(self):
""" (Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.
Entries that were copied from the source are called 'virtual'. You
can not delete virtual keys, but overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.
            Unlike collections.ChainMap and most other implementations, this
            approach does not resolve missing keys on demand, but instead
            actively copies all values from the source to the overlay and keeps
            track of virtual and non-virtual keys internally. This removes any
            lookup overhead. Read access is as fast as a built-in dict for both
virtual and non-virtual keys.
Changes are propagated recursively and depth-first. A failing
on-change handler in an overlay stops the propagation of virtual
            values and may result in a partly updated tree. Take extra care
here and make sure that on-change handlers never fail.
Used by Route.config
"""
# Cleanup dead references
self._overlays[:] = [ref for ref in self._overlays if ref() is not None]
overlay = ConfigDict()
overlay._meta = self._meta
overlay._source = self
self._overlays.append(weakref.ref(overlay))
for key in self:
overlay._set_virtual(key, self[key])
return overlay
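# Hedged example (illustrative only): namespaced keys and a child overlay that
# inherits ('virtual') values from its source until they are overridden locally.
#
#   c = ConfigDict()
#   c.load_dict({'db': {'host': 'localhost', 'port': 5432}})
#   c['db.host']                  # -> 'localhost'
#   o = c._make_overlay()
#   o['db.host'] = 'db.internal'  # only affects the overlay
#   c['db.host']                  # -> 'localhost' (source unchanged)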
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self.default
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
new_app = push
@property
def default(self):
try:
return self[-1]
except IndexError:
return self.push()
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
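# Hedged example (illustrative only; the paths and file names are hypothetical):
#
#   res = ResourceManager()
#   res.add_path('./config/', base=__file__)  # search next to this module
#   res.add_path('/etc/myapp/')
#   fname = res.lookup('defaults.ini')        # absolute path or None
#   if fname:
#       with res.open('defaults.ini') as fp:
#           data = fp.read()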
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
def get_header(self, name, default=None):
""" Return the value of a header within the mulripart part. """
return self.headers.get(name, default)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
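# Hedged example (illustrative only; the route and target directory are
# hypothetical):
#
#   @route('/upload', method='POST')
#   def do_upload():
#       upload = request.files.get('datafile')
#       upload.save('/tmp/uploads')  # stored under the sanitized .filename
#       return 'Stored as %s' % upload.filename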
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024, close=False):
""" Yield chunks from a range in a file, optionally closing it at the end.
No chunk is bigger than maxread. """
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part:
break
bytes -= len(part)
yield part
if close:
fp.close()
def static_file(filename, root,
mimetype=True,
download=False,
charset='UTF-8',
etag=None,
headers=None):
""" Open a file in a safe way and return an instance of :exc:`HTTPResponse`
that can be sent back to the client.
:param filename: Name or path of the file to send, relative to ``root``.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Provide the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset for files with a ``text/*`` mime-type.
(default: UTF-8)
:param etag: Provide a pre-computed ETag header. If set to ``False``,
ETag handling is disabled. (default: auto-generate ETag header)
:param headers: Additional headers dict to add to the response.
While checking user input is always a good idea, this function provides
additional protection against malicious ``filename`` parameters from
breaking out of the ``root`` directory and leaking sensitive information
to an attacker.
Read-protected files or files outside of the ``root`` directory are
answered with ``403 Access Denied``. Missing files result in a
``404 Not Found`` response. Conditional requests (``If-Modified-Since``,
``If-None-Match``) are answered with ``304 Not Modified`` whenever
possible. ``HEAD`` and ``Range`` requests (used by download managers to
check or continue partial downloads) are also handled automatically.
"""
root = os.path.join(os.path.abspath(root), '')
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = headers or {}
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype is True:
if download and download is not True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding:
headers['Content-Encoding'] = encoding
if mimetype:
if (mimetype[:5] == 'text/' or mimetype == 'application/javascript')\
and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download is True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
headers['Last-Modified'] = email.utils.formatdate(stats.st_mtime,
usegmt=True)
headers['Date'] = email.utils.formatdate(time.time(), usegmt=True)
getenv = request.environ.get
if etag is None:
etag = '%d:%d:%d:%d:%s' % (stats.st_dev, stats.st_ino, stats.st_mtime,
clen, filename)
etag = hashlib.sha1(tob(etag)).hexdigest()
if etag:
headers['ETag'] = etag
check = getenv('HTTP_IF_NONE_MATCH')
if check and check == etag:
return HTTPResponse(status=304, **headers)
ims = getenv('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
range_header = getenv('HTTP_RANGE')
if range_header:
ranges = list(parse_range_header(range_header, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset, close=True)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
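# Hedged example (illustrative only; the root directories are hypothetical):
#
#   @route('/static/<filepath:path>')
#   def serve_static(filepath):
#       return static_file(filepath, root='/var/www/static')
#
#   @route('/report')
#   def report():
#       return static_file('report.pdf', root='/var/www/files',
#                          download='monthly-report.pdf')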
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, basestring):
return value
if isinstance(value, datetime):
# aware datetime.datetime is converted to UTC time
# naive datetime.datetime is treated as UTC time
value = value.utctimetuple()
elif isinstance(value, datedate):
# datetime.date is naive, and is treated as UTC time
value = value.timetuple()
if not isinstance(value, (int, float)):
# convert struct_time in UTC to UNIX timestamp
value = calendar.timegm(value)
return email.utils.formatdate(value, usegmt=True)
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return calendar.timegm(ts[:8] + (0, )) - (ts[9] or 0)
except (TypeError, ValueError, IndexError, OverflowError):
return None
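# Hedged example (illustrative only): the two helpers convert between
# datetime/epoch values and RFC 1123 date strings.
#
#   http_date(datetime(2015, 12, 24, 12, 0, 0))  # -> 'Thu, 24 Dec 2015 12:00:00 GMT'
#   parse_date('Thu, 24 Dec 2015 12:00:00 GMT')  # -> 1450958400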
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end:  # bytes=100-    -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
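# Hedged example (illustrative only): end indices are exclusive and
# unsatisfiable ranges are skipped.
#
#   list(parse_range_header('bytes=0-499,1000-', 1400))  # -> [(0, 500), (1000, 1400)]
#   list(parse_range_header('bytes=-100', 1400))         # -> [(1300, 1400)] (last 100 bytes)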
#: Header tokenizer used by _parse_http_header()
_hsplit = re.compile('(?:(?:"((?:[^"\\\\]|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
and parameters. For non-standard or broken input, this implementation may return partial results.
:param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``)
:return: List of (value, params) tuples. The second element is a (possibly empty) dict.
"""
values = []
if '"' not in h: # INFO: Fast path without regexp (~2x faster)
for value in h.split(','):
parts = value.split(';')
values.append((parts[0].strip(), {}))
for attr in parts[1:]:
name, value = attr.split('=', 1)
values[-1][1][name.strip()] = value.strip()
else:
lop, key, attrs = ',', None, {}
for quoted, plain, tok in _hsplit(h):
value = plain.strip() if plain else quoted.replace('\\"', '"')
if lop == ',':
attrs = {}
values.append((value, attrs))
elif lop == ';':
if tok == '=':
key = value
else:
attrs[value] = ''
elif lop == '=' and key:
attrs[key] = value
key = None
lop = tok
return values
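# Hedged example (illustrative only):
#
#   _parse_http_header('text/html,text/plain;q=0.9,*/*;q=0.8')
#   # -> [('text/html', {}), ('text/plain', {'q': '0.9'}), ('*/*', {'q': '0.8'})]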
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
depr(0, 13, "cookie_encode() will be removed soon.",
"Do not use this API directly.")
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key, digestmod=None):
""" Verify and decode an encoded string. Return an object or None."""
depr(0, 13, "cookie_decode() will be removed soon.",
"Do not use this API directly.")
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
digestmod = digestmod or hashlib.sha256
hashed = hmac.new(tob(key), msg, digestmod=digestmod).digest()
if _lscmp(sig[1:], base64.b64encode(hashed)):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
depr(0, 13, "cookie_is_encoded() will be removed soon.",
"Do not use this API directly.")
return bool(data.startswith(tob('!')) and tob('?') in data)
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&').replace('<', '<').replace('>', '>')\
.replace('"', '"').replace("'", ''')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
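# Hedged example (illustrative only):
#
#   path_shift('/a', '/b/c', shift=1)   # -> ('/a/b', '/c')
#   path_shift('/a/b', '/c', shift=-1)  # -> ('/a', '/b/c')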
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
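# Hedged example (illustrative only; the credential check is hypothetical):
#
#   def check_credentials(user, password):
#       return user == 'admin' and password == 's3cret'
#
#   @route('/admin')
#   @auth_basic(check_credentials, realm='admin area')
#   def admin_page():
#       return 'Hello, admin!'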
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
# Before you edit or add a server adapter, please read:
# - https://github.com/bottlepy/bottle/pull/647#issuecomment-60152870
# - https://github.com/bottlepy/bottle/pull/865#issuecomment-242795341
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
        self.port = self.srv.server_port  # update to the actual port (0 means a random free port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
depr(0, 13, "The wsgi server part of cherrypy was split into a new "
"project called 'cheroot'.", "Use the 'cheroot' server "
"adapter instead of cherrypy.")
from cheroot.wsgi import Server as CherryPyWSGIServer
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class CherootServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cheroot import wsgi
from cheroot.ssl import builtin
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.pop('certfile', None)
keyfile = self.options.pop('keyfile', None)
chainfile = self.options.pop('chainfile', None)
server = wsgi.Server(**self.options)
if certfile and keyfile:
server.ssl_adapter = builtin.BuiltinSSLAdapter(
certfile, keyfile, chainfile)
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet, **self.options)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
depr(0, 13, "AppEngineServer no longer required",
"Configure your application directly in your app.yaml")
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import BaseApplication
if self.host.startswith("unix:"):
config = {'bind': self.host}
else:
config = {'bind': "%s:%d" % (self.host, self.port)}
config.update(self.options)
class GunicornApplication(BaseApplication):
def load_config(self):
for key, value in config.items():
self.cfg.set(key, value)
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port, reuse_port=True)
class AsyncioServerAdapter(ServerAdapter):
""" Extend ServerAdapter for adding custom event loop """
def get_event_loop(self):
pass
class AiohttpServer(AsyncioServerAdapter):
""" Asynchronous HTTP client/server framework for asyncio
https://pypi.python.org/pypi/aiohttp/
https://pypi.org/project/aiohttp-wsgi/
"""
def get_event_loop(self):
import asyncio
return asyncio.new_event_loop()
def run(self, handler):
import asyncio
from aiohttp_wsgi.wsgi import serve
self.loop = self.get_event_loop()
asyncio.set_event_loop(self.loop)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
serve(handler, host=self.host, port=self.port)
class AiohttpUVLoopServer(AiohttpServer):
"""uvloop
https://github.com/MagicStack/uvloop
"""
def get_event_loop(self):
import uvloop
return uvloop.new_event_loop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
CherootServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'cheroot': CherootServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'uvloop': AiohttpUVLoopServer,
'auto': AutoServer,
}
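# Hedged example (illustrative only): the `server` argument of run() accepts
# either one of the names above or a ServerAdapter subclass/instance.
#
#   run(app, server='waitress', host='0.0.0.0', port=8000)
#   run(app, server=WaitressServer, port=8000)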
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
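# --- Usage sketch (illustrative, not part of bottle) ---
# load() resolves dotted/colon targets at runtime; keyword arguments become local names
# for the expression form:
def _example_load():
    os_path = load('os.path')                    # module object
    joiner = load('os.path:join')                # attribute fetched from the module
    pattern = load('re:compile(x)', x='[a-z]+')  # evaluated expression, x bound locally
    return os_path, joiner, pattern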
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
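# --- Usage sketch (illustrative, not part of bottle; the target string is made up) ---
# load_app() imports the target on a temporary default application so that route
# decorators executed during the import do not pollute the currently active app:
def _example_load_app():
    return load_app('mypackage.mymodule:app')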
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
if server.host.startswith("unix:"):
_stderr("Listening on %s\n" % server.host)
else:
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
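# --- Usage sketch (illustrative, not part of bottle) ---
# A typical development invocation: serve the default app with the built-in wsgiref
# adapter and restart automatically whenever a source file changes.
def _example_run():
    run(host='localhost', port=8080, server='wsgiref', reloader=True, debug=True)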
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '') or ''
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(BottleException):
pass
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
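# --- Minimal adapter sketch (illustrative, not part of bottle) ---
# A template engine only has to implement prepare() and render(); source/filename
# resolution, lookup paths and settings handling are inherited from BaseTemplate.
class _ExamplePercentTemplate(BaseTemplate):
    def prepare(self, **options):
        # BaseTemplate.__init__ guarantees that self.source or self.filename is set
        self.text = self.source or open(self.filename, 'rb').read().decode(self.encoding)
    def render(self, *args, **kwargs):
        env = self.defaults.copy()
        for dictarg in args:
            env.update(dictarg)
        env.update(kwargs)
        return self.text % env  # plain '%(name)s' substitution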
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.name)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
if name == self.filename:
fname = name
else:
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return (f.read().decode(self.encoding), fname, lambda: False)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
raise depr(0, 11, 'Unsupported template encodings.', 'Use utf-8 for templates.')
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup, syntax=self.syntax)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
exec(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
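# --- Usage sketch (illustrative, not part of bottle) ---
# SimpleTemplate compiles the stpl source once (see the cached `co` property) and then
# renders it with the supplied variables; {{...}} is escaped, {{!...}} is passed through.
def _example_simple_template():
    tpl = SimpleTemplate('Hello {{name}}!\n% for n in items:\n  - {{n}}\n% end\n')
    return tpl.render(name='World', items=['a', 'b'])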
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''(
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n])*?)%%(inline_end)s''' % _re_inl
# add the flag in front of the regexp to avoid Deprecation warning (see Issue #949)
# verbose and dot-matches-newline mode
_re_tok = '(?mx)' + _re_tok
_re_inl = '(?mx)' + _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line = _blk1
self.indent += 1
self.indent_mod -= 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line = _blk2
self.indent_mod -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
elif _end:
self.indent -= 1
self.indent_mod += 1
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
    Get a rendered template as a string.
    You can use a name, a filename or a template string as the first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
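# --- Usage sketch (illustrative, not part of bottle) ---
# template() accepts a template name, a file name or an inline template string as the
# first argument and caches the prepared template keyed by (lookup path, source).
def _example_template():
    return template('Hello {{name}}!', name='World')  # inline SimpleTemplate source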
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
        The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
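# --- Usage sketch (illustrative, not part of bottle; 'hello_template' is a made-up
# template name) ---
# Because the handler returns a dict, @view fills the named template with it; returning
# anything else (e.g. an HTTPResponse) would bypass the template entirely.
@view('hello_template')
def _example_view_handler():
    return dict(name='World')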
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[451] = "Unavailable For Legal Reasons" # RFC 7725
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
%%try:
%%exc = repr(e.exception)
%%except:
%%exc = '<unprintable %%s object>' %% type(e.exception).__name__
%%end
<pre>{{exc}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize the app stack (creation of the first empty Bottle app is deferred until needed)
# BC: 0.6.4 and needed for run()
apps = app = default_app = AppStack()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
def _main(argv): # pragma: no coverage
args, parser = _cli_parse(argv)
def _cli_error(cli_msg):
parser.print_help()
_stderr('\nError: %s\n' % cli_msg)
sys.exit(1)
if args.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args.app:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (args.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
config = ConfigDict()
for cfile in args.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except configparser.Error as parse_error:
_cli_error(parse_error)
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError) as error:
_cli_error("Unable to parse config file %r: %s" % (cfile, error))
for cval in args.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args.app,
host=host,
port=int(port),
server=args.server,
reloader=args.reload,
plugins=args.plugin,
debug=args.debug,
config=config)
if __name__ == '__main__': # pragma: no coverage
_main(sys.argv)
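# --- Command-line sketch (illustrative; option spellings are inferred from the argparse
# dest names used in _main above, so the exact flags may differ) ---
#   python bottle.py --debug --reload --bind localhost:8080 mypackage.mymodule:app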
| 38.600542
| 117
| 0.58191
|
4af197ebe332e027e29849c418500ef9619b3d08
| 533
|
py
|
Python
|
cc/engine/characteristic/views.py
|
Abbas-000/cc.engine
|
eb4b5e5f6c695a16c7ab8bcc52036cf16a0fba22
|
[
"MIT"
] | 6
|
2017-12-25T08:18:43.000Z
|
2021-01-02T09:02:59.000Z
|
cc/engine/characteristic/views.py
|
Abbas-000/cc.engine
|
eb4b5e5f6c695a16c7ab8bcc52036cf16a0fba22
|
[
"MIT"
] | 39
|
2017-11-17T01:59:38.000Z
|
2021-12-14T19:14:12.000Z
|
cc/engine/characteristic/views.py
|
Abbas-000/cc.engine
|
eb4b5e5f6c695a16c7ab8bcc52036cf16a0fba22
|
[
"MIT"
] | 17
|
2017-12-25T08:18:13.000Z
|
2021-04-12T12:50:35.000Z
|
from cc.engine import util
from webob import Response
def characteristic_view(request):
"""
Return one of the characteristic description pages.
"""
target_lang = util.get_target_lang_from_request(request)
template_name = 'characteristic/%s.html' % (
request.matchdict['characteristic'])
context = {'request': request}
context.update(util.rtl_context_stuff(target_lang))
return Response(
util.render_template(
request, target_lang,
template_name, context))
| 25.380952
| 60
| 0.688555
|
313c421b4de8cfe877e2121300f3b2eedd2bd35c
| 298
|
py
|
Python
|
FishCDailyQuestion/ex021-030/Python3_023/023_02.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
FishCDailyQuestion/ex021-030/Python3_023/023_02.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
FishCDailyQuestion/ex021-030/Python3_023/023_02.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding:utf-8
def solve(lst):
res = []
for e in str(lst).split(','):
t = eval(e.strip(' []'))
res.append(t)
return res
if __name__ == "__main__":
list1 = [1, [2], [3, 4], [[5, 6], 7], [8, [[9, [10], 11], 12], 13]]
print(solve(list1))
| 18.625
| 71
| 0.47651
|
46a06f2aab4beae9de4b4f0a3e9af4b01a47794f
| 1,869
|
py
|
Python
|
sans/_lock.py
|
zephyrkul/sans
|
4b1d4dde110b4f3ceca0b8a5d3a75460bcc43105
|
[
"MIT"
] | 6
|
2019-01-17T17:25:06.000Z
|
2021-06-05T17:24:30.000Z
|
sans/_lock.py
|
zephyrkul/sans
|
4b1d4dde110b4f3ceca0b8a5d3a75460bcc43105
|
[
"MIT"
] | 3
|
2019-01-19T07:55:14.000Z
|
2021-04-06T01:58:57.000Z
|
sans/_lock.py
|
zephyrkul/sans
|
4b1d4dde110b4f3ceca0b8a5d3a75460bcc43105
|
[
"MIT"
] | 2
|
2019-02-10T20:08:14.000Z
|
2022-02-23T01:23:59.000Z
|
import aiohttp
import asyncio
import collections
import time
RATE = collections.namedtuple(
"RateLimit", ("requests", "block", "rpad", "bpad", "retry")
)(50, 30, 2, 0.1, 900)
class ResetLock(asyncio.Lock):
__slots__ = "__xrlrs", "__xra", "__deferred"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__xrlrs = 0
self.__xra = None
self.__deferred = False
def __aexit__(self, exc_type, exc, tb):
if isinstance(exc, aiohttp.ClientResponseError):
if exc.status == 429:
self._xra(exc.headers["X-Retry-After"])
elif "X-ratelimit-requests-seen" in exc.headers:
self._xrlrs(exc.headers["X-ratelimit-requests-seen"])
# returns a coroutine
return super().__aexit__(exc_type, exc, tb)
# Prevent deprecated lock usage
__await__, __enter__, __iter__ = None, None, None
def _defer(self):
self.__deferred = True
self._loop.call_later(self.__xra - time.time(), self._release)
def release(self):
if not self.__deferred:
super().release()
def _release(self):
self.__deferred = False
super().release()
def _xra(self, xra: int):
xra = int(xra)
self.__xrlrs = 0
self.__xra = time.time() + xra + RATE.bpad
self._defer()
def _xrlrs(self, xrlrs: int):
now = time.time()
xrlrs = int(xrlrs)
if self.__xra is None or xrlrs < self.__xrlrs or self.__xra <= now:
self.__xra = now + RATE.block + RATE.bpad
self.__xrlrs = xrlrs
if xrlrs >= RATE.requests - RATE.rpad:
self._defer()
@property
def xra(self):
if self.__deferred:
return self.__xra
return None
@property
def xrlrs(self):
return self.__xrlrs
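# --- Usage sketch (illustrative, not part of this module) ---
# Requests are serialized through the lock; __aexit__ inspects the rate-limit headers of
# any aiohttp.ClientResponseError and defers release() until the API window resets.
async def _example_request(session: aiohttp.ClientSession, lock: ResetLock, url: str) -> str:
    async with lock:
        async with session.get(url, raise_for_status=True) as resp:
            return await resp.text()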
| 26.7
| 75
| 0.58641
|
0fd25e5a8086845ea5a5e624d6336530147eb96e
| 3,396
|
py
|
Python
|
dev/scripts/render_templated_sql.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 157
|
2018-04-30T16:27:53.000Z
|
2022-03-31T08:17:21.000Z
|
dev/scripts/render_templated_sql.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 3,250
|
2018-04-26T14:14:25.000Z
|
2022-03-31T23:49:15.000Z
|
dev/scripts/render_templated_sql.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 65
|
2018-05-10T14:11:50.000Z
|
2022-03-18T19:22:58.000Z
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Script to render valid SQL from a Jinja template."""
import argparse
import datetime
from uuid import uuid4
from jinjasql import JinjaSql
def valid_date(date_string):
"""Create date from date string."""
try:
datetime.datetime.strptime(date_string, "%Y-%m-%d")
except ValueError:
msg = f"{date_string} is an unsupported date format."
raise argparse.ArgumentTypeError(msg)
return date_string
def id_list(ids):
return ids.split(",")
def quote_sql_string(value):
"""
If "value" is a string type, escapes single quotes in the string
and returns the string enclosed in single quotes.
Thank you to https://towardsdatascience.com/a-simple-approach-to-templated-sql-queries-in-python-adc4f0dc511
"""
if isinstance(value, str):
new_value = str(value)
new_value = new_value.replace("'", "''")
return f"'{new_value}'"
return value
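# --- Illustrative example (not part of the original script) ---
# Single quotes inside string values are doubled and the result is wrapped in quotes so
# it can be substituted into the rendered SQL; non-string values pass through unchanged.
def _example_quote_sql_string():
    assert quote_sql_string("O'Reilly") == "'O''Reilly'"
    assert quote_sql_string(42) == 42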
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", dest="sql_file", required=True, help="path to SQL template file")
parser.add_argument("-o", "--output-file", dest="output_file", required=False, help="path to output SQL file")
parser.add_argument(
"-s",
"--start-date",
metavar="YYYY-MM-DD",
dest="start_date",
required=False,
type=valid_date,
default=datetime.datetime.utcnow().date().replace(day=1).isoformat(),
help="Date to start generating data (YYYY-MM-DD)",
)
parser.add_argument(
"-e",
"--end-date",
metavar="YYYY-MM-DD",
dest="end_date",
required=False,
type=valid_date,
default=datetime.datetime.utcnow().date().isoformat(),
help="Date to end generating data (YYYY-MM-DD). Default is today.",
)
parser.add_argument("-d", "--schema", dest="schema", required=False, default="acct10001")
parser.add_argument(
"-b", "--bill-ids", dest="bill_ids", required=False, type=id_list, help="A comma separated list of bill IDs"
)
parser.add_argument("-c", "--cluster-id", dest="cluster_id", required=False, help="An OpenShift cluster ID")
parser.add_argument(
"--aws-uuid", dest="aws_provider_uuid", required=False, help="An provider UUID for an AWS provider"
)
parser.add_argument(
"--ocp-uuid", dest="ocp_provider_uuid", required=False, help="An provider UUID for an OpenShift provider"
)
parser.add_argument(
"--azure-uuid", dest="azure_provider_uuid", required=False, help="An provider UUID for an Azure provider"
)
parser.add_argument("--markup", dest="markup", required=False, help="A decimal value for markup")
args = parser.parse_args()
arg_dict = vars(args)
arg_dict["uuid"] = str(uuid4()).replace("-", "_")
sql_file = arg_dict.pop("sql_file")
output_file = arg_dict.pop("output_file")
with open(sql_file, "r") as f:
sql_template = f.read()
jinja_sql = JinjaSql()
sql_query, bind_params = jinja_sql.prepare_query(sql_template, arg_dict)
bind_params = [quote_sql_string(val) for val in bind_params]
query = sql_query % tuple(bind_params)
if output_file:
with open(output_file, "w") as f:
f.write(query)
else:
print(query)
| 33.623762
| 116
| 0.649293
|
42b6bb688850f5da84b9ea39c0f9641d5c5e4737
| 4,752
|
py
|
Python
|
new_test.py
|
trohit920/Custom_Yolo_Object_Detection
|
1cf9f774b02a747ae7c316879333ecf89382bea8
|
[
"MIT"
] | 1
|
2021-06-22T06:57:05.000Z
|
2021-06-22T06:57:05.000Z
|
new_test.py
|
trohit920/Custom_Yolo_Object_Detection
|
1cf9f774b02a747ae7c316879333ecf89382bea8
|
[
"MIT"
] | null | null | null |
new_test.py
|
trohit920/Custom_Yolo_Object_Detection
|
1cf9f774b02a747ae7c316879333ecf89382bea8
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import division, print_function
import tensorflow as tf
import numpy as np
import argparse
import cv2
import random
from new_args import parse_anchors, read_class_names
from new_train import gpu_nms
from new_train import letterbox_resize
from new_train import yolov3
def get_color_table(class_num, seed=2):
random.seed(seed)
color_table = {}
for i in range(class_num):
color_table[i] = [random.randint(0, 255) for _ in range(3)]
return color_table
def plot_one_box(img, coord, label=None, color=None, line_thickness=None):
'''
coord: [x_min, y_min, x_max, y_max] format coordinates.
img: img to plot on.
label: str. The label name.
    color: list of int. BGR color triple to draw with; random if omitted.
line_thickness: int. rectangle line thickness.
'''
tl = line_thickness or int(round(0.002 * max(img.shape[0:2]))) # line thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(coord[0]), int(coord[1])), (int(coord[2]), int(coord[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=float(tl) / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, float(tl) / 3, [0, 0, 0], thickness=tf, lineType=cv2.LINE_AA)
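# --- Usage sketch (illustrative, not part of the original script; values are made up) ---
# Draw a single labelled box on an image loaded with OpenCV; coordinates are absolute
# pixel values in [x_min, y_min, x_max, y_max] order.
def _example_plot_one_box(img):
    plot_one_box(img, [50, 60, 200, 220], label='pitta, 97.50%', color=[0, 255, 0])
    return img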
parser = argparse.ArgumentParser(description="YOLO-V3 test single image test procedure.")
parser.add_argument("--input_image", type=str,
help="The path of the input image.")
parser.add_argument("--anchor_path", type=str, default="./data/yolo_anchors.txt",
help="The path of the anchor txt file.")
parser.add_argument("--new_size", nargs='*', type=int, default=[416, 416],
help="Resize the input image with `new_size`, size format: [width, height]")
parser.add_argument("--letterbox_resize", type=lambda x: (str(x).lower() == 'true'), default=True,
help="Whether to use the letterbox resize.")
parser.add_argument("--class_name_path", type=str, default="./data/pitta.names",
help="The path of the class names.")
parser.add_argument("--restore_path", type=str, default="./checkpoint_sent/yolov3.ckpt",
help="The path of the weights to restore.")
# parser.add_argument("--restore_path", type=str, default="./checkpoint/model-epoch_10_step_4014_loss_1.2792_lr_0.0001",
# help="The path of the weights to restore.")
args = parser.parse_args()
args.anchors = parse_anchors(args.anchor_path)
args.classes = read_class_names(args.class_name_path)
args.num_class = len(args.classes)
color_table = get_color_table(args.num_class)
img_ori = cv2.imread(args.input_image)
if args.letterbox_resize:
img, resize_ratio, dw, dh = letterbox_resize(img_ori, args.new_size[0], args.new_size[1])
else:
height_ori, width_ori = img_ori.shape[:2]
img = cv2.resize(img_ori, tuple(args.new_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.asarray(img, np.float32)
img = img[np.newaxis, :] / 255.
with tf.Session() as sess:
input_data = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data')
yolo_model = yolov3(args.num_class, args.anchors)
with tf.variable_scope('yolov3'):
pred_feature_maps = yolo_model.forward(input_data, False)
pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)
pred_scores = pred_confs * pred_probs
boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=200, score_thresh=0.3, nms_thresh=0.45)
saver = tf.train.Saver()
saver.restore(sess, args.restore_path)
boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})
# rescale the coordinates to the original image
if args.letterbox_resize:
boxes_[:, [0, 2]] = (boxes_[:, [0, 2]] - dw) / resize_ratio
boxes_[:, [1, 3]] = (boxes_[:, [1, 3]] - dh) / resize_ratio
else:
boxes_[:, [0, 2]] *= (width_ori/float(args.new_size[0]))
boxes_[:, [1, 3]] *= (height_ori/float(args.new_size[1]))
print("box coords:")
print(boxes_)
print('*' * 30)
print("scores:")
print(scores_)
print('*' * 30)
print("labels:")
print(labels_)
for i in range(len(boxes_)):
x0, y0, x1, y1 = boxes_[i]
plot_one_box(img_ori, [x0, y0, x1, y1], label=args.classes[labels_[i]] + ', {:.2f}%'.format(scores_[i] * 100), color=color_table[labels_[i]])
cv2.imshow('Detection result', img_ori)
cv2.imwrite('detection_result.jpg', img_ori)
cv2.waitKey(0)
| 40.271186
| 149
| 0.66351
|
b2cc02570671e7c7390c1f76908cafbf6c1f99f1
| 73,367
|
py
|
Python
|
tencentcloud/zj/v20190121/models.py
|
qin5506/tencentcloud-sdk-python
|
e9c59d80beabf75fb96456bb8d7a53400346fe9a
|
[
"Apache-2.0"
] | null | null | null |
tencentcloud/zj/v20190121/models.py
|
qin5506/tencentcloud-sdk-python
|
e9c59d80beabf75fb96456bb8d7a53400346fe9a
|
[
"Apache-2.0"
] | null | null | null |
tencentcloud/zj/v20190121/models.py
|
qin5506/tencentcloud-sdk-python
|
e9c59d80beabf75fb96456bb8d7a53400346fe9a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AddCrowdPackInfoRequest(AbstractModel):
"""AddCrowdPackInfo请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param Name: 人群包名称
:type Name: str
:param FileName: 人群包文件名称,人群包文件必须为utf8编码,动态参数只能是汉字、数字、英文字母的组合,不能包含其他字符
:type FileName: str
:param Desc: 人群包描述
:type Desc: str
:param CosUrl: 已经上传好的人群包cos地址
:type CosUrl: str
:param PhoneNum: 人群包手机号数量
:type PhoneNum: int
"""
self.License = None
self.Name = None
self.FileName = None
self.Desc = None
self.CosUrl = None
self.PhoneNum = None
def _deserialize(self, params):
self.License = params.get("License")
self.Name = params.get("Name")
self.FileName = params.get("FileName")
self.Desc = params.get("Desc")
self.CosUrl = params.get("CosUrl")
self.PhoneNum = params.get("PhoneNum")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
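# --- Illustrative example (not part of the SDK; field values are made up) ---
# Every model copies the keys it knows from a plain dict via _deserialize() and warns
# about any leftover fields:
#   req = AddCrowdPackInfoRequest()
#   req._deserialize({"License": "<license>", "Name": "demo-pack", "FileName": "pack.txt",
#                     "Desc": "demo", "CosUrl": "cos://bucket/pack.txt", "PhoneNum": 100})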
class AddCrowdPackInfoResponse(AbstractModel):
"""AddCrowdPackInfo返回参数结构体
"""
def __init__(self):
"""
:param Data: 接口返回
:type Data: :class:`tencentcloud.zj.v20190121.models.SmsAddCrowdPackInfoResponse`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SmsAddCrowdPackInfoResponse()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class AddSmsSignRequest(AbstractModel):
"""AddSmsSign请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param SignType: 签名类型。其中每种类型后面标注了其可选的 DocumentType(证明类型):
0:公司(0,1,2,3)。
1:APP(0,1,2,3,4) 。
2:网站(0,1,2,3,5)。
3:公众号或者小程序(0,1,2,3,6)。
4:商标(7)。
5:政府/机关事业单位/其他机构(2,3)。
注:必须按照对应关系选择证明类型,否则会审核失败。
:type SignType: int
:param DocumentType: 证明类型:
0:三证合一。
1:企业营业执照。
2:组织机构代码证书。
3:社会信用代码证书。
4:应用后台管理截图(个人开发APP)。
5:网站备案后台截图(个人开发网站)。
6:小程序设置页面截图(个人认证小程序)。
7:商标注册书
:type DocumentType: int
:param International: 是否国际/港澳台短信:
0:表示国内短信。
1:表示国际/港澳台短信。
:type International: int
:param ProofImage: 资质图片url
:type ProofImage: str
:param SignName: 签名内容
:type SignName: str
:param Remark: 签名备注,比如申请原因,使用场景等,可以填空
:type Remark: str
"""
self.License = None
self.SignType = None
self.DocumentType = None
self.International = None
self.ProofImage = None
self.SignName = None
self.Remark = None
def _deserialize(self, params):
self.License = params.get("License")
self.SignType = params.get("SignType")
self.DocumentType = params.get("DocumentType")
self.International = params.get("International")
self.ProofImage = params.get("ProofImage")
self.SignName = params.get("SignName")
self.Remark = params.get("Remark")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class AddSmsSignResponse(AbstractModel):
"""AddSmsSign返回参数结构体
"""
def __init__(self):
"""
:param Data: 签名id数组
:type Data: :class:`tencentcloud.zj.v20190121.models.PaasCreateSignResp`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = PaasCreateSignResp()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class AddSmsTemplateDataStruct(AbstractModel):
"""短信模板创建接口返回
"""
def __init__(self):
"""
:param TemplateId: 短信模板ID
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class AddSmsTemplateRequest(AbstractModel):
"""AddSmsTemplate请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param SignID: 短信签名,创建签名时返回
:type SignID: int
:param TemplateName: 模板名称
:type TemplateName: str
:param TemplateContent: 短信内容,动态内容使用占位符{1},{2}等表示
:type TemplateContent: str
:param SmsType: 短信类型:{0:普通短信,1:营销短信}
:type SmsType: int
:param International: 是否国际/港澳台短信:
0:表示国内短信。
1:表示国际/港澳台短信。
:type International: int
:param Remark: 短信模板标签
:type Remark: str
:param Urls: 发送短信活动时配置的落地链接地址,仅用作短信活动
:type Urls: list of str
:param CommonParams: 发送短信活动时用于展示人群包动态参数模板占位符序号或接口发送时变量占位符序号
:type CommonParams: list of int
:param UrlParams: 发送短信活动时用于展示短连接模板占位符序号,仅用作短信活动
:type UrlParams: list of int
"""
self.License = None
self.SignID = None
self.TemplateName = None
self.TemplateContent = None
self.SmsType = None
self.International = None
self.Remark = None
self.Urls = None
self.CommonParams = None
self.UrlParams = None
def _deserialize(self, params):
self.License = params.get("License")
self.SignID = params.get("SignID")
self.TemplateName = params.get("TemplateName")
self.TemplateContent = params.get("TemplateContent")
self.SmsType = params.get("SmsType")
self.International = params.get("International")
self.Remark = params.get("Remark")
self.Urls = params.get("Urls")
self.CommonParams = params.get("CommonParams")
self.UrlParams = params.get("UrlParams")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class AddSmsTemplateResponse(AbstractModel):
"""AddSmsTemplate返回参数结构体
"""
def __init__(self):
"""
:param Data: 短信模板创建接口返回
:type Data: :class:`tencentcloud.zj.v20190121.models.AddSmsTemplateDataStruct`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = AddSmsTemplateDataStruct()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CancelActivityData(AbstractModel):
"""取消活动的返回值Data部分
"""
def __init__(self):
"""
:param Message: 成功返回时的文字描述
:type Message: str
"""
self.Message = None
def _deserialize(self, params):
self.Message = params.get("Message")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CancelCampaignRequest(AbstractModel):
"""CancelCampaign请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param CampaignId: 短信活动id
:type CampaignId: int
"""
self.License = None
self.CampaignId = None
def _deserialize(self, params):
self.License = params.get("License")
self.CampaignId = params.get("CampaignId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CancelCampaignResponse(AbstractModel):
"""CancelCampaign返回参数结构体
"""
def __init__(self):
"""
:param Data: 处理结果
:type Data: :class:`tencentcloud.zj.v20190121.models.CancelActivityData`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = CancelActivityData()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateCampaignRequest(AbstractModel):
"""CreateCampaign请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param SendTime: 短信活动发送时间
:type SendTime: int
:param Name: 短信活动名称
:type Name: str
:param Strategies: 发送策略
:type Strategies: list of PaasStrategy
:param TemplateId: 废弃
:type TemplateId: int
:param CrowdID: 废弃
:type CrowdID: int
:param SmsType: 活动类型(0-短信,1-超短,不填默认为超短)
:type SmsType: int
"""
self.License = None
self.SendTime = None
self.Name = None
self.Strategies = None
self.TemplateId = None
self.CrowdID = None
self.SmsType = None
def _deserialize(self, params):
self.License = params.get("License")
self.SendTime = params.get("SendTime")
self.Name = params.get("Name")
if params.get("Strategies") is not None:
self.Strategies = []
for item in params.get("Strategies"):
obj = PaasStrategy()
obj._deserialize(item)
self.Strategies.append(obj)
self.TemplateId = params.get("TemplateId")
self.CrowdID = params.get("CrowdID")
self.SmsType = params.get("SmsType")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateCampaignResponse(AbstractModel):
"""CreateCampaign返回参数结构体
"""
def __init__(self):
"""
:param Data: 活动信息
:type Data: :class:`tencentcloud.zj.v20190121.models.SmsCreateCampaignResponse`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SmsCreateCampaignResponse()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMmsInstanceItem(AbstractModel):
"""创建样例时候content元素
"""
def __init__(self):
"""
:param ContentType: 素材类型:1-文本 2-图片 3-视频 4-音频
:type ContentType: int
:param Content: 素材内容:如果素材是文本类型,直接填写文本内容,否则填写素材文件上传到cos后的url地址
:type Content: str
"""
self.ContentType = None
self.Content = None
def _deserialize(self, params):
self.ContentType = params.get("ContentType")
self.Content = params.get("Content")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMmsInstanceRequest(AbstractModel):
"""CreateMmsInstance请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param InstanceName: 样例名称
:type InstanceName: str
:param Title: 标题
:type Title: str
:param Sign: 签名
:type Sign: str
:param Contents: 素材内容
:type Contents: list of CreateMmsInstanceItem
:param Urls: 样例中链接动态变量对应的链接,和占位符顺序一致
:type Urls: list of str
:param PhoneType: 机型列表
:type PhoneType: list of int non-negative
:param CommonParams: 发送超短活动时用于展示人群包动态参数模板占位符序号或接口发送时变量占位符序号
:type CommonParams: list of int non-negative
:param UrlParams: 发送超短活动时用于展示短连接模板占位符序号,仅用作超短活动
:type UrlParams: list of int non-negative
"""
self.License = None
self.InstanceName = None
self.Title = None
self.Sign = None
self.Contents = None
self.Urls = None
self.PhoneType = None
self.CommonParams = None
self.UrlParams = None
def _deserialize(self, params):
self.License = params.get("License")
self.InstanceName = params.get("InstanceName")
self.Title = params.get("Title")
self.Sign = params.get("Sign")
if params.get("Contents") is not None:
self.Contents = []
for item in params.get("Contents"):
obj = CreateMmsInstanceItem()
obj._deserialize(item)
self.Contents.append(obj)
self.Urls = params.get("Urls")
self.PhoneType = params.get("PhoneType")
self.CommonParams = params.get("CommonParams")
self.UrlParams = params.get("UrlParams")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMmsInstanceResp(AbstractModel):
"""创建超级短信样例返回结果
"""
def __init__(self):
"""
:param ReturnCode: 返回码:0-成功 其它-失败
:type ReturnCode: int
:param ReturnMsg: 返回信息
:type ReturnMsg: str
:param InstanceId: 样例id
:type InstanceId: int
"""
self.ReturnCode = None
self.ReturnMsg = None
self.InstanceId = None
def _deserialize(self, params):
self.ReturnCode = params.get("ReturnCode")
self.ReturnMsg = params.get("ReturnMsg")
self.InstanceId = params.get("InstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMmsInstanceResponse(AbstractModel):
"""CreateMmsInstance返回参数结构体
"""
def __init__(self):
"""
:param Data: 创建样例返回信息
:type Data: :class:`tencentcloud.zj.v20190121.models.CreateMmsInstanceResp`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = CreateMmsInstanceResp()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DelCrowdPackRequest(AbstractModel):
"""DelCrowdPack请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param ID: 人群包id
:type ID: int
"""
self.License = None
self.ID = None
def _deserialize(self, params):
self.License = params.get("License")
self.ID = params.get("ID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DelCrowdPackResponse(AbstractModel):
"""DelCrowdPack返回参数结构体
"""
def __init__(self):
"""
:param Data: 接口返回
:type Data: :class:`tencentcloud.zj.v20190121.models.SmsSuccessResponse`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SmsSuccessResponse()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DelMmsInstanceData(AbstractModel):
"""删除超短样例响应
"""
def __init__(self):
"""
:param InstanceId: 样例id
:type InstanceId: int
"""
self.InstanceId = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DelTemplateRequest(AbstractModel):
"""DelTemplate请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param TemplateID: 短信模板ID
:type TemplateID: int
"""
self.License = None
self.TemplateID = None
def _deserialize(self, params):
self.License = params.get("License")
self.TemplateID = params.get("TemplateID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DelTemplateResponse(AbstractModel):
"""DelTemplate返回参数结构体
"""
def __init__(self):
"""
:param Data: 接口返回
:type Data: :class:`tencentcloud.zj.v20190121.models.SmsSuccessResponse`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SmsSuccessResponse()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DeleteMmsInstanceRequest(AbstractModel):
"""DeleteMmsInstance请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param InstanceId: 超级短信样例id
:type InstanceId: int
"""
self.License = None
self.InstanceId = None
def _deserialize(self, params):
self.License = params.get("License")
self.InstanceId = params.get("InstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DeleteMmsInstanceResponse(AbstractModel):
"""DeleteMmsInstance返回参数结构体
"""
def __init__(self):
"""
:param Data: 删除信息返回
:type Data: :class:`tencentcloud.zj.v20190121.models.DelMmsInstanceData`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = DelMmsInstanceData()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeMmsInstanceInfoRequest(AbstractModel):
"""DescribeMmsInstanceInfo请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param InstanceId: 彩信实例id
:type InstanceId: int
"""
self.License = None
self.InstanceId = None
def _deserialize(self, params):
self.License = params.get("License")
self.InstanceId = params.get("InstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeMmsInstanceInfoResponse(AbstractModel):
"""DescribeMmsInstanceInfo返回参数结构体
"""
def __init__(self):
"""
:param Data: 彩信实例信息
:type Data: :class:`tencentcloud.zj.v20190121.models.MmsInstanceInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = MmsInstanceInfo()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeMmsInstanceListRequest(AbstractModel):
"""DescribeMmsInstanceList请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param Offset: 偏移量
:type Offset: int
:param Limit: 返回数量
:type Limit: int
:param AppSubId: 业务代码
:type AppSubId: str
:param Title: 实例标题
:type Title: str
"""
self.License = None
self.Offset = None
self.Limit = None
self.AppSubId = None
self.Title = None
def _deserialize(self, params):
self.License = params.get("License")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.AppSubId = params.get("AppSubId")
self.Title = params.get("Title")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeMmsInstanceListResponse(AbstractModel):
"""DescribeMmsInstanceList返回参数结构体
"""
def __init__(self):
"""
:param Data: 彩信实例信息列表返回
:type Data: :class:`tencentcloud.zj.v20190121.models.MmsInstanceInfoList`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = MmsInstanceInfoList()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeSmsCampaignStatisticsRequest(AbstractModel):
"""DescribeSmsCampaignStatistics请求参数结构体
"""
def __init__(self):
"""
:param CampaignId: 活动id
:type CampaignId: int
:param License: 商户证书
:type License: str
"""
self.CampaignId = None
self.License = None
def _deserialize(self, params):
self.CampaignId = params.get("CampaignId")
self.License = params.get("License")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeSmsCampaignStatisticsResponse(AbstractModel):
"""DescribeSmsCampaignStatistics返回参数结构体
"""
def __init__(self):
"""
:param Data: 响应数据
:type Data: :class:`tencentcloud.zj.v20190121.models.SmsCampaignStatisticsData`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SmsCampaignStatisticsData()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeSmsSignListDataStruct(AbstractModel):
"""获取普通短信签名信息返回信息
"""
def __init__(self):
"""
:param SignId: 签名Id
:type SignId: int
:param International: 是否国际/港澳台短信:
0:表示国内短信。
1:表示国际/港澳台短信。
:type International: int
:param StatusCode: 申请签名状态。其中:
0:表示审核通过。
-1:表示审核未通过或审核失败。
:type StatusCode: int
:param ReviewReply: 审核回复,审核人员审核后给出的回复,通常是审核未通过的原因。
:type ReviewReply: str
:param SignName: 签名名称。
:type SignName: str
:param CreateTime: 提交审核时间,UNIX 时间戳(单位:秒)。
:type CreateTime: int
"""
self.SignId = None
self.International = None
self.StatusCode = None
self.ReviewReply = None
self.SignName = None
self.CreateTime = None
def _deserialize(self, params):
self.SignId = params.get("SignId")
self.International = params.get("International")
self.StatusCode = params.get("StatusCode")
self.ReviewReply = params.get("ReviewReply")
self.SignName = params.get("SignName")
self.CreateTime = params.get("CreateTime")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeSmsSignListRequest(AbstractModel):
"""DescribeSmsSignList请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param SignIdSet: 签名ID数组
:type SignIdSet: list of int non-negative
:param International: 是否国际/港澳台短信:
0:表示国内短信。
1:表示国际/港澳台短信。
:type International: int
"""
self.License = None
self.SignIdSet = None
self.International = None
def _deserialize(self, params):
self.License = params.get("License")
self.SignIdSet = params.get("SignIdSet")
self.International = params.get("International")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeSmsSignListResponse(AbstractModel):
"""DescribeSmsSignList返回参数结构体
"""
def __init__(self):
"""
:param Data: 返回数据
:type Data: list of DescribeSmsSignListDataStruct
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = []
for item in params.get("Data"):
obj = DescribeSmsSignListDataStruct()
obj._deserialize(item)
self.Data.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
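# Illustrative sketch with hypothetical data: when Data is documented as "list of ...Struct",
# _deserialize() builds one struct object per element of the "Data" array.
def _example_describe_sms_sign_list_response():
    resp = DescribeSmsSignListResponse()
    resp._deserialize({
        "Data": [{"SignId": 1, "International": 0, "StatusCode": 0,
                  "ReviewReply": "", "SignName": "ExampleSign", "CreateTime": 1609459200}],
        "RequestId": "example-request-id",
    })
    return [item.SignName for item in resp.Data]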
class DescribeSmsTemplateListDataStruct(AbstractModel):
"""获取短信模板状态返回
"""
def __init__(self):
"""
:param TemplateId: 模板Id
:type TemplateId: int
:param International: 是否国际/港澳台短信:
0:表示国内短信。
1:表示国际/港澳台短信。
:type International: int
:param StatusCode: 申请签名状态。其中:
0:表示审核通过。
-1:表示审核未通过或审核失败。
:type StatusCode: int
:param ReviewReply: 审核回复,审核人员审核后给出的回复,通常是审核未通过的原因。
:type ReviewReply: str
:param TemplateName: 模板名称。
:type TemplateName: str
:param CreateTime: 提交审核时间,UNIX 时间戳(单位:秒)。
:type CreateTime: int
"""
self.TemplateId = None
self.International = None
self.StatusCode = None
self.ReviewReply = None
self.TemplateName = None
self.CreateTime = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.International = params.get("International")
self.StatusCode = params.get("StatusCode")
self.ReviewReply = params.get("ReviewReply")
self.TemplateName = params.get("TemplateName")
self.CreateTime = params.get("CreateTime")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeSmsTemplateListRequest(AbstractModel):
"""DescribeSmsTemplateList请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param TemplateIdSet: 短信模板id数组
:type TemplateIdSet: list of int non-negative
:param International: 是否国际/港澳台短信:
0:表示国内短信。
1:表示国际/港澳台短信。
:type International: int
"""
self.License = None
self.TemplateIdSet = None
self.International = None
def _deserialize(self, params):
self.License = params.get("License")
self.TemplateIdSet = params.get("TemplateIdSet")
self.International = params.get("International")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class DescribeSmsTemplateListResponse(AbstractModel):
"""DescribeSmsTemplateList返回参数结构体
"""
def __init__(self):
"""
:param Data: 返回数据信息
:type Data: list of DescribeSmsTemplateListDataStruct
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = []
for item in params.get("Data"):
obj = DescribeSmsTemplateListDataStruct()
obj._deserialize(item)
self.Data.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class GetCrowdPackListRequest(AbstractModel):
"""GetCrowdPackList请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param Offset: 偏移量
:type Offset: int
:param Limit: 限制返回数量
:type Limit: int
:param Name: 人群包名称,用于过滤人群包
:type Name: str
:param Status: 人群包状态,默认-1,用于过滤人群包
:type Status: int
"""
self.License = None
self.Offset = None
self.Limit = None
self.Name = None
self.Status = None
def _deserialize(self, params):
self.License = params.get("License")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Name = params.get("Name")
self.Status = params.get("Status")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class GetCrowdPackListResponse(AbstractModel):
"""GetCrowdPackList返回参数结构体
"""
def __init__(self):
"""
:param Data: 人群包信息列表
:type Data: :class:`tencentcloud.zj.v20190121.models.SmsGetCrowdPackListResponse`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SmsGetCrowdPackListResponse()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class GetCrowdUploadInfoRequest(AbstractModel):
"""GetCrowdUploadInfo请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param FileName: 上传文件名称
:type FileName: str
"""
self.License = None
self.FileName = None
def _deserialize(self, params):
self.License = params.get("License")
self.FileName = params.get("FileName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class GetCrowdUploadInfoResponse(AbstractModel):
"""GetCrowdUploadInfo返回参数结构体
"""
def __init__(self):
"""
:param Data: 返回信息
:type Data: :class:`tencentcloud.zj.v20190121.models.SmsGetCrowdUploadInfoResponse`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SmsGetCrowdUploadInfoResponse()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class GetSmsAmountInfoRequest(AbstractModel):
"""GetSmsAmountInfo请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
"""
self.License = None
def _deserialize(self, params):
self.License = params.get("License")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class GetSmsAmountInfoResponse(AbstractModel):
"""GetSmsAmountInfo返回参数结构体
"""
def __init__(self):
"""
:param Data: 短信账号额度接口
:type Data: :class:`tencentcloud.zj.v20190121.models.SmsAmountDataStruct`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = SmsAmountDataStruct()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class GetSmsCampaignStatusRequest(AbstractModel):
"""GetSmsCampaignStatus请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param CampaignId: 活动ID
:type CampaignId: int
"""
self.License = None
self.CampaignId = None
def _deserialize(self, params):
self.License = params.get("License")
self.CampaignId = params.get("CampaignId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class GetSmsCampaignStatusResponse(AbstractModel):
"""GetSmsCampaignStatus返回参数结构体
"""
def __init__(self):
"""
:param Data: 活动状态
:type Data: :class:`tencentcloud.zj.v20190121.models.PaasSmsCampaignStatusResp`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = PaasSmsCampaignStatusResp()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class MmsInstanceInfo(AbstractModel):
"""彩信实例信息
InstanceId int
InstanceName string
Status int
StatusInfo string
AppSubId string
Title string
Sign string
Contents string
CreatedAt string
"""
def __init__(self):
"""
:param InstanceId: 彩信实例id
:type InstanceId: int
:param InstanceName: 彩信实例名称
:type InstanceName: str
:param Status: 状态是否通知
:type Status: int
:param StatusInfo: 实例审核状态信息
注意:此字段可能返回 null,表示取不到有效值。
:type StatusInfo: list of MmsInstanceStateInfo
:param AppSubId: 业务码
:type AppSubId: str
:param Title: 彩信标题
:type Title: str
:param Sign: 签名
:type Sign: str
:param Contents: 彩信内容
:type Contents: str
:param CreatedAt: 创建时间
:type CreatedAt: str
:param Urls: 样例配置的链接地址
注意:此字段可能返回 null,表示取不到有效值。
:type Urls: list of str
:param PhoneType: 机型列表
注意:此字段可能返回 null,表示取不到有效值。
:type PhoneType: list of int non-negative
:param CommonParams: 普通参数序号数组
注意:此字段可能返回 null,表示取不到有效值。
:type CommonParams: list of int non-negative
:param UrlParams: 链接参数序号数组
注意:此字段可能返回 null,表示取不到有效值。
:type UrlParams: list of int non-negative
"""
self.InstanceId = None
self.InstanceName = None
self.Status = None
self.StatusInfo = None
self.AppSubId = None
self.Title = None
self.Sign = None
self.Contents = None
self.CreatedAt = None
self.Urls = None
self.PhoneType = None
self.CommonParams = None
self.UrlParams = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Status = params.get("Status")
if params.get("StatusInfo") is not None:
self.StatusInfo = []
for item in params.get("StatusInfo"):
obj = MmsInstanceStateInfo()
obj._deserialize(item)
self.StatusInfo.append(obj)
self.AppSubId = params.get("AppSubId")
self.Title = params.get("Title")
self.Sign = params.get("Sign")
self.Contents = params.get("Contents")
self.CreatedAt = params.get("CreatedAt")
self.Urls = params.get("Urls")
self.PhoneType = params.get("PhoneType")
self.CommonParams = params.get("CommonParams")
self.UrlParams = params.get("UrlParams")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class MmsInstanceInfoList(AbstractModel):
"""彩信实例状态列表
"""
def __init__(self):
"""
:param Total: 总数据量
:type Total: int
:param List: 彩信实例状态信息列表
:type List: list of MmsInstanceInfo
"""
self.Total = None
self.List = None
def _deserialize(self, params):
self.Total = params.get("Total")
if params.get("List") is not None:
self.List = []
for item in params.get("List"):
obj = MmsInstanceInfo()
obj._deserialize(item)
self.List.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class MmsInstanceStateInfo(AbstractModel):
"""彩信实例审核状态
"""
def __init__(self):
"""
:param Operator: 运营商
:type Operator: str
:param State: 审核状态:0未审核,1审核通过,2审核拒绝
:type State: int
"""
self.Operator = None
self.State = None
def _deserialize(self, params):
self.Operator = params.get("Operator")
self.State = params.get("State")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class ModifySmsTemplateDataStruct(AbstractModel):
"""短信模板编辑接口出参
"""
def __init__(self):
"""
:param TemplateId: 短信模板id
注意:此字段可能返回 null,表示取不到有效值。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class ModifySmsTemplateRequest(AbstractModel):
"""ModifySmsTemplate请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param TemplateId: 短信模板id
:type TemplateId: int
:param SignID: 短信签名,创建签名时返回
:type SignID: int
:param TemplateName: 模板名称
:type TemplateName: str
:param TemplateContent: 短信内容,动态内容使用占位符{1},{2}等表示
:type TemplateContent: str
:param SmsType: 短信类型:{0:普通短信,1:营销短信}
:type SmsType: int
:param International: 是否国际/港澳台短信:
0:表示国内短信。
1:表示国际/港澳台短信。
:type International: int
:param Remark: 短信模板标签
:type Remark: str
:param Urls: 发送短信活动时配置的落地链接地址,仅用作短信活动
:type Urls: list of str
:param CommonParams: 发送短信活动时用于展示人群包动态参数模板占位符序号,仅用作短信活动
:type CommonParams: list of int
:param UrlParams: 发送短信活动时用于展示短连接模板占位符序号,仅用作短信活动
:type UrlParams: list of int
"""
self.License = None
self.TemplateId = None
self.SignID = None
self.TemplateName = None
self.TemplateContent = None
self.SmsType = None
self.International = None
self.Remark = None
self.Urls = None
self.CommonParams = None
self.UrlParams = None
def _deserialize(self, params):
self.License = params.get("License")
self.TemplateId = params.get("TemplateId")
self.SignID = params.get("SignID")
self.TemplateName = params.get("TemplateName")
self.TemplateContent = params.get("TemplateContent")
self.SmsType = params.get("SmsType")
self.International = params.get("International")
self.Remark = params.get("Remark")
self.Urls = params.get("Urls")
self.CommonParams = params.get("CommonParams")
self.UrlParams = params.get("UrlParams")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
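# Illustrative sketch with hypothetical values: dynamic parts of TemplateContent are written
# as numbered placeholders {1}, {2}, ... whose actual values are supplied when sending.
def _example_modify_sms_template_request():
    req = ModifySmsTemplateRequest()
    req._deserialize({
        "License": "example-license",
        "TemplateId": 1001,
        "SignID": 2002,
        "TemplateName": "order-notice",
        "TemplateContent": "Your order {1} is expected to arrive on {2}.",
        "SmsType": 0,
        "International": 0,
    })
    return req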
class ModifySmsTemplateResponse(AbstractModel):
"""ModifySmsTemplate返回参数结构体
"""
def __init__(self):
"""
:param Data: 返回
:type Data: :class:`tencentcloud.zj.v20190121.models.ModifySmsTemplateDataStruct`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = ModifySmsTemplateDataStruct()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class PaasCreateSignResp(AbstractModel):
"""创建签名返回结构
"""
def __init__(self):
"""
:param SignId: 签名id
:type SignId: int
"""
self.SignId = None
def _deserialize(self, params):
self.SignId = params.get("SignId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class PaasSmsCampaignStatusResp(AbstractModel):
"""拉取活动状态返回
"""
def __init__(self):
"""
:param Status: 0-未发送 1-发送中 2-发送结束 3-发送取消
:type Status: int
"""
self.Status = None
def _deserialize(self, params):
self.Status = params.get("Status")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
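# Reading aid, not part of the SDK: the campaign status codes documented above, mapped to
# English labels.
CAMPAIGN_STATUS_LABELS = {0: "not sent", 1: "sending", 2: "finished", 3: "cancelled"}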
class PaasStrategy(AbstractModel):
"""短信发送人群包策略
"""
def __init__(self):
"""
:param CrowdID: 人群包id
:type CrowdID: int
:param Items: 待选素材数组
:type Items: list of PaasStrategyItem
"""
self.CrowdID = None
self.Items = None
def _deserialize(self, params):
self.CrowdID = params.get("CrowdID")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = PaasStrategyItem()
obj._deserialize(item)
self.Items.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class PaasStrategyItem(AbstractModel):
"""短信活动策略元素
"""
def __init__(self):
"""
:param Id: 短信模板id或超级短信样例id
:type Id: int
:param ContentType: 素材类型 0-普短 1-超短
:type ContentType: int
"""
self.Id = None
self.ContentType = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.ContentType = params.get("ContentType")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
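# Illustrative sketch with hypothetical ids: a strategy pairs one crowd pack with the
# materials to deliver, where ContentType 0 is a regular SMS template and 1 an MMS sample.
def _example_paas_strategy():
    strategy = PaasStrategy()
    strategy._deserialize({
        "CrowdID": 42,
        "Items": [{"Id": 1001, "ContentType": 0}, {"Id": 2002, "ContentType": 1}],
    })
    return strategy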
class PushMmsContentRequest(AbstractModel):
"""PushMmsContent请求参数结构体
"""
def __init__(self):
"""
:param License: 商户证书
:type License: str
:param InstanceId: 素材样例id
:type InstanceId: int
:param Tel: 手机号
:type Tel: str
:param Session: 附带数据字段
:type Session: str
:param DynamicParaKey: 动态参数key(即申请样例时设置的u_或p_开头的动态参数,要求序号有序)
:type DynamicParaKey: list of str
:param DynamicParaValue: 动态参数值,和DynamicParaKey对应
:type DynamicParaValue: list of str
"""
self.License = None
self.InstanceId = None
self.Tel = None
self.Session = None
self.DynamicParaKey = None
self.DynamicParaValue = None
def _deserialize(self, params):
self.License = params.get("License")
self.InstanceId = params.get("InstanceId")
self.Tel = params.get("Tel")
self.Session = params.get("Session")
self.DynamicParaKey = params.get("DynamicParaKey")
self.DynamicParaValue = params.get("DynamicParaValue")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class PushMmsContentResp(AbstractModel):
"""发送超级短信返回
"""
def __init__(self):
"""
:param ReturnCode: 返回码:0-成功 其它-失败
:type ReturnCode: int
:param ReturnMsg: 返回信息
:type ReturnMsg: str
:param MessageId: 消息回执id
:type MessageId: int
"""
self.ReturnCode = None
self.ReturnMsg = None
self.MessageId = None
def _deserialize(self, params):
self.ReturnCode = params.get("ReturnCode")
self.ReturnMsg = params.get("ReturnMsg")
self.MessageId = params.get("MessageId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class PushMmsContentResponse(AbstractModel):
"""PushMmsContent返回参数结构体
"""
def __init__(self):
"""
:param Data: 推送短信返回信息
:type Data: :class:`tencentcloud.zj.v20190121.models.PushMmsContentResp`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = PushMmsContentResp()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SendSmsPaasDataStruct(AbstractModel):
"""发送短信返回
"""
def __init__(self):
"""
:param SerialNo: 发送流水号
:type SerialNo: str
:param PhoneNumber: 手机号码,e.164标准,+[国家或地区码][手机号] ,示例如:+8613711112222, 其中前面有一个+号 ,86为国家码,13711112222为手机号。
:type PhoneNumber: str
:param Fee: 计费条数
:type Fee: int
:param Code: OK为成功
:type Code: str
:param Message: 短信请求错误码描述
:type Message: str
"""
self.SerialNo = None
self.PhoneNumber = None
self.Fee = None
self.Code = None
self.Message = None
def _deserialize(self, params):
self.SerialNo = params.get("SerialNo")
self.PhoneNumber = params.get("PhoneNumber")
self.Fee = params.get("Fee")
self.Code = params.get("Code")
self.Message = params.get("Message")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SendSmsRequest(AbstractModel):
    """SendSms request structure
    """
    def __init__(self):
        """
        :param License: Merchant certificate
        :type License: str
        :param Phone: Phone numbers in e.164 format, +[country/region code][number]; a single request supports at most 200 numbers, all of which must be mainland-China numbers, e.g. +8613800138000
        :type Phone: list of str
        :param TemplateId: SMS template id (sending by template id is recommended; leave the template id empty when sending by raw content)
        :type TemplateId: str
        :param Params: Template parameters; set to empty if the template takes none.
        :type Params: list of str
        :param Sign: SMS signature content, UTF-8 encoded; must be a signature that has passed review. Note: required for domestic SMS.
        :type Sign: str
        :param SenderId: SenderId for international/Hong Kong/Macao/Taiwan SMS; leave empty for domestic SMS
        :type SenderId: str
        :param SmsType: SMS type: {0: regular SMS, 1: marketing SMS}; required when sending by raw content
        :type SmsType: int
        :param International: Whether this is an international/Hong Kong/Macao/Taiwan message:
        0: domestic SMS.
        1: international/Hong Kong/Macao/Taiwan SMS. Required when sending by raw content
        :type International: int
        :param Content: Template content used for sending; placeholders, if any, are included here and their actual values are passed through the Params parameter; leave empty when sending by template id
        :type Content: str
        """
self.License = None
self.Phone = None
self.TemplateId = None
self.Params = None
self.Sign = None
self.SenderId = None
self.SmsType = None
self.International = None
self.Content = None
def _deserialize(self, params):
self.License = params.get("License")
self.Phone = params.get("Phone")
self.TemplateId = params.get("TemplateId")
self.Params = params.get("Params")
self.Sign = params.get("Sign")
self.SenderId = params.get("SenderId")
self.SmsType = params.get("SmsType")
self.International = params.get("International")
self.Content = params.get("Content")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
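# Illustrative sketch with hypothetical values: SendSmsRequest supports the two modes the
# docstring describes -- send by an approved TemplateId, or send raw Content (in which case
# SmsType and International become required and TemplateId stays empty).
def _example_send_sms_requests():
    by_template = SendSmsRequest()
    by_template._deserialize({
        "License": "example-license",
        "Phone": ["+8613800138000"],
        "TemplateId": "100001",
        "Params": ["654321"],
        "Sign": "ExampleSign",
    })
    by_content = SendSmsRequest()
    by_content._deserialize({
        "License": "example-license",
        "Phone": ["+8613800138000"],
        "Content": "Your verification code is {1}.",
        "Params": ["654321"],
        "Sign": "ExampleSign",
        "SmsType": 0,
        "International": 0,
    })
    return by_template, by_content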
class SendSmsResponse(AbstractModel):
"""SendSms返回参数结构体
"""
def __init__(self):
"""
:param Data: 出参数据
:type Data: list of SendSmsPaasDataStruct
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = []
for item in params.get("Data"):
obj = SendSmsPaasDataStruct()
obj._deserialize(item)
self.Data.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SmsAddCrowdPackInfoResponse(AbstractModel):
"""添加短信人群包信息接口返回
"""
def __init__(self):
"""
:param ID: 人群包id
:type ID: int
"""
self.ID = None
def _deserialize(self, params):
self.ID = params.get("ID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SmsAmountDataStruct(AbstractModel):
"""短信子账号额度接口出参
"""
def __init__(self):
"""
:param SmsCampaignAmount: 短信活动配额
:type SmsCampaignAmount: int
:param SmsCampaignConsume: 短信活动消耗配额
:type SmsCampaignConsume: int
:param SmsSendAmount: 短信发送额度
:type SmsSendAmount: int
:param SmsSendConsume: 短信发送消耗额度
:type SmsSendConsume: int
:param MmsCampaignAmount: 超短活动额度
:type MmsCampaignAmount: int
:param MmsCampaignConsume: 超短活动消耗额度
:type MmsCampaignConsume: int
:param MmsSendAmount: 超短短信额度
:type MmsSendAmount: int
:param MmsSendConsume: 超短短信消耗额度
:type MmsSendConsume: int
"""
self.SmsCampaignAmount = None
self.SmsCampaignConsume = None
self.SmsSendAmount = None
self.SmsSendConsume = None
self.MmsCampaignAmount = None
self.MmsCampaignConsume = None
self.MmsSendAmount = None
self.MmsSendConsume = None
def _deserialize(self, params):
self.SmsCampaignAmount = params.get("SmsCampaignAmount")
self.SmsCampaignConsume = params.get("SmsCampaignConsume")
self.SmsSendAmount = params.get("SmsSendAmount")
self.SmsSendConsume = params.get("SmsSendConsume")
self.MmsCampaignAmount = params.get("MmsCampaignAmount")
self.MmsCampaignConsume = params.get("MmsCampaignConsume")
self.MmsSendAmount = params.get("MmsSendAmount")
self.MmsSendConsume = params.get("MmsSendConsume")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
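# Illustrative sketch: each quota pair above is a granted amount plus a consumed amount, so
# the remaining SMS-send quota of a deserialized SmsAmountDataStruct is their difference.
def _example_remaining_sms_send_quota(data):
    return data.SmsSendAmount - data.SmsSendConsume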
class SmsCampaignStatisticsCrowdData(AbstractModel):
"""短信活动统计人群包数据
"""
def __init__(self):
"""
:param CrowdId: 人群包id
:type CrowdId: int
:param CrowdName: 人群包名称
:type CrowdName: str
:param CrowdCount: 人群包目标触达总数
:type CrowdCount: int
:param TemplateList: 模板列表
:type TemplateList: list of SmsCampaignStatisticsTemplateData
"""
self.CrowdId = None
self.CrowdName = None
self.CrowdCount = None
self.TemplateList = None
def _deserialize(self, params):
self.CrowdId = params.get("CrowdId")
self.CrowdName = params.get("CrowdName")
self.CrowdCount = params.get("CrowdCount")
if params.get("TemplateList") is not None:
self.TemplateList = []
for item in params.get("TemplateList"):
obj = SmsCampaignStatisticsTemplateData()
obj._deserialize(item)
self.TemplateList.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SmsCampaignStatisticsData(AbstractModel):
"""短信活动统计响应
"""
def __init__(self):
"""
:param CampaignId: 活动Id
:type CampaignId: int
:param Statistics: 统计数据
:type Statistics: list of SmsCampaignStatisticsCrowdData
"""
self.CampaignId = None
self.Statistics = None
def _deserialize(self, params):
self.CampaignId = params.get("CampaignId")
if params.get("Statistics") is not None:
self.Statistics = []
for item in params.get("Statistics"):
obj = SmsCampaignStatisticsCrowdData()
obj._deserialize(item)
self.Statistics.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SmsCampaignStatisticsTemplateData(AbstractModel):
"""短信活动统计模板展示结构
"""
def __init__(self):
"""
:param TemplateId: 模板或样例id
:type TemplateId: str
:param TemplateContent: 模板内容
:type TemplateContent: str
:param SendCount: 触达成功数
:type SendCount: int
:param ClickCount: 短链点击数
:type ClickCount: int
"""
self.TemplateId = None
self.TemplateContent = None
self.SendCount = None
self.ClickCount = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateContent = params.get("TemplateContent")
self.SendCount = params.get("SendCount")
self.ClickCount = params.get("ClickCount")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SmsCreateCampaignResponse(AbstractModel):
"""创建短信活动返回结构
"""
def __init__(self):
"""
:param CampaignId: 活动id
:type CampaignId: int
"""
self.CampaignId = None
def _deserialize(self, params):
self.CampaignId = params.get("CampaignId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SmsGetCrowdPackList(AbstractModel):
"""短信获取人群包列表的返回数据信息
"""
def __init__(self):
"""
:param CreatedAt: 创建时间
:type CreatedAt: str
:param ID: 人群包id
:type ID: int
:param Name: 人群包名称
:type Name: str
:param Status: 人群包状态
:type Status: int
:param PhoneNum: 人群包手机号数量
:type PhoneNum: int
:param Tag: 人群包标签信息
:type Tag: str
:param MD5: 人群包md5
:type MD5: str
:param FileName: 人群包文件名称
:type FileName: str
:param Desc: 人群包描述
:type Desc: str
"""
self.CreatedAt = None
self.ID = None
self.Name = None
self.Status = None
self.PhoneNum = None
self.Tag = None
self.MD5 = None
self.FileName = None
self.Desc = None
def _deserialize(self, params):
self.CreatedAt = params.get("CreatedAt")
self.ID = params.get("ID")
self.Name = params.get("Name")
self.Status = params.get("Status")
self.PhoneNum = params.get("PhoneNum")
self.Tag = params.get("Tag")
self.MD5 = params.get("MD5")
self.FileName = params.get("FileName")
self.Desc = params.get("Desc")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SmsGetCrowdPackListResponse(AbstractModel):
"""短信人群包返回信息
"""
def __init__(self):
"""
:param Total: 人群包总数
:type Total: int
:param List: 人群包返回数据列表
注意:此字段可能返回 null,表示取不到有效值。
:type List: list of SmsGetCrowdPackList
"""
self.Total = None
self.List = None
def _deserialize(self, params):
self.Total = params.get("Total")
if params.get("List") is not None:
self.List = []
for item in params.get("List"):
obj = SmsGetCrowdPackList()
obj._deserialize(item)
self.List.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class SmsGetCrowdUploadInfoResponse(AbstractModel):
"""获取短信人群包上传信息返回
"""
def __init__(self):
"""
:param ExpiredTime: 过期时间
:type ExpiredTime: int
:param SessionToken: 会话token
:type SessionToken: str
:param TmpSecretId: 临时密钥id
:type TmpSecretId: str
:param TmpSecretKey: 临时密钥
:type TmpSecretKey: str
:param CosInfo: cos信息
:type CosInfo: :class:`tencentcloud.zj.v20190121.models.UploadFansInfoCosInfo`
"""
self.ExpiredTime = None
self.SessionToken = None
self.TmpSecretId = None
self.TmpSecretKey = None
self.CosInfo = None
def _deserialize(self, params):
self.ExpiredTime = params.get("ExpiredTime")
self.SessionToken = params.get("SessionToken")
self.TmpSecretId = params.get("TmpSecretId")
self.TmpSecretKey = params.get("TmpSecretKey")
if params.get("CosInfo") is not None:
self.CosInfo = UploadFansInfoCosInfo()
self.CosInfo._deserialize(params.get("CosInfo"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
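# Illustrative sketch: the response above carries temporary COS credentials plus the bucket,
# key and region to upload the crowd-pack file to. Collecting them into one dict is shown
# here; the actual upload would be done with the COS SDK and is outside this module.
def _example_cos_upload_params(resp):
    return {
        "Bucket": resp.CosInfo.Bucket,
        "Key": resp.CosInfo.Key,
        "Region": resp.CosInfo.Region,
        "TmpSecretId": resp.TmpSecretId,
        "TmpSecretKey": resp.TmpSecretKey,
        "SessionToken": resp.SessionToken,
        "ExpiredTime": resp.ExpiredTime,
    }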
class SmsSuccessResponse(AbstractModel):
"""短信api成功返回信息
"""
def __init__(self):
"""
:param Message: 成功返回信息
:type Message: str
"""
self.Message = None
def _deserialize(self, params):
self.Message = params.get("Message")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
class UploadFansInfoCosInfo(AbstractModel):
"""接口返回给服务商的COS路径等信息
"""
def __init__(self):
"""
:param Bucket: COS bucket
:type Bucket: str
:param Key: COS路径
:type Key: str
:param Region: COS区域
:type Region: str
"""
self.Bucket = None
self.Key = None
self.Region = None
def _deserialize(self, params):
self.Bucket = params.get("Bucket")
self.Key = params.get("Key")
self.Region = params.get("Region")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(memeber_set), Warning)
| 29.464659 | 111 | 0.585045 |
03957807fe96daebc991be138aca614d6113e7df | 7,852 | py | Python | ultracart/models/coupon_free_items_with_item_purchase.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | ["Apache-2.0"] | 1 | 2018-03-15T16:56:23.000Z | 2018-03-15T16:56:23.000Z | ultracart/models/coupon_free_items_with_item_purchase.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | ["Apache-2.0"] | null | null | null | ultracart/models/coupon_free_items_with_item_purchase.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CouponFreeItemsWithItemPurchase(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'free_item': 'str',
'free_quantity': 'int',
'limit': 'int',
'required_purchase_item': 'str',
'required_purchase_quantity': 'int'
}
attribute_map = {
'free_item': 'free_item',
'free_quantity': 'free_quantity',
'limit': 'limit',
'required_purchase_item': 'required_purchase_item',
'required_purchase_quantity': 'required_purchase_quantity'
}
def __init__(self, free_item=None, free_quantity=None, limit=None, required_purchase_item=None, required_purchase_quantity=None): # noqa: E501
"""CouponFreeItemsWithItemPurchase - a model defined in Swagger""" # noqa: E501
self._free_item = None
self._free_quantity = None
self._limit = None
self._required_purchase_item = None
self._required_purchase_quantity = None
self.discriminator = None
if free_item is not None:
self.free_item = free_item
if free_quantity is not None:
self.free_quantity = free_quantity
if limit is not None:
self.limit = limit
if required_purchase_item is not None:
self.required_purchase_item = required_purchase_item
if required_purchase_quantity is not None:
self.required_purchase_quantity = required_purchase_quantity
@property
def free_item(self):
"""Gets the free_item of this CouponFreeItemsWithItemPurchase. # noqa: E501
The item id of the free item that will be received when the required mix and match group quantity is purchased. # noqa: E501
:return: The free_item of this CouponFreeItemsWithItemPurchase. # noqa: E501
:rtype: str
"""
return self._free_item
@free_item.setter
def free_item(self, free_item):
"""Sets the free_item of this CouponFreeItemsWithItemPurchase.
The item id of the free item that will be received when the required mix and match group quantity is purchased. # noqa: E501
:param free_item: The free_item of this CouponFreeItemsWithItemPurchase. # noqa: E501
:type: str
"""
self._free_item = free_item
@property
def free_quantity(self):
"""Gets the free_quantity of this CouponFreeItemsWithItemPurchase. # noqa: E501
The quantity of free item that will be received. # noqa: E501
:return: The free_quantity of this CouponFreeItemsWithItemPurchase. # noqa: E501
:rtype: int
"""
return self._free_quantity
@free_quantity.setter
def free_quantity(self, free_quantity):
"""Sets the free_quantity of this CouponFreeItemsWithItemPurchase.
The quantity of free item that will be received. # noqa: E501
:param free_quantity: The free_quantity of this CouponFreeItemsWithItemPurchase. # noqa: E501
:type: int
"""
self._free_quantity = free_quantity
@property
def limit(self):
"""Gets the limit of this CouponFreeItemsWithItemPurchase. # noqa: E501
The limit of free items that may be received when purchasing multiple items # noqa: E501
:return: The limit of this CouponFreeItemsWithItemPurchase. # noqa: E501
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this CouponFreeItemsWithItemPurchase.
The limit of free items that may be received when purchasing multiple items # noqa: E501
:param limit: The limit of this CouponFreeItemsWithItemPurchase. # noqa: E501
:type: int
"""
self._limit = limit
@property
def required_purchase_item(self):
"""Gets the required_purchase_item of this CouponFreeItemsWithItemPurchase. # noqa: E501
Required item that must be purchased for coupon to be valid # noqa: E501
:return: The required_purchase_item of this CouponFreeItemsWithItemPurchase. # noqa: E501
:rtype: str
"""
return self._required_purchase_item
@required_purchase_item.setter
def required_purchase_item(self, required_purchase_item):
"""Sets the required_purchase_item of this CouponFreeItemsWithItemPurchase.
Required item that must be purchased for coupon to be valid # noqa: E501
:param required_purchase_item: The required_purchase_item of this CouponFreeItemsWithItemPurchase. # noqa: E501
:type: str
"""
self._required_purchase_item = required_purchase_item
@property
def required_purchase_quantity(self):
"""Gets the required_purchase_quantity of this CouponFreeItemsWithItemPurchase. # noqa: E501
Required quantity of mix and match group items that must be purchased for coupon to be valid # noqa: E501
:return: The required_purchase_quantity of this CouponFreeItemsWithItemPurchase. # noqa: E501
:rtype: int
"""
return self._required_purchase_quantity
@required_purchase_quantity.setter
def required_purchase_quantity(self, required_purchase_quantity):
"""Sets the required_purchase_quantity of this CouponFreeItemsWithItemPurchase.
Required quantity of mix and match group items that must be purchased for coupon to be valid # noqa: E501
:param required_purchase_quantity: The required_purchase_quantity of this CouponFreeItemsWithItemPurchase. # noqa: E501
:type: int
"""
self._required_purchase_quantity = required_purchase_quantity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CouponFreeItemsWithItemPurchase, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CouponFreeItemsWithItemPurchase):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
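# Illustrative sketch with hypothetical item ids: the optional keyword arguments map 1:1 to
# swagger_types above, and to_dict() returns the model as a plain dictionary.
def _example_coupon_free_items_with_item_purchase():
    coupon = CouponFreeItemsWithItemPurchase(
        free_item="FREE-MUG",
        free_quantity=1,
        limit=5,
        required_purchase_item="COFFEE-BAG",
        required_purchase_quantity=2,
    )
    return coupon.to_dict()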
| 34.13913 | 147 | 0.646714 |
f5cbbefbec56249cc31bf1420801ff13af96eccb | 6,454 | py | Python | gqa/function/test.py | xiling42/VL-BERT | 4573b4e1e82b6c092d4830d0b88821e9ee1a81fb | ["MIT"] | null | null | null | gqa/function/test.py | xiling42/VL-BERT | 4573b4e1e82b6c092d4830d0b88821e9ee1a81fb | ["MIT"] | null | null | null | gqa/function/test.py | xiling42/VL-BERT | 4573b4e1e82b6c092d4830d0b88821e9ee1a81fb | ["MIT"] | null | null | null |
import os
import pprint
import shutil
import pandas as pd
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn.functional as F
from common.utils.load import smart_load_model_state_dict
from common.trainer import to_cuda
from common.utils.create_logger import create_logger
from gqa.modules import *
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
# submit csv should contain following columns:
# annot_id,
# answer_0,answer_1,answer_2,answer_3,
# rationale_conditioned_on_a0_0,rationale_conditioned_on_a0_1,rationale_conditioned_on_a0_2,rationale_conditioned_on_a0_3,
# rationale_conditioned_on_a1_0,rationale_conditioned_on_a1_1,rationale_conditioned_on_a1_2,rationale_conditioned_on_a1_3,
# rationale_conditioned_on_a2_0,rationale_conditioned_on_a2_1,rationale_conditioned_on_a2_2,rationale_conditioned_on_a2_3,
# rationale_conditioned_on_a3_0,rationale_conditioned_on_a3_1,rationale_conditioned_on_a3_2,rationale_conditioned_on_a3_3
@torch.no_grad()
def test_net(args, config, ckpt_path=None, save_path=None, save_name=None):
if save_path is None:
logger, test_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TEST_IMAGE_SET,
split='test')
save_path = test_output_path
if save_name is None:
save_name = config.MODEL_PREFIX
if not os.path.exists(save_path):
os.makedirs(save_path)
result_csv_path = os.path.join(save_path,
'{}_test_result_{}.csv'.format(save_name, config.DATASET.TASK))
if args.use_cache and os.path.isfile(result_csv_path):
print("Cache found in {}, skip test!".format(result_csv_path))
return result_csv_path
print('test net...')
pprint.pprint(args)
pprint.pprint(config)
device_ids = [int(d) for d in config.GPUS.split(',')]
# os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
if ckpt_path is None:
_, train_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(train_output_path, config.MODEL_PREFIX)
ckpt_path = '{}-best.model'.format(model_prefix)
print('Use best checkpoint {}...'.format(ckpt_path))
shutil.copy2(ckpt_path, os.path.join(save_path, '{}_test_ckpt_{}.model'.format(config.MODEL_PREFIX, config.DATASET.TASK)))
# torch.backends.cudnn.enabled = False
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# get network
model = eval(config.MODULE)(config)
if len(device_ids) > 1:
model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
else:
model = model.cuda()
if args.fp16:
[model] = amp.initialize([model],
opt_level='O2',
keep_batchnorm_fp32=False)
checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
smart_load_model_state_dict(model, checkpoint['state_dict'])
# loader
test_loader = make_dataloader(config, mode='test', distributed=False)
test_dataset = test_loader.dataset
test_database = test_dataset.database
# test
test_probs = []
test_ids = []
cur_id = 0
model.eval()
for nbatch, batch in zip(trange(len(test_loader)), test_loader):
# for nbatch, batch in tqdm(enumerate(test_loader)):
batch = to_cuda(batch)
if config.DATASET.TASK == 'Q2A':
output = model(*batch)
probs = F.softmax(output['label_logits'].float(), dim=1)
batch_size = probs.shape[0]
test_probs.append(probs.float().detach().cpu().numpy())
test_ids.append([test_database[cur_id + k]['annot_id'] for k in range(batch_size)])
cur_id += batch_size
elif config.DATASET.TASK == 'QA2R':
conditioned_probs = []
for a_id in range(4):
q_index_in_batch = test_loader.dataset.data_names.index('question')
q_align_mat_index_in_batch = test_loader.dataset.data_names.index('question_align_matrix')
batch_ = [*batch]
batch_[q_index_in_batch] = batch[q_index_in_batch][:, a_id, :, :]
batch_[q_align_mat_index_in_batch] = batch[q_align_mat_index_in_batch][:, a_id, :, :]
output = model(*batch_)
probs = F.softmax(output['label_logits'].float(), dim=1)
conditioned_probs.append(probs.float().detach().cpu().numpy())
conditioned_probs = np.concatenate(conditioned_probs, axis=1)
test_probs.append(conditioned_probs)
test_ids.append([test_database[cur_id + k]['annot_id'] for k in range(conditioned_probs.shape[0])])
cur_id += conditioned_probs.shape[0]
else:
raise ValueError('Not Support Task {}'.format(config.DATASET.TASK))
test_probs = np.concatenate(test_probs, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
result_npy_path = os.path.join(save_path, '{}_test_result_{}.npy'.format(save_name, config.DATASET.TASK))
np.save(result_npy_path, test_probs)
print('result npy saved to {}.'.format(result_npy_path))
# generate final result csv
if config.DATASET.TASK == 'Q2A':
columns = ['answer_{}'.format(i) for i in range(4)]
else:
columns = ['rationale_conditioned_on_a{}_{}'.format(i, j) for i in range(4) for j in range(4)]
dataframe = pd.DataFrame(data=test_probs, columns=columns)
dataframe['annot_id'] = test_ids
dataframe = dataframe.set_index('annot_id', drop=True)
dataframe.to_csv(result_csv_path)
print('result csv saved to {}.'.format(result_csv_path))
return result_csv_path
def merge_result(q2a_result_file, qa2r_result_file, output_file):
left_df = pd.read_csv(q2a_result_file)
right_df = pd.read_csv(qa2r_result_file)
merged_df = pd.merge(left_df, right_df, on='annot_id')
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
merged_df.to_csv(output_file, index=False)
print('merged result csv saved to {}.'.format(output_file))
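# Illustrative sketch with hypothetical paths: merge_result() joins the two per-task result
# CSVs on annot_id into a single submission file.
def _example_merge(save_dir='output'):
    merge_result(os.path.join(save_dir, 'q2a_test_result_Q2A.csv'),
                 os.path.join(save_dir, 'qa2r_test_result_QA2R.csv'),
                 os.path.join(save_dir, 'submission.csv'))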
| 43.026667 | 126 | 0.683297 |
25b00aca851d97bd8ea0d3d033daf52b2fbf884b | 589 | py | Python | dicom_viewer/osirix/export_roi.py | aarashy/dicom-viewer | 438ecd9f196555f7c432c1b956783fd321a30abb | ["MIT"] | null | null | null | dicom_viewer/osirix/export_roi.py | aarashy/dicom-viewer | 438ecd9f196555f7c432c1b956783fd321a30abb | ["MIT"] | null | null | null | dicom_viewer/osirix/export_roi.py | aarashy/dicom-viewer | 438ecd9f196555f7c432c1b956783fd321a30abb | ["MIT"] | null | null | null |
#HeaderInfo
#type=ROITool
#name=Export to DICOM Overlay
#version=2021.08.05
#author=aheyd@berkeley.edu
#EndHeaderInfo
CSV_FILE_PATH = "./Users/aarash/annotations.csv"
def embed_rois():
osirix_backend = OsirixBackend(osirix)
actions = [WriteToDicomOverlay(), AppendToCsv(CSV_FILE_PATH)]
dicom_files = osirix_backend.list_annotated_dicom()
for action in actions:
        print("Applying {} to annotated DICOM files".format(type(action).__name__))
action.apply_to_all(dicom_files)
if __name__ == "__main__":
embed_rois()
    print("Finished embedding ROIs")
# Refresh OsiriX
osirix.frontmostViewer().needsDisplayUpdate()
| 25.608696 | 65 | 0.72326 |
6f6168922ffae8916ed949f1b84f2b51b1c9dadd | 2,138 | py | Python | bertdotconfig/dictutils.py | berttejeda/bertconfig | ebd9cfe1f2cbaeab2ffc167e00c70233638752d6 | ["MIT"] | null | null | null | bertdotconfig/dictutils.py | berttejeda/bertconfig | ebd9cfe1f2cbaeab2ffc167e00c70233638752d6 | ["MIT"] | null | null | null | bertdotconfig/dictutils.py | berttejeda/bertconfig | ebd9cfe1f2cbaeab2ffc167e00c70233638752d6 | ["MIT"] | null | null | null |
from functools import reduce
import collections
import collections.abc
import logging
import sys
class DictUtils:
    def __init__(self):
        # get() below calls self.logger.debug(), so a logger must exist on the instance
        self.logger = logging.getLogger(__name__)
def Merge(self, dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in merge_dct.items():
            if (k in dct and isinstance(dct[k], dict)
                    and isinstance(merge_dct[k], collections.abc.Mapping)):
self.Merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
return dct
def get(self, yaml_input, dict_path, default=None):
"""Interpret wildcard paths for retrieving values from a dictionary object"""
if '.*.' in dict_path:
try:
ks = dict_path.split('.*.')
if len(ks) > 1:
data = []
path_string = ks[0]
ds = self.recurse(yaml_input, path_string)
for d in ds:
sub_path_string = '{s}.{dd}.{dv}'.format(s=path_string, dd=d, dv=ks[1])
self.logger.debug('Path string is: %s' % sub_path_string)
result = self.recurse(yaml_input, sub_path_string, default)
if result:
data.append(result)
return data
                else:
                    data = self.recurse(yaml_input, dict_path, default)
                    if not isinstance(data, dict):
                        return {}
                    return data
except Exception as e:
raise(e)
else:
return self.recurse(yaml_input, dict_path, default)
def recurse(self, data_input, keys, default=None):
"""Recursively retrieve values from a dictionary object"""
result = ''
if isinstance(data_input, dict):
result = reduce(lambda d, key: d.get(key, default) if isinstance(
d, dict) else default, keys.split('.'), data_input)
        return result
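# Illustrative sketch with hypothetical data: Merge() recurses into nested dicts instead of
# overwriting whole branches, and recurse() walks a dotted key path.
def _example_dictutils_usage():
    du = DictUtils()
    merged = du.Merge({'a': {'x': 1}}, {'a': {'y': 2}})  # -> {'a': {'x': 1, 'y': 2}}
    value = du.recurse({'a': {'x': 1}}, 'a.x')           # -> 1
    return merged, value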
| 35.04918 | 85 | 0.574369 |
67cec514e925721aa0d9371d2e912cd8115eb7d0 | 2,669 | py | Python | thirdpart/shibboleth/backends.py | yuyichao/seahub | 82f16b9e47b41a73ed46311ebc6151745e3b6fc7 | ["Apache-2.0"] | null | null | null | thirdpart/shibboleth/backends.py | yuyichao/seahub | 82f16b9e47b41a73ed46311ebc6151745e3b6fc7 | ["Apache-2.0"] | null | null | null | thirdpart/shibboleth/backends.py | yuyichao/seahub | 82f16b9e47b41a73ed46311ebc6151745e3b6fc7 | ["Apache-2.0"] | null | null | null |
from django.conf import settings
from django.db import connection
from seaserv import ccnet_api
from seahub.auth.backends import RemoteUserBackend
from seahub.base.accounts import User
from registration.models import (
notify_admins_on_activate_request, notify_admins_on_register_complete)
class ShibbolethRemoteUserBackend(RemoteUserBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
# Create active user by default.
activate_after_creation = getattr(settings, 'SHIB_ACTIVATE_AFTER_CREATION', True)
def get_user(self, username):
try:
user = User.objects.get(email=username)
except User.DoesNotExist:
user = None
return user
def authenticate(self, remote_user, shib_meta):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
username = self.clean_username(remote_user)
local_ccnet_users = ccnet_api.search_emailusers('DB', username, -1, -1)
if not local_ccnet_users:
local_ccnet_users = ccnet_api.search_emailusers('LDAP', username, -1, -1)
if not local_ccnet_users:
if self.create_unknown_user:
user = User.objects.create_user(
email=username, is_active=self.activate_after_creation)
if user and self.activate_after_creation is False:
notify_admins_on_activate_request(user.email)
                    # Do not send the following registration-finished email (if any),
                    # which would cause confusion.
return user
if user and settings.NOTIFY_ADMIN_AFTER_REGISTRATION is True:
notify_admins_on_register_complete(user.email)
else:
user = None
return user
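# Illustrative sketch, not part of this module: the backend is meant to be paired with a
# remote-user style middleware and enabled through AUTHENTICATION_BACKENDS; the dotted path
# below assumes this file is importable as shibboleth.backends.
EXAMPLE_SETTINGS_FRAGMENT = {
    'AUTHENTICATION_BACKENDS': (
        'shibboleth.backends.ShibbolethRemoteUserBackend',
    ),
    'SHIB_ACTIVATE_AFTER_CREATION': True,
}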
| 39.835821 | 85 | 0.667291 |
97a8133b3eab5e90c5e930634226d3eeddf55910 | 16,939 | py | Python | trainGrasp_es.py | irom-lab/PAC-Imitation | 1b9c203b02551895613b6710da33e1bebe4a0f11 | ["MIT"] | 12 | 2020-08-11T03:26:36.000Z | 2022-02-10T01:14:08.000Z | trainGrasp_es.py | irom-lab/PAC-Imitation | 1b9c203b02551895613b6710da33e1bebe4a0f11 | ["MIT"] | null | null | null | trainGrasp_es.py | irom-lab/PAC-Imitation | 1b9c203b02551895613b6710da33e1bebe4a0f11 | ["MIT"] | 1 | 2021-03-08T10:46:06.000Z | 2021-03-08T10:46:06.000Z |
import os
import sys
import warnings
warnings.filterwarnings("ignore")
import torch
import numpy as np
from numpy import array
import ray
import json
import time
import scipy
import random
import matplotlib.pyplot as plt
from src.nn_grasp import PolicyNet
from src.grasp_rollout_env import GraspRolloutEnv
from src.pac_es import kl_inverse, compute_grad_ES
class TrainGrasp_PAC_ES:
def __init__(self, json_file_name, result_path, model_path):
# Extract JSON config
self.json_file_name = json_file_name
with open(json_file_name+'.json') as json_file:
self.json_data = json.load(json_file)
config_dic, pac_dic, nn_dic, optim_dic = \
[value for key, value in self.json_data.items()]
self.delta = pac_dic['delta']
self.delta_prime = pac_dic['delta_prime']
self.delta_final = pac_dic['delta_final']
self.numTrainEnvs = pac_dic['numTrainEnvs']
self.numTestEnvs = pac_dic['numTestEnvs']
self.L = pac_dic['L']
self.include_reg = pac_dic['include_reg']
self.out_cnn_dim = nn_dic['out_cnn_dim']
self.z_conv_dim = nn_dic['z_conv_dim']
self.z_mlp_dim = nn_dic['z_mlp_dim']
self.z_total_dim = nn_dic['z_conv_dim']+nn_dic['z_mlp_dim']
self.actor_pr_path = config_dic['actor_pr_path']
self.numSteps = config_dic['numSteps']
self.num_cpus = config_dic['num_cpus']
self.saved_model_path = config_dic['saved_model_path']
self.checkPalmContact = config_dic['checkPalmContact']
self.ES_method = config_dic['ES_method']
self.use_antithetic = config_dic['use_antithetic']
self.num_epsilon = config_dic['num_epsilon']
self.mu_lr = optim_dic['mu_lr']
self.logvar_lr = optim_dic['logvar_lr']
self.decayLR = optim_dic['decayLR']
# Set up seeding
self.seed = 0
random.seed(self.seed)
np.random.seed(self.seed)
torch.manual_seed(self.seed)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# Use CPU for ES for now
device = 'cpu'
# Config object index for all training and testing trials
self.obj_folder = config_dic['obj_folder']
self.train_obj_ind_list = np.arange(0,self.numTrainEnvs)
self.test_obj_ind_list = np.arange(500,500+self.numTestEnvs)
# Load prior policy, freeze params
actor_pr = PolicyNet(input_num_chann=1,
dim_mlp_append=0,
num_mlp_output=5,
out_cnn_dim=self.out_cnn_dim,
z_conv_dim=self.z_conv_dim,
z_mlp_dim=self.z_mlp_dim).to(device)
actor_pr.load_state_dict(torch.load(self.actor_pr_path, map_location=device))
for name, param in actor_pr.named_parameters():
param.requires_grad = False
actor_pr.eval() # not needed, but anyway
# Initialize rollout environment
self.rollout_env = GraspRolloutEnv(
actor=actor_pr,
z_total_dim=self.z_total_dim,
num_cpus=self.num_cpus,
checkPalmContact=self.checkPalmContact,
useLongFinger=config_dic['use_long_finger'])
# Set prior distribution of parameters
self.mu_pr = torch.zeros((self.z_total_dim))
self.logvar_pr = torch.zeros((self.z_total_dim))
# Initialize the posterior distribution
self.mu_param = torch.tensor(self.mu_pr, requires_grad=True)
self.logvar_param = torch.tensor(self.logvar_pr, requires_grad=True)
# Get training envs
self.trainEnvs = self.get_object_config(numTrials=self.numTrainEnvs, obj_ind_list=self.train_obj_ind_list)
# Get test envs
self.testEnvs = self.get_object_config(numTrials=self.numTestEnvs, obj_ind_list=self.test_obj_ind_list)
# Recording: training details and results
self.result_path = result_path
self.model_path = model_path
self.best_bound_data = (0, 0, 0, None, None, (self.seed, random.getstate(), np.random.get_state(), torch.get_rng_state())) # emp, bound, step, mu, logvar, seed
self.best_emp_data = (0, 0, 0, None, None, (self.seed, random.getstate(), np.random.get_state(), torch.get_rng_state()))
self.cost_env_his = [] # history for plotting
self.reg_his = []
self.kl_his = []
self.lr_his = []
def get_object_config(self, numTrials, obj_ind_list):
obj_x = np.random.uniform(low=0.45,
high=0.55,
size=(numTrials, 1))
obj_y = np.random.uniform(low=-0.05,
high=0.05,
size=(numTrials, 1))
obj_yaw = np.random.uniform(low=-np.pi,
high=np.pi,
size=(numTrials, 1))
objPos = np.hstack((obj_x, obj_y, 0.005*np.ones((numTrials, 1))))
objOrn = np.hstack((np.zeros((numTrials, 2)), obj_yaw))
objPathInd = np.arange(0,numTrials) # each object has unique initial condition -> one env
objPathList = []
for obj_ind in obj_ind_list:
objPathList += [self.obj_folder + str(obj_ind) + '.urdf']
return (objPos, objOrn, objPathInd, objPathList)
def train(self):
# Resume saved model if specified
if self.saved_model_path != "":
checkpoint = torch.load(self.saved_model_path)
start_step = checkpoint['step']
# Retrieve
self.best_bound_data = checkpoint['best_bound_data']
self.best_emp_data = checkpoint['best_emp_data']
self.cost_env_his = checkpoint['cost_env_his']
self.reg_his = checkpoint['reg_his']
self.kl_his = checkpoint['kl_his']
self.lr_his = checkpoint['lr_his']
# Update params
self.mu_param = checkpoint['mu']
self.logvar_param = checkpoint['logvar']
# Load envs
self.trainEnvs = checkpoint['trainEnvs']
self.testEnvs = checkpoint['testEnvs']
# Update seed state
self.seed, python_seed_state, np_seed_state, torch_seed_state = checkpoint['seed_data']
random.setstate(python_seed_state)
np.random.set_state(np_seed_state)
torch.set_rng_state(torch_seed_state)
else:
start_step = -1 # start from beginning
# Use Adam optimizer from Pytorch, load optim state if resume
optimizer = torch.optim.Adam([
{'params': self.mu_param, 'lr': self.mu_lr},
{'params': self.logvar_param, 'lr': self.logvar_lr}])
if self.decayLR['use']:
# scheduler = torch.optim.lr_scheduler.MultiStepLR(
# optimizer,
# milestones=self.decayLR['milestones'],
# gamma=self.decayLR['gamma'])
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=self.decayLR['gamma'], patience=10, threshold=1e-3, threshold_mode='rel')
if self.saved_model_path != "":
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# Determine how many policies for one env
if self.use_antithetic:
num_trial_per_env = 2*self.num_epsilon
else:
num_trial_per_env = self.num_epsilon
# Extract env configs
obj_pos_all, obj_orn_all, obj_path_ind_all, obj_path_list_all = self.trainEnvs
# Repeat env config for policies in one env
obj_pos_all = np.tile(obj_pos_all, (num_trial_per_env,1))
obj_orn_all = np.tile(obj_orn_all, (num_trial_per_env,1))
obj_path_ind_all = np.tile(obj_path_ind_all, (num_trial_per_env))
obj_path_list_all = obj_path_list_all*num_trial_per_env
# Run steps
for step in range(self.numSteps):
if step <= start_step:
continue
step_start_time = time.time()
with torch.no_grad(): # speed up
# Make a copy for the step
mu_ps = self.mu_param.clone().detach()
logvar_ps = self.logvar_param.clone().detach()
mu_pr = self.mu_pr.clone()
logvar_pr = self.logvar_pr.clone()
# Sample xi used for the step
if self.use_antithetic:
epsilons = torch.normal(mean=0., std=1.,
size=(self.numTrainEnvs*self.num_epsilon, self.z_total_dim))
epsilons = torch.cat((epsilons, -epsilons)) # antithetic
else:
epsilons = torch.normal(mean=0., std=1.,
size=(self.numTrainEnvs*self.num_epsilon,
self.z_total_dim))
sigma_ps = (0.5*logvar_ps).exp()
zs_all = mu_ps + sigma_ps*epsilons
# Run trials without GUI
success_list = self.rollout_env.parallel(
zs_all=zs_all,
objPos=obj_pos_all,
objOrn=obj_orn_all,
objPathInd=obj_path_ind_all,
objPathList=obj_path_list_all)
cost_env = torch.tensor([1-s for s in success_list]).float()
emp_rate = np.mean(success_list)
# Include PAC-Bayes reg in ES
theta = zs_all
kld, R = self.get_pac_bayes(
self.numTrainEnvs,
self.delta,
logvar_ps,
logvar_pr,
mu_ps,
mu_pr)
reg = np.sqrt(R)
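# Per-sample log density ratio log q(theta)/p(theta) of the posterior N(mu_ps, exp(logvar_ps))
# over the prior N(mu_pr, exp(logvar_pr)), summed over the latent dimensions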
log_pt_pr = torch.sum(
0.5*(logvar_pr-logvar_ps) + \
(theta-mu_pr)**2/(2*logvar_pr.exp()) - \
(theta-mu_ps)**2/(2*logvar_ps.exp()) \
, dim=1)
# Get cost, check if include PAC-Bayes cost
if self.include_reg:
cost_es = cost_env + log_pt_pr/(4*self.numTrainEnvs*reg)
else:
cost_es = cost_env
# Compute ES gradient estimates for mu and logvar from the centered costs and sampled epsilons
grad_mu, grad_logvar = compute_grad_ES(
cost_es-torch.mean(cost_es),
epsilons,
sigma_ps,
method=self.ES_method)
# Print and record result
reg = reg.item()
cost_env = torch.mean(cost_env).item()
bound = 1-cost_env-reg
print("\n", step, "Emp:", emp_rate, "Env:", cost_env, "Reg:", reg, "Bound:", bound, "KL:", kld)
print('mu:', self.mu_param.data)
print('logvar:', self.logvar_param.data)
print('Time: %s\n' % (time.time() - step_start_time))
# Save mu and logvar if at best McAllester bound
if bound > self.best_bound_data[1]:
self.best_bound_data = (emp_rate, bound, step, mu_ps, logvar_ps, (self.seed, random.getstate(), np.random.get_state(), torch.get_rng_state()))
if emp_rate > self.best_emp_data[0]:
self.best_emp_data = (emp_rate, bound, step, mu_ps, logvar_ps, (self.seed, random.getstate(), np.random.get_state(), torch.get_rng_state()))
# Save training details, cover at each step
self.cost_env_his += [cost_env]
self.reg_his += [reg]
self.kl_his += [kld]
self.lr_his += [optimizer.state_dict()['param_groups'][0]['lr']] # only lr for mu since for sigma would be the same
torch.save({
'training_his':(self.cost_env_his, self.reg_his, self.kl_his, self.lr_his),
'cur_data': (mu_ps, logvar_ps),
'best_bound_data': self.best_bound_data,
'best_emp_data': self.best_emp_data,
'seed_data':(self.seed, random.getstate(), np.random.get_state(), torch.get_rng_state()),
'actor_pr_path':self.actor_pr_path,
'json_data':self.json_data,
}, self.result_path+'train_details') # not saving optim_state, grad
# Do not update params until after saving results
self.mu_param.grad = grad_mu
self.logvar_param.grad = grad_logvar
optimizer.step()
# Decay learning rate if specified
if self.decayLR['use']:
scheduler.step(emp_rate)
# Save model every 5 steps
if step % 5 == 0 and step > 0:
torch.save({
'step': step,
'mu': self.mu_param,
"logvar": self.logvar_param,
'optimizer_state_dict': optimizer.state_dict(),
"cost_env_his": self.cost_env_his,
"reg_his": self.reg_his,
"kl_his": self.kl_his,
"lr_his": self.lr_his,
'best_bound_data': self.best_bound_data,
'best_emp_data': self.best_emp_data,
"trainEnvs": self.trainEnvs,
"testEnvs": self.testEnvs,
"seed_data": (self.seed, random.getstate(), np.random.get_state(), torch.get_rng_state()),
}, self.model_path+'model_'+str(step))
def estimate_train_cost(self, mu_ps, logvar_ps):
# Extract envs
objPos, objOrn, objPathInd, objPathList = self.trainEnvs
# Run training trials
estimate_success_list = []
for sample_ind in range(self.L):
with torch.no_grad(): # speed up
print('\nRunning sample %d out of %d...\n' % (sample_ind+1, self.L))
# Sample new latent every time
epsilons = torch.normal(mean=0., std=1.,
size=(self.numTrainEnvs, self.z_total_dim))
sigma_ps = (0.5*logvar_ps).exp()
zs_all = mu_ps + sigma_ps*epsilons
success_list = self.rollout_env.parallel(
zs_all=zs_all,
objPos=objPos,
objOrn=objOrn,
objPathInd=objPathInd,
objPathList=objPathList)
estimate_success_list += success_list
return np.mean(array([1-s for s in estimate_success_list]))
def estimate_true_cost(self, mu_ps, logvar_ps):
# Extract envs
objPos, objOrn, objPathInd, objPathList = self.testEnvs
# Config all test trials
epsilons = torch.normal(mean=0., std=1.,
size=(self.numTestEnvs, self.z_total_dim))
sigma_ps = (0.5*logvar_ps).exp()
zs_all = mu_ps + sigma_ps*epsilons
# Run test trials and get estimated true cost
with torch.no_grad(): # speed up
estimate_success_list = self.rollout_env.parallel(
zs_all=zs_all,
objPos=objPos,
objOrn=objOrn,
objPathInd=objPathInd,
objPathList=objPathList)
return np.mean(array([1-s for s in estimate_success_list]))
def compute_final_bound(self, best_data):
# Retrieve mu and logvar from best bound step, or best emp step
step_used = best_data[2]
mu_ps = best_data[3]
logvar_ps = best_data[4]
seed, python_seed_state, np_seed_state, torch_seed_state = best_data[5]
mu_pr = self.mu_pr.detach() # prior, checked all zeros
logvar_pr = self.logvar_pr.detach() # prior, checked all zeros
# Reload seed state
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
random.setstate(python_seed_state)
np.random.set_state(np_seed_state)
torch.set_rng_state(torch_seed_state)
# Get estimated true cost using test envs
print('Estimating true cost...')
true_estimate_cost = self.estimate_true_cost(mu_ps, logvar_ps)
# Get estimated train cost using train envs and L=100
print('Estimating training cost (may take a while)...')
train_estimate_start_time = time.time()
train_estimate_cost = self.estimate_train_cost(mu_ps, logvar_ps)
print('\n\n\nTime to run estimate training cost:', time.time()-train_estimate_start_time)
# Get inverse bound
_, R_final = self.get_pac_bayes(
self.numTrainEnvs,
self.delta_final,
logvar_ps,
logvar_pr,
mu_ps,
mu_pr)
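# Invert a KL (Chernoff) sample-convergence bound to account for estimating the training cost
# from L rollouts per env, then invert the PAC-Bayes-kl bound with 2*R_final for the final guarantee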
cost_chernoff = kl_inverse(train_estimate_cost,
(1/self.L)*np.log(2/self.delta_prime))
inv_bound = 1-kl_inverse(cost_chernoff, 2*R_final)
# McAllester and Quadratic PAC Bound, use estimated training costs with L=100
_, R = self.get_pac_bayes(
self.numTrainEnvs,
self.delta,
logvar_ps,
logvar_pr,
mu_ps,
mu_pr)
maurer_bound = 1-train_estimate_cost-np.sqrt(R)
quad_bound = 1-(np.sqrt(train_estimate_cost + R) + np.sqrt(R))**2
return step_used, R, maurer_bound, quad_bound, inv_bound, train_estimate_cost, true_estimate_cost
def get_pac_bayes(self, N, delta, logvar_ps, logvar_pr, mu_ps, mu_pr):
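# Closed-form KL(posterior || prior) for diagonal Gaussians N(mu_ps, exp(logvar_ps)) and N(mu_pr, exp(logvar_pr))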
kld = (-0.5*torch.sum(1 \
+ logvar_ps-logvar_pr \
-(mu_ps-mu_pr)**2/logvar_pr.exp() \
-(logvar_ps-logvar_pr).exp())
).item() # as scalar
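# PAC-Bayes regularization term R = (KL + ln(2*sqrt(N)/delta)) / (2N) used in the McAllester-style bound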
R = (kld + np.log(2*np.sqrt(N)/delta))/(2*N)
return kld, R # as scalar, not tensor
if __name__ == '__main__':
# Read JSON config
json_file_name = sys.argv[1]
# Create a new subfolder under result
result_path = 'result/'+json_file_name+'/'
if not os.path.exists(result_path):
os.mkdir(result_path)
os.mkdir(result_path+'figure/')
# Create a new subfolder under model
model_path = 'model/'+json_file_name+'/'
if not os.path.exists(model_path):
os.mkdir(model_path)
# Initialize training env
trainer = TrainGrasp_PAC_ES(
json_file_name=json_file_name,
result_path=result_path,
model_path=model_path)
# Train
trainer.train()
# Get bounds using best bound step, save
step_used, R, maurer_bound, quad_bound, inv_bound, train_estimate_cost, true_estimate_cost = trainer.compute_final_bound(trainer.best_bound_data)
print('Using best bound, step', step_used)
print('R:', R)
print("Maurer Bound:", maurer_bound)
print("Quadratic Bound:", quad_bound)
print("KL-inv bound:", inv_bound)
print("Train estimate:", 1-train_estimate_cost)
print("True estimate:", 1-true_estimate_cost)
print('\n')
np.savez(result_path+'bounds_best_bound.npz',
step=step_used,
R=R,
maurer_bound=maurer_bound,
quad_bound=quad_bound,
inv_bound=inv_bound,
train_estimate_cost=train_estimate_cost,
true_estimate_cost=true_estimate_cost,
)
# Get bounds using best empirical rate step, save
step_used, R, maurer_bound, quad_bound, inv_bound, train_estimate_cost, true_estimate_cost = trainer.compute_final_bound(trainer.best_emp_data)
print('Using best emp, step', step_used)
print('R:', R)
print("Maurer Bound:", maurer_bound)
print("Quadratic Bound:", quad_bound)
print("KL-inv bound:", inv_bound)
print("Train estimate:", 1-train_estimate_cost)
print("True estimate:", 1-true_estimate_cost)
print('\n')
np.savez(result_path+'bounds_best_emp.npz',
step=step_used,
R=R,
maurer_bound=maurer_bound,
quad_bound=quad_bound,
inv_bound=inv_bound,
train_estimate_cost=train_estimate_cost,
true_estimate_cost=true_estimate_cost,
)
| 34.289474
| 162
| 0.69585
|
262c9530469a2468e2dc5641f3bcf8f60d0db315
| 528
|
py
|
Python
|
backend/api-site/user/views.py
|
Basel-h-ashour/product-catalog
|
f37974c1de0b5a8dcd46deaebc1af7627b1c343d
|
[
"MIT"
] | null | null | null |
backend/api-site/user/views.py
|
Basel-h-ashour/product-catalog
|
f37974c1de0b5a8dcd46deaebc1af7627b1c343d
|
[
"MIT"
] | null | null | null |
backend/api-site/user/views.py
|
Basel-h-ashour/product-catalog
|
f37974c1de0b5a8dcd46deaebc1af7627b1c343d
|
[
"MIT"
] | null | null | null |
from rest_framework import generics
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, TokenSerializer
class CreateUserView(generics.CreateAPIView):
"""creates a new user from JSON data"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Creates a new custom token for the user"""
serializer_class = TokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
| 31.058824
| 60
| 0.814394
|
d3f870ad87df883b422bfd7a0c13c79eac432c0a
| 2,259
|
py
|
Python
|
testing/python3/dcptestautomation/run_dcgm_tests.py
|
deepio/DCGM
|
d10273f18fb3d425da752ab6bb7e07af3d18caec
|
[
"Apache-2.0"
] | 85
|
2021-02-03T19:58:50.000Z
|
2022-03-21T08:00:11.000Z
|
testing/python3/dcptestautomation/run_dcgm_tests.py
|
deepio/DCGM
|
d10273f18fb3d425da752ab6bb7e07af3d18caec
|
[
"Apache-2.0"
] | 19
|
2021-03-19T08:13:58.000Z
|
2022-03-17T02:50:41.000Z
|
testing/python3/dcptestautomation/run_dcgm_tests.py
|
deepio/DCGM
|
d10273f18fb3d425da752ab6bb7e07af3d18caec
|
[
"Apache-2.0"
] | 17
|
2021-02-04T06:47:30.000Z
|
2022-03-21T22:14:03.000Z
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import util
import sys
# Copy the dcgm data into a csv file
def main(cmdArgs):
metrics = cmdArgs.metrics
time = cmdArgs.time
gpuid_list = cmdArgs.gpuid_list
download_bin = cmdArgs.download_bin
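# Reinstall the test dependencies from a clean state before running the validation script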
ret = util.removeDependencies(True)
if ret == 0:
ret = util.installDependencies(True)
if ret == 0:
if download_bin:
cmd = '{executable} run_validate_dcgm.py -m {0} -t {1} -d -i {2}'\
.format(metrics, time, gpuid_list, executable=sys.executable)
else:
cmd = '{executable} run_validate_dcgm.py -m {0} -t {1} -i {2}'\
.format(metrics, time, gpuid_list, executable=sys.executable)
ret = util.executeBashCmd(cmd, True)
print("\nTests are done, removing dependencies")
ret = util.removeDependencies(False)
print("\n All Done")
def parseCommandLine():
parser = argparse.ArgumentParser(description="Validation of dcgm metrics")
parser.add_argument("-m", "--metrics", required=True, help="Metrics to be validated \
E.g. \"1009\", etc")
parser.add_argument("-i", "--gpuid_list", required=False, default='0', \
help="comma separated gpu id list starting from 0, eg \"0,1,2\"")
parser.add_argument("-t", "--time", required=True, help="time in seconds")
parser.add_argument("-d", "--download_bin", action='store_true', required=False, default=False,\
help="If specified, download new binaries")
args = parser.parse_args()
return args
if __name__ == "__main__":
# Parsing command line options
cmdArgs = parseCommandLine()
main(cmdArgs)
| 35.857143
| 100
| 0.671093
|
4430aef4b432a8264dedf71322b2235da15de193
| 215
|
py
|
Python
|
examples/Python/XdmfExampleSystemUtils.py
|
scottwedge/xdmf
|
f41196c966997a20f60525a3d2083490a63626a3
|
[
"BSD-3-Clause"
] | 4
|
2015-12-07T08:11:06.000Z
|
2020-06-15T01:39:07.000Z
|
examples/Python/XdmfExampleSystemUtils.py
|
scottwedge/xdmf
|
f41196c966997a20f60525a3d2083490a63626a3
|
[
"BSD-3-Clause"
] | 1
|
2020-04-26T16:50:37.000Z
|
2020-04-26T16:50:37.000Z
|
examples/Python/XdmfExampleSystemUtils.py
|
scottwedge/xdmf
|
f41196c966997a20f60525a3d2083490a63626a3
|
[
"BSD-3-Clause"
] | 4
|
2016-04-04T20:54:31.000Z
|
2020-06-15T01:39:08.000Z
|
from Xdmf import *
if __name__ == "__main__":
#//getRealPath begin
priorPath = "Path you want to convert"
convertedPath = XdmfSystemUtils.getRealPath(priorPath)
#//getRealPath end
| 21.5
| 62
| 0.646512
|
4192a2f38bd02ea1bba24bda58a61e91cc210112
| 5,931
|
py
|
Python
|
source/run_nbow.py
|
ucabops/robbie
|
f74aefbdb9069d62188d4bb820bf91fa50f73b8c
|
[
"OML"
] | null | null | null |
source/run_nbow.py
|
ucabops/robbie
|
f74aefbdb9069d62188d4bb820bf91fa50f73b8c
|
[
"OML"
] | null | null | null |
source/run_nbow.py
|
ucabops/robbie
|
f74aefbdb9069d62188d4bb820bf91fa50f73b8c
|
[
"OML"
] | null | null | null |
import argparse
import json
import os
import time
import urllib
import numpy as np
import pandas as pd
import gensim
from models.nbow import master_base, key_adder
from models.amer_brit import wordpairs
def print_metrics(metrics, runs):
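# metrics layout, as used below: [0] correct within top 10, [1] correct within top 100,
# [2] answer ranks within top 100, [3] answer ranks within top 1000, [4] correct at rank 1, [5] correct within top 1000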
acc_at_1 = metrics[4]/runs
acc_at_10 = metrics[0]/runs
acc_at_100 = metrics[1]/runs
correct_at_100 = metrics[1]
correct_at_1000 = metrics[5]
median_at_100 = np.median(np.asarray(metrics[2]))
median_at_1000 = np.median(np.asarray(metrics[3]))
print(f"Total Number of Clues Considered: {runs}")
print(f"Accuracy @ Rank 1: {acc_at_1:.2%}")
print(f"Accuracy @ Rank 10: {acc_at_10:.2%}")
print(f"Accuracy @ Rank 100: {acc_at_100:.2%}")
print(f"Number of Correct Answers in top 100: {correct_at_100}")
print(f"Median answer rank, top 100: {median_at_100}")
print(f"Number of Correct Answers in top 1000: {correct_at_1000}")
print(f"Median answer rank, top 1000: {median_at_1000}")
if __name__ == '__main__':
script_desc = 'Run the neural bag-of-words model (NBOW) on the \'gquick\' dataset'
parser = argparse.ArgumentParser(description=script_desc)
parser.add_argument('filename', type=str,
help='File where data is located, excluding \'*-entries.json\' suffix. Must be in \'./data\'')
parser.add_argument('--variant', dest='variant', type=int, default=0,
help='Choose variant to run. Defaults to 0')
args = parser.parse_args()
# Load the dataset
filepath = f'./data/{args.filename}-entries.json'
with open(filepath, 'r') as file:
data = json.load(file)
# Download Google's pretrained W2V model
w2v_path = './data/GoogleNews-vectors-negative300.bin.gz'
if not os.path.isfile(w2v_path):
print('Downloading the pre-trained W2V model (could take a while, grab a cup of tea...)')
url = 'https://nlpcrossworddata.blob.core.windows.net/test/GoogleNews-vectors-negative300.bin.gz'
urllib.request.urlretrieve(url, w2v_path)
# Load W2V model into memory
model = gensim.models.KeyedVectors.load_word2vec_format(w2v_path, binary=True)
# Choose which BOW variant to run
if args.variant == 0:
enhancements = {'length': False,
'clue_word': False,
'anagrams': False,
'multi_synonym': False,
'spelling': False,
'multiword': False}
elif args.variant == 1:
enhancements = {'length': True,
'clue_word': False,
'anagrams': False,
'multi_synonym': False,
'spelling': False,
'multiword': False}
elif args.variant == 2:
enhancements = {'length': True,
'clue_word': True,
'anagrams': False,
'multi_synonym': False,
'spelling': False,
'multiword': False}
elif args.variant == 3:
enhancements = {'length': True,
'clue_word': True,
'anagrams': True,
'multi_synonym': False,
'spelling': False,
'multiword': False}
elif args.variant == 4:
enhancements = {'length': True,
'clue_word': True,
'anagrams': True,
'multi_synonym': True,
'spelling': False,
'multiword': False}
elif args.variant == 5:
enhancements = {'length': True,
'clue_word': True,
'anagrams': True,
'multi_synonym': True,
'spelling': True,
'multiword': False}
elif args.variant == 6:
enhancements = {'length': True,
'clue_word': True,
'anagrams': True,
'multi_synonym': False,
'spelling': True,
'multiword': False}
elif args.variant == 7:
enhancements = {'length': True,
'clue_word': True,
'anagrams': True,
'multi_synonym': True,
'spelling': True,
'multiword': True}
elif args.variant == 8:
enhancements = {'length': True,
'clue_word': True,
'anagrams': True,
'multi_synonym': False,
'spelling': True,
'multiword': True}
else:
msg = f'Unknown variant "{args.variant}" (must be between 0 and 8)'
raise ValueError(msg)
# Save current time. Used for metrics
start_time = time.time()
# Add embeddings for British spellings of words, if requested
if enhancements['spelling']:
model = key_adder(model, wordpairs)
dur = time.time() - start_time
print(f'British spellings added to model in {dur/60} mins.')
# Run model
keys = list(data.keys())
metrics, errs, runs = master_base(model, data, keys, pooling='mean', version=2, topn=100000, verbose=2,
enhancements=enhancements)
print_metrics(metrics, runs)
end_time = time.time()
print(f"Process finished --- {(end_time-start_time)/60:.1f} minutes ---")
| 39.805369
| 118
| 0.511718
|
925e83547232da3fedf200ebaab887dba0b2736e
| 9,901
|
py
|
Python
|
face_detection/caffemodel/predict_caffemodel.py
|
hhliao/A-Light-and-Fast-Face-Detector-for-Edge-Devices
|
439408ca0b3dbe29531dc83ff52643e38e511070
|
[
"MIT"
] | 1,172
|
2019-08-09T09:59:15.000Z
|
2020-12-14T03:20:08.000Z
|
face_detection/caffemodel/predict_caffemodel.py
|
hhliao/A-Light-and-Fast-Face-Detector-for-Edge-Devices
|
439408ca0b3dbe29531dc83ff52643e38e511070
|
[
"MIT"
] | 95
|
2019-08-19T01:15:49.000Z
|
2020-12-15T05:34:39.000Z
|
face_detection/caffemodel/predict_caffemodel.py
|
hhliao/A-Light-and-Fast-Face-Detector-for-Edge-Devices
|
439408ca0b3dbe29531dc83ff52643e38e511070
|
[
"MIT"
] | 317
|
2019-08-11T05:52:06.000Z
|
2020-12-14T13:34:53.000Z
|
# coding: utf-8
import sys
import os
import numpy
import numpy as np
import cv2
import time
# empty data batch class for dynamical properties
class DataBatch:
pass
def NMS(boxes, overlap_threshold):
'''
:param boxes: numpy nx5, n is the number of boxes, 0:4->x1, y1, x2, y2, 4->score
:param overlap_threshold:
:return:
'''
if boxes.shape[0] == 0:
return boxes
# if the bounding boxes are integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if boxes.dtype != numpy.float32:
boxes = boxes.astype(numpy.float32)
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
sc = boxes[:, 4]
widths = x2 - x1
heights = y2 - y1
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = heights * widths
idxs = numpy.argsort(sc) # sort scores in ascending order
# keep looping while some indexes still remain in the indexes list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# compare the current highest-scoring box with the remaining boxes
xx1 = numpy.maximum(x1[i], x1[idxs[:last]])
yy1 = numpy.maximum(y1[i], y1[idxs[:last]])
xx2 = numpy.minimum(x2[i], x2[idxs[:last]])
yy2 = numpy.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the overlap region
w = numpy.maximum(0, xx2 - xx1 + 1)
h = numpy.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that overlap more than the threshold
idxs = numpy.delete(idxs, numpy.concatenate(([last], numpy.where(overlap > overlap_threshold)[0])))
# return only the bounding boxes that were picked using the
# integer data type
return boxes[pick]
class PredictCaffe(object):
def __init__(self,
caffe,
symbol_file_path,
model_file_path,
receptive_field_list,
receptive_field_stride,
bbox_small_list,
bbox_large_list,
receptive_field_center_start,
num_output_scales
):
self.caffe = caffe
self.deploy = symbol_file_path
self.model = model_file_path
self.receptive_field_list = receptive_field_list
self.receptive_field_stride = receptive_field_stride
self.bbox_small_list = bbox_small_list
self.bbox_large_list = bbox_large_list
self.receptive_field_center_start = receptive_field_center_start
self.num_output_scales = num_output_scales
self.constant = [i / 2.0 for i in self.receptive_field_list]
self.input_height = 480
self.input_width = 640
self.__load_model()
def __load_model(self):
# load symbol and parameters
print('----> load symbol file: %s\n----> load model file: %s' % (self.deploy, self.model))
self.net = self.caffe.Net(self.deploy, self.model, self.caffe.TEST)
print('----> Model is loaded successfully.')
def predict(self, image, resize_scale=1, score_threshold=0.8, top_k=100, NMS_threshold=0.3, NMS_flag=True, skip_scale_branch_list=[]):
if image.ndim != 3 or image.shape[2] != 3:
print('Only RGB images are supported.')
return None
bbox_collection = []
shorter_side = min(image.shape[:2])
if shorter_side * resize_scale < 128:
resize_scale = float(128) / shorter_side
input_image = cv2.resize(image, (0, 0), fx=resize_scale, fy=resize_scale)
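# Normalize pixel values to [-1, 1] and reshape to NCHW (batch, channel, height, width) for the Caffe input blob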
input_image = input_image.astype(dtype=numpy.float32)
input_image = input_image - 127.5
input_image = input_image / 127.5
input_image = input_image[:, :, :, numpy.newaxis]
input_image = input_image.transpose([3, 2, 0, 1])
print(input_image.shape)
tic = time.time()
self.net.blobs['data'].data[...] = input_image
res = self.net.forward()
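# Each detection scale produces a score map (slice_axis* blob) and a bbox regression map (conv*_bbox blob);
# collect them in scale order so outputs[2*i] and outputs[2*i+1] pair up below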
outputs = []
out_key = ['slice_axis32_1', 'conv8_3_bbox',
'slice_axis33_1', 'conv10_3_bbox',
'slice_axis34_1', 'conv13_3_bbox',
'slice_axis35_1', 'conv15_3_bbox',
'slice_axis36_1', 'conv18_3_bbox',
'slice_axis37_1', 'conv21_3_bbox',
'slice_axis38_1', 'conv23_3_bbox',
'slice_axis39_1', 'conv25_3_bbox']
for key in out_key:
outputs.append(res[key])
print(res[key].shape)
toc = time.time()
infer_time = (toc - tic) * 1000
for i in range(self.num_output_scales):
if i in skip_scale_branch_list:
continue
score_map = numpy.squeeze(outputs[i * 2], (0, 1))
# score_map_show = score_map * 255
# score_map_show[score_map_show < 0] = 0
# score_map_show[score_map_show > 255] = 255
# cv2.imshow('score_map' + str(i), cv2.resize(score_map_show.astype(dtype=numpy.uint8), (0, 0), fx=2, fy=2))
# cv2.waitKey()
bbox_map = numpy.squeeze(outputs[i * 2 + 1], 0)
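# Receptive-field centre coordinates for every cell of this scale's score map; the predicted offsets in
# bbox_map are scaled by half the receptive field size (self.constant) and subtracted from the centres
# to recover corner coordinates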
RF_center_Xs = numpy.array([self.receptive_field_center_start[i] + self.receptive_field_stride[i] * x for x in range(score_map.shape[1])])
RF_center_Xs_mat = numpy.tile(RF_center_Xs, [score_map.shape[0], 1])
RF_center_Ys = numpy.array([self.receptive_field_center_start[i] + self.receptive_field_stride[i] * y for y in range(score_map.shape[0])])
RF_center_Ys_mat = numpy.tile(RF_center_Ys, [score_map.shape[1], 1]).T
x_lt_mat = RF_center_Xs_mat - bbox_map[0, :, :] * self.constant[i]
y_lt_mat = RF_center_Ys_mat - bbox_map[1, :, :] * self.constant[i]
x_rb_mat = RF_center_Xs_mat - bbox_map[2, :, :] * self.constant[i]
y_rb_mat = RF_center_Ys_mat - bbox_map[3, :, :] * self.constant[i]
x_lt_mat = x_lt_mat / resize_scale
x_lt_mat[x_lt_mat < 0] = 0
y_lt_mat = y_lt_mat / resize_scale
y_lt_mat[y_lt_mat < 0] = 0
x_rb_mat = x_rb_mat / resize_scale
x_rb_mat[x_rb_mat > image.shape[1]] = image.shape[1]
y_rb_mat = y_rb_mat / resize_scale
y_rb_mat[y_rb_mat > image.shape[0]] = image.shape[0]
select_index = numpy.where(score_map > score_threshold)
for idx in range(select_index[0].size):
bbox_collection.append((x_lt_mat[select_index[0][idx], select_index[1][idx]],
y_lt_mat[select_index[0][idx], select_index[1][idx]],
x_rb_mat[select_index[0][idx], select_index[1][idx]],
y_rb_mat[select_index[0][idx], select_index[1][idx]],
score_map[select_index[0][idx], select_index[1][idx]]))
# NMS
bbox_collection = sorted(bbox_collection, key=lambda item: item[-1], reverse=True)
if len(bbox_collection) > top_k:
bbox_collection = bbox_collection[0:top_k]
bbox_collection_numpy = numpy.array(bbox_collection, dtype=numpy.float32)
if NMS_flag:
final_bboxes = NMS(bbox_collection_numpy, NMS_threshold)
final_bboxes_ = []
for i in range(final_bboxes.shape[0]):
final_bboxes_.append((final_bboxes[i, 0], final_bboxes[i, 1], final_bboxes[i, 2], final_bboxes[i, 3], final_bboxes[i, 4]))
return final_bboxes_, infer_time
else:
return bbox_collection_numpy, infer_time
def run_prediction_folder():
sys.path.append('..')
from config_farm import configuration_10_560_25L_8scales_v1 as cfg
import caffe
debug_folder = '' # fill the folder that contains images
file_name_list = [file_name for file_name in os.listdir(debug_folder) if file_name.lower().endswith('jpg')]
symbol_file_path = './configuration_10_560_25L_8scales_v1/symbol_10_560_25L_8scales_v1_deploy.prototxt'
model_file_path = './configuration_10_560_25L_8scales_v1/train_10_560_25L_8scales_v1_iter_1400000.caffemodel'
my_predictor = PredictCaffe(caffe=caffe,
symbol_file_path=symbol_file_path,
model_file_path=model_file_path,
receptive_field_list=cfg.param_receptive_field_list,
receptive_field_stride=cfg.param_receptive_field_stride,
bbox_small_list=cfg.param_bbox_small_list,
bbox_large_list=cfg.param_bbox_large_list,
receptive_field_center_start=cfg.param_receptive_field_center_start,
num_output_scales=cfg.param_num_output_scales)
for file_name in file_name_list:
im = cv2.imread(os.path.join(debug_folder, file_name))
bboxes, _ = my_predictor.predict(im, resize_scale=1, score_threshold=0.3, top_k=10000, NMS_threshold=0.3, NMS_flag=True, skip_scale_branch_list=[])
for bbox in bboxes:
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)
if max(im.shape[:2]) > 1600:
scale = 1600/max(im.shape[:2])
im = cv2.resize(im, (0, 0), fx=scale, fy=scale)
cv2.imshow('im', im)
cv2.waitKey()
# cv2.imwrite(os.path.join(debug_folder, file_name.replace('.jpg','_result.jpg')), im)
if __name__ == '__main__':
run_prediction_folder()
| 40.744856
| 155
| 0.607918
|
ec62bd71c87c6d68cf8752d37b2095f429ca73ec
| 543
|
py
|
Python
|
utils/sharding.py
|
tparikh/qontract-reconcile
|
b4b2c2af69b9b43616b26c60484a6953c4e433e7
|
[
"Apache-2.0"
] | null | null | null |
utils/sharding.py
|
tparikh/qontract-reconcile
|
b4b2c2af69b9b43616b26c60484a6953c4e433e7
|
[
"Apache-2.0"
] | null | null | null |
utils/sharding.py
|
tparikh/qontract-reconcile
|
b4b2c2af69b9b43616b26c60484a6953c4e433e7
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
import logging
import os
LOG = logging.getLogger(__name__)
SHARDS = int(os.environ.get('SHARDS', 1))
SHARD_ID = int(os.environ.get('SHARD_ID', 0))
def is_in_shard(value):
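# Hash the value with MD5 and take the digest modulo SHARDS;
# the value belongs to this process when the remainder equals SHARD_ID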
if SHARDS == 1:
return True
value_md5 = hashlib.md5(value.encode())
value_hex = value_md5.hexdigest()
value_int = int(value_hex, base=16)
in_shard = value_int % SHARDS == SHARD_ID
if in_shard:
LOG.debug('IN_SHARD TRUE: %s', value)
else:
LOG.debug('IN_SHARD FALSE: %s', value)
return in_shard
| 19.392857
| 46
| 0.653775
|
0f443d0be93821f6f35a919eb50db6d2bf0c4be9
| 1,116
|
py
|
Python
|
example-plugins/mynoop.py
|
gsi-upm/senpy
|
c5a2cf23cb5ccb5ba96ae11aa1dcbb3d902c6218
|
[
"Apache-2.0"
] | 74
|
2015-02-26T18:27:00.000Z
|
2022-02-17T17:14:43.000Z
|
example-plugins/mynoop.py
|
gsi-upm/senpy
|
c5a2cf23cb5ccb5ba96ae11aa1dcbb3d902c6218
|
[
"Apache-2.0"
] | 39
|
2015-02-26T18:26:45.000Z
|
2018-05-14T09:42:59.000Z
|
example-plugins/mynoop.py
|
gsi-upm/senpy
|
c5a2cf23cb5ccb5ba96ae11aa1dcbb3d902c6218
|
[
"Apache-2.0"
] | 24
|
2016-03-10T11:52:55.000Z
|
2020-11-27T06:38:37.000Z
|
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import noop
from senpy.plugins import SentimentPlugin
class NoOp(SentimentPlugin):
'''This plugin does nothing. Literally nothing.'''
version = 0
def analyse_entry(self, entry, *args, **kwargs):
yield entry
def test(self):
print(dir(noop))
super(NoOp, self).test()
test_cases = [{
'entry': {
'nif:isString': 'hello'
},
'expected': {
'nif:isString': 'hello'
}
}]
| 27.219512
| 77
| 0.646057
|
b5e3bf4d75a6bd37fca7a7ecfee7b11e80ee9a2d
| 2,368
|
py
|
Python
|
examples/demo/pictures/main.py
|
hakanardo/kivy
|
707b4aa35bfb79577bd87dfc23aa527f0eb03640
|
[
"MIT"
] | null | null | null |
examples/demo/pictures/main.py
|
hakanardo/kivy
|
707b4aa35bfb79577bd87dfc23aa527f0eb03640
|
[
"MIT"
] | null | null | null |
examples/demo/pictures/main.py
|
hakanardo/kivy
|
707b4aa35bfb79577bd87dfc23aa527f0eb03640
|
[
"MIT"
] | null | null | null |
#!/usr/bin/kivy
'''
Basic Picture Viewer
====================
This simple image browser demonstrates the scatter widget. You should
see three framed photographs on a background. You can click and drag
the photos around, or multi-touch to drop a red dot to scale and rotate the
photos.
The photos are loaded from the local images directory, while the background
picture is from the data shipped with kivy in kivy/data/images/background.jpg.
The file pictures.kv describes the interface and the file shadow32.png is
the border to make the images look like framed photographs. Finally,
the file android.txt is used to package the application for use with the
Kivy Launcher Android application.
For Android devices, you can copy/paste this directory into
/sdcard/kivy/pictures on your Android device.
The images in the image directory are from the Internet Archive,
`https://archive.org/details/PublicDomainImages`, and are in the public
domain.
'''
import kivy
kivy.require('1.0.6')
from glob import glob
from random import randint
from os.path import join, dirname
from kivy.app import App
from kivy.logger import Logger
from kivy.uix.scatter import Scatter
from kivy.properties import StringProperty
# FIXME this shouldn't be necessary
from kivy.core.window import Window
class Picture(Scatter):
'''Picture is the class that will show the image with a white border and a
shadow. There is nothing defined here because almost everything lives in
pictures.kv. Check the rule named <Picture> inside that file, and you'll see
how the Picture() is really constructed and used.
The source property will be the filename to show.
'''
source = StringProperty(None)
class PicturesApp(App):
def build(self):
# the root is created in pictures.kv
root = self.root
# get any files into images directory
curdir = dirname(__file__)
for filename in glob(join(curdir, 'images', '*')):
try:
# load the image
picture = Picture(source=filename, rotation=randint(-30, 30))
# add to the main field
root.add_widget(picture)
except Exception as e:
Logger.exception('Pictures: Unable to load <%s>' % filename)
def on_pause(self):
return True
if __name__ == '__main__':
PicturesApp().run()
| 30.358974
| 78
| 0.70777
|
59b3ca7ebea2d9cd9be740f7304d959b96f92e93
| 5,744
|
py
|
Python
|
utils/upgrade_utils.py
|
Ostrokrzew/open-cas-linux
|
35eb5682c9aae13ee7b44da5acc2dd0b593a0b10
|
[
"BSD-3-Clause-Clear"
] | 139
|
2019-03-29T08:01:40.000Z
|
2022-03-19T01:01:44.000Z
|
utils/upgrade_utils.py
|
Ostrokrzew/open-cas-linux
|
35eb5682c9aae13ee7b44da5acc2dd0b593a0b10
|
[
"BSD-3-Clause-Clear"
] | 604
|
2019-04-12T14:18:59.000Z
|
2022-03-31T18:19:56.000Z
|
utils/upgrade_utils.py
|
Ostrokrzew/open-cas-linux
|
35eb5682c9aae13ee7b44da5acc2dd0b593a0b10
|
[
"BSD-3-Clause-Clear"
] | 64
|
2019-03-29T08:44:01.000Z
|
2022-03-30T09:11:30.000Z
|
#
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
import subprocess
import os
import re
def user_prompt(message, choices, default):
result = None
prompt = f"{message} ({'/'.join(choices)})[{default}]: "
logging.info(f"Prompting user: {prompt}")
while result not in choices:
result = input(f"\n{prompt}")
if not result:
logging.info(f"User chose default: {default}")
result = default
else:
logging.info(f"User chose: {result}")
return result
def yn_prompt(message, default="n"):
return user_prompt(message, choices=["y", "n"], default=default)
class Result:
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return f"{type(self).__name__}: {self.msg}"
class Failure(Result):
def result_mark(self):
return "[\u001b[31mX\u001b[0m]"
class Success(Result):
def result_mark(self):
return "[\u001b[32mv\u001b[0m]"
class Warn(Result):
def result_mark(self):
return "[\u001b[33m!\u001b[0m]"
class Except(Failure):
def result_mark(self):
return "[\u001b[31mE\u001b[0m]"
class Abort(Failure):
def result_mark(self):
return "[\u001b[31mA\u001b[0m]"
class StateMachine:
transition_map = {}
def __init__(self, initial_state, **args):
self.initial_state = initial_state
self.params = args
def run(self):
s = self.initial_state
result = Success()
self.last_fail = None
try:
while s is not None:
self.current_state = s(self)
result = self.current_state.start()
if isinstance(result, Failure):
self.last_fail = result
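# Pick the next state keyed by the result type, falling back to this state's 'default'
# transition and then to the machine-wide 'default'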
try:
s = self.transition_map[s][type(result)]
except KeyError:
try:
s = self.transition_map[s]["default"]
except KeyError:
s = self.transition_map["default"]
except KeyboardInterrupt:
self.result = self.abort()
except Exception as e:
self.result = self.exception(f"{type(e).__name__}({e})")
if self.last_fail:
result = self.last_fail
logging.info(f"Finishing {type(self).__name__} with result {result}")
return result
def abort(self):
log = "User interrupted"
print(log)
logging.warning(log)
return Abort()
def exception(self, e):
log = f"Stopping {type(self).__name__}. Reason: {e}"
print(log)
self.last_fail = Except(e)
logging.exception(log)
return self.last_fail
class UpgradeState:
will_prompt = False
log = ""
def __init__(self, sm):
self.state_machine = sm
def do_work(self):
raise NotImplementedError()
def start(self):
self.enter_state()
try:
self.result = self.do_work()
except KeyboardInterrupt:
self.result = Abort("User aborted")
except Exception as e:
log = f"State {type(self).__name__} failed unexpectedly. Reason: {e}"
self.result = Except(log)
logging.exception(log)
raise e
self.exit_state()
return self.result
def enter_state(self):
logging.debug(f"Entering state {type(self).__name__}")
print(f"{self.log+'...':60}", end="", flush=True)
def exit_state(self):
if isinstance(self.result, Success):
log = logging.debug
elif isinstance(self.result, Warn):
log = logging.warning
else:
log = logging.error
log(f"Exiting state {type(self).__name__} with result '{self.result}'")
if self.will_prompt:
print(f"\n{self.log+'...':60}", end="", flush=True)
print(self.result.result_mark())
def insert_module(name, installed=True, **params):
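# Use 'modprobe --first-time' for modules installed system-wide, otherwise 'insmod';
# keyword arguments are passed through as module parameters (name=value)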
cmd_params = [f"{param}={val}" for param, val in params.items()]
cmd = ["modprobe", "--first-time"] if installed else ["insmod"]
cmd += [name] + cmd_params
p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode:
raise Exception(p.stderr.decode("ascii").rstrip("\n"))
def remove_module(name):
p = subprocess.run(["rmmod", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode:
raise Exception(p.stderr.decode("ascii").rstrip("\n"))
def get_device_sysfs_path(device):
basename = os.path.basename(device)
p1 = subprocess.Popen(["find", "-L", "/sys/block", "-maxdepth", "2"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", f"{basename}$"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(
["sed", "-r", "s/(\/sys\/block\/[^/]+).*/\\1/"], stdin=p2.stdout, stdout=subprocess.PIPE
) # noqa W605
p1.stdout.close()
p2.stdout.close()
output = p3.communicate()[0]
return output.decode("ascii").rstrip("\n")
def get_device_schedulers(sysfs_path):
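# The sysfs scheduler file lists all available schedulers with the active one in brackets,
# e.g. 'noop deadline [cfq]'; return the bracketed name as current and the full list as available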
with open(f"{sysfs_path}/queue/scheduler", "r") as f:
schedulers = f.readline().rstrip("\n")
try:
current = re.match(".*\[(.*)\].*", schedulers)[1] # noqa W605
except IndexError:
current = "none"
pass
available = schedulers.replace("[", "").replace("]", "").split()
return current, available
def set_device_scheduler(sysfs_path, scheduler):
with open(f"{sysfs_path}/queue/scheduler", "w") as f:
f.write(f"{scheduler}\n")
def drop_os_caches():
with open(f"/proc/sys/vm/drop_caches", "w") as f:
f.write("3")
| 26.109091
| 97
| 0.584958
|
5b0c0aa6d528c9470d7e6b31bfc7df01bd41f3dd
| 2,379
|
py
|
Python
|
docanchors/search/strategies/highlight.py
|
kluge-ai/docanchors
|
15108f8fb1bab9f356ef7ca8f5a21a1ff6bd08ba
|
[
"Apache-2.0"
] | null | null | null |
docanchors/search/strategies/highlight.py
|
kluge-ai/docanchors
|
15108f8fb1bab9f356ef7ca8f5a21a1ff6bd08ba
|
[
"Apache-2.0"
] | null | null | null |
docanchors/search/strategies/highlight.py
|
kluge-ai/docanchors
|
15108f8fb1bab9f356ef7ca8f5a21a1ff6bd08ba
|
[
"Apache-2.0"
] | null | null | null |
"""Strategies for documents represented as arrays of tokens.
As presented in [1].
References
----------
[1] Kluge and Eckhardt, 2020: Explaining Suspected Phishing Attempts with Document Anchors
"""
import numpy as np
from .strategy import Strategy
class Grow(Strategy):
"""Grows highlights by adding an element after the last element of every highlight.
Note that highlights at the end of the candidate are not affected.
"""
def __call__(self, candidate: np.ndarray) -> np.ndarray:
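# transitions flags every index whose value differs from the previous element; forcing those
# positions to True extends each highlight by one element past its end (highlight starts are already True)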
transitions = np.diff(candidate, n=1)
transitions = np.concatenate(([False], transitions))
return np.where(transitions,
np.ones_like(candidate, dtype=bool),
candidate)
class Shrink(Strategy):
"""Shrinks highlights by removing the last element of every highlight."""
def __call__(self, candidate: np.ndarray) -> np.ndarray:
transitions = np.diff(candidate, n=1)
if candidate[-1]:
transitions = np.append(transitions, True)
else:
transitions = np.append(transitions, False)
return np.where(transitions,
np.zeros_like(candidate, dtype=bool),
candidate)
class Shift(Strategy):
"""Shift all highlights one element to the left or one element to the right.
Direction is chosen randomly.
"""
def __init__(self):
super(Shift, self).__init__()
self._shift_left = ShiftLeft()
self._shift_right = ShiftRight()
def __call__(self, candidate: np.ndarray) -> np.ndarray:
if self._random.random() < 0.5:
return self._shift_left(candidate)
else:
return self._shift_right(candidate)
class ShiftLeft(Strategy):
def __call__(self, candidate: np.ndarray) -> np.ndarray:
return np.append(candidate[1:], False)
class ShiftRight(Strategy):
def __call__(self, candidate: np.ndarray) -> np.ndarray:
return np.concatenate(([False], candidate[:-1]))
class Pass(Strategy):
"""Leave candidate unaltered."""
def __call__(self, candidate: np.ndarray) -> np.ndarray:
return candidate.copy()
class Erase(Strategy):
"""Remove all highlights from candidate."""
def __call__(self, candidate: np.ndarray) -> np.ndarray:
return np.zeros_like(candidate, dtype=bool)
| 27.034091
| 90
| 0.643548
|
af840b33e664c66f9a3f6e83f7f350e2602f57d5
| 2,792
|
py
|
Python
|
__main__.py
|
clash402/stock-alert
|
220fb237ac97d670446987d228d9a8573e95193a
|
[
"MIT"
] | null | null | null |
__main__.py
|
clash402/stock-alert
|
220fb237ac97d670446987d228d9a8573e95193a
|
[
"MIT"
] | null | null | null |
__main__.py
|
clash402/stock-alert
|
220fb237ac97d670446987d228d9a8573e95193a
|
[
"MIT"
] | null | null | null |
import requests as req
from decouple import config
from twilio.rest import Client
# PROPERTIES
STOCK_NAME = "TSLA"
COMPANY_NAME = "Tesla Inc"
# STOCK
def get_stock_closing_price(stock_name, day: int):
alpha_vantage_endpoint = "https://www.alphavantage.co/query"
alpha_vantage_key = config("ALPHA_VANTAGE_KEY")
alpha_vantage_params = {"apikey": alpha_vantage_key, "function": "TIME_SERIES_DAILY", "symbol": stock_name}
res = req.get(alpha_vantage_endpoint, params=alpha_vantage_params)
res.raise_for_status()
data = res.json()["Time Series (Daily)"]
data_list = [value for (_, value) in data.items()]
return data_list[day]["4. close"]
def get_difference(one_days_ago_closing_price, two_days_ago_closing_price):
return float(one_days_ago_closing_price) - float(two_days_ago_closing_price)
def round_difference(difference, one_days_ago_closing_price):
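# Percentage difference rounded to the nearest whole percent, using the most recent closing price as the base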
return round((difference / float(one_days_ago_closing_price)) * 100)
def get_up_down_emoji(difference):
if difference > 0:
return "🔺"
else:
return "🔻"
# NEWS
def get_latest_articles(company_name, stock_name, up_down_emoji, difference_rounded):
news_api_endpoint = "https://newsapi.org/v2/everything"
news_api_key = config("NEWS_API_KEY")
news_api_params = {"apiKey": news_api_key, "qInTitle": company_name}
res = req.get(news_api_endpoint, params=news_api_params)
res.raise_for_status()
articles = res.json()["articles"]
latest_three_articles = articles[:3]
return [f"{stock_name}: {up_down_emoji}{difference_rounded}%\n\n"
f"Headline:\n{article['title']}. \n\n"
f"Brief:\n{article['description']}" for article in latest_three_articles]
# MESSAGE
def will_send_message(company_name, stock_name, up_down_emoji, difference_rounded):
if abs(difference_rounded) > 1:
latest_articles = get_latest_articles(company_name, stock_name, up_down_emoji, difference_rounded)
twilio_sid = config("TWILIO_SID")
twilio_auth_token = config("TWILIO_AUTH_TOKEN")
twilio_client = Client(twilio_sid, twilio_auth_token)
for article in latest_articles:
twilio_client.messages.create(
body=article,
from_=config("SENDING_PHONE_NUMBER"),
to=config("RECEIVING_PHONE_NUMBER")
)
# MAIN
one_days_ago_closing_price = get_stock_closing_price(STOCK_NAME, 0)
two_days_ago_closing_price = get_stock_closing_price(STOCK_NAME, 1)
difference = get_difference(one_days_ago_closing_price, two_days_ago_closing_price)
difference_rounded = round_difference(difference, one_days_ago_closing_price)
up_down_emoji = get_up_down_emoji(difference)
will_send_message(COMPANY_NAME, STOCK_NAME, up_down_emoji, difference_rounded)
| 33.238095
| 111
| 0.739613
|
27ef6431fb5efed44a1daee24f0ebe0f69a13a74
| 2,358
|
py
|
Python
|
Coding_Block_Project1.py
|
chiranjeevbitp/Python27new
|
d366efee57857402bae16cabf1df94c657490750
|
[
"bzip2-1.0.6"
] | null | null | null |
Coding_Block_Project1.py
|
chiranjeevbitp/Python27new
|
d366efee57857402bae16cabf1df94c657490750
|
[
"bzip2-1.0.6"
] | null | null | null |
Coding_Block_Project1.py
|
chiranjeevbitp/Python27new
|
d366efee57857402bae16cabf1df94c657490750
|
[
"bzip2-1.0.6"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
# for apple
mean_01 = np.array([3.0,4.0])
cov_01 = np.array([[1.0,-0.5],[-0.5,1.0]])
# for lemon (sweetness and color)
# Red is a higher value and yellow is a lower value
# sweetness is a higher value and sourness is a lower value
# for Lemon
mean_02 = np.array([0.0,0.0])
cov_02 = np.array([[1.5,0.5],[0.5,0.6]])
# 2x2 covariance matrices
# Arrays of array
dist_01 = np.random.multivariate_normal(mean_01,cov_01,200)
dist_02 = np.random.multivariate_normal(mean_02,cov_02,200)
print dist_01.shape
print dist_02.shape
#print dist_01
# Try to make scatter plot
plt.figure(0)
for x in range(dist_01.shape[0]):
plt.scatter(dist_01[x,0],dist_01[x,1],color='red')
plt.scatter(dist_02[x, 0], dist_02[x, 1], color='yellow')
#plt.show()
# Training data preparation of apples and lemons
# Using 400 Samples (200 for Apples and 200 for Lemons )
labels = np.zeros((400,1))
labels[200:] = 1.0
X_data = np.zeros((400,2))
X_data[:200, :] = dist_01
X_data[200:, :] = dist_02
print X_data
print labels
# KNN Algorithm :)
#Dist of the query_point to all other points in the space ( O(N)) time for every point + sorting
#Euclidean Distance
def dist( x1 , x2):
return np.sqrt(((x1-x2)**2).sum())
x1 = np.array([0.0,0.0])
x2 = np.array([1.0,1.0])
print dist(x1,x2)
def knn(X_train, query_point, y_train, k=5):
vals = []
for ix in range(X_train.shape[0]):
v = [dist(query_point, X_train[ix, :]), y_train[ix]]
vals.append(v)
# vals is a list containing distances and their labels
updated_vals = sorted(vals)
# Let us pick the top K values
pred_arr = np.asarray(updated_vals[:k])
pred_arr = np.unique(pred_arr[:, 1], return_counts=True)
# Largest Occurrence
print pred_arr
# q = np.array([2.0,2.0])
# knn(X_data,q,labels)
# Largest Occurrence
index = pred_arr[1].argmax() # Index of largest freq
return pred_arr[0][index]
q = np.array([0.0, 4.0])
predicted_label = knn(X_data, q, labels)
#if predicted label is 0.0 then apple else lemon
print predicted_label
#
# ## Run a Loop over a testing data(Split the original data into 2 sets - Training, Testing)
#
# # FInd predictions for Q Query points
#
# # If predicted outcome = actual outcome -> Sucess else Failure
#
# # Accuracy = (Successes)/ (Total no of testing points) * 100
| 20.504348
| 96
| 0.672604
|
ac0ae8c272a8bccfd634f392cb4c1b4345357945
| 5,768
|
py
|
Python
|
pyjswidgets/pyjamas/ui/horizsplitpanel.ie6.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 739
|
2015-01-01T02:05:11.000Z
|
2022-03-30T15:26:16.000Z
|
pyjswidgets/pyjamas/ui/horizsplitpanel.ie6.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 33
|
2015-03-25T23:17:04.000Z
|
2021-08-19T08:25:22.000Z
|
pyjswidgets/pyjamas/ui/horizsplitpanel.ie6.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 167
|
2015-01-01T22:27:47.000Z
|
2022-03-17T13:29:19.000Z
|
"""
Horizontal Split Panel: Left and Right layouts with a movable splitter.
/*
* Copyright 2008 Google Inc.
* Copyright 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
*
* Licensed under the Apache License, Version 2.0 (the "License") you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http:#www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
"""
class ImplHorizontalSplitPanel:
""" The IE6 implementation for horizontal split panels.
"""
def __init__(self, panel):
self.panel = panel
self.isResizeInProgress = False
self.splitPosition = 0
elem = panel.getElement()
# Prevents inherited text-align settings from interfering with the
# panel's layout. The setting we choose must be bidi-sensitive,
# as left-alignment is the default with LTR directionality, and
# right-alignment is the default with RTL directionality.
if True: # TODO (LocaleInfo.getCurrentLocale().isRTL()) {
DOM.setStyleAttribute(elem, "textAlign", "right")
else:
DOM.setStyleAttribute(elem, "textAlign", "left")
DOM.setStyleAttribute(elem, "position", "relative")
# Technically, these are snapped to the top and bottom, but IE doesn't
# provide a reliable way to make that happen, so a resize listener is
# wired up to control the height of these elements.
self.panel.addAbsolutePositoning(panel.getWidgetElement(0))
self.panel.addAbsolutePositoning(panel.getWidgetElement(1))
self.panel.addAbsolutePositoning(panel.getSplitElement())
self.panel.expandToFitParentUsingPercentages(panel.container)
if True: # TODO (LocaleInfo.getCurrentLocale().isRTL()):
# Snap the left pane to the left edge of the container. We
# only need to do this when layout is RTL if we don't, the
# left pane will overlap the right pane.
panel.setLeft(panel.getWidgetElement(0), "0px")
def onAttach(self):
self.addResizeListener(self.panel.container)
self.onResize()
def onDetach(self):
DOM.setElementAttribute(self.panel.container, "onresize", None)
def onTimer(self, sender):
self.setSplitPositionUsingPixels( self.splitPosition)
self.isResizeInProgress = False
def onSplitterResize(self, px):
if not self.isResizeInProgress:
self.isResizeInProgress = True
Timer(self, 20)
self.splitPosition = px
def setSplitPositionUsingPixels(self, px):
if True: # TODO (LocaleInfo.getCurrentLocale().isRTL()) {
splitElem = self.panel.getSplitElement()
rootElemWidth = self.panel.getOffsetWidth(self.panel.container)
splitElemWidth = self.panel.getOffsetWidth(splitElem)
# This represents an invalid state where layout is incomplete. This
# typically happens before DOM attachment, but I leave it here as a
# precaution because negative width/height style attributes produce
# errors on IE.
if (rootElemWidth < splitElemWidth):
return
# Compute the new right side width.
newRightWidth = rootElemWidth - px - splitElemWidth
# Constrain the dragging to the physical size of the panel.
if (px < 0):
px = 0
newRightWidth = rootElemWidth - splitElemWidth
elif (newRightWidth < 0):
px = rootElemWidth - splitElemWidth
newRightWidth = 0
# Set the width of the right side.
self.panel.setElemWidth(self.panel.getWidgetElement(1), newRightWidth + "px")
# Move the splitter to the right edge of the left element.
self.panel.setLeft(splitElem, px + "px")
# Update the width of the left side
if (px == 0):
# This takes care of a quirky RTL layout bug with IE6.
# During DOM construction and layout, onResize events
# are fired, and this method is called with px == 0.
# If one tries to set the width of the 0 element to
# before layout completes, the 1 element will
# appear to be blanked out.
DeferredCommand.add(self)
else:
self.panel.setElemWidth(self.panel.getWidgetElement(0), px + "px")
else:
self._setSplitPositionUsingPixels(px)
def execute(self):
self.panel.setElemWidth(self.panel.getWidgetElement(0), "0px")
def updateRightWidth(self, rightElem, newRightWidth):
self.panel.setElemWidth(rightElem, newRightWidth + "px")
def addResizeListener(self, container):
resizefn = getattr(self, "onResize")
JS("""
@{{container}}['onresize'] = function() {
@{{resizefn}}();
}
""")
def onResize(self):
leftElem = self.panel.getWidgetElement(0)
rightElem = self.panel.getWidgetElement(1)
height = self.panel.getOffsetHeight(self.panel.container) + "px"
self.panel.setElemHeight(rightElem, height)
self.panel.setElemHeight(self.panel.getSplitElement(), height)
self.panel.setElemHeight(leftElem, height)
self.setSplitPositionUsingPixels(self.panel.getOffsetWidth(leftElem))
| 38.198675
| 89
| 0.642684
|
9fc649b653d5a1c6d0f5673ec6d91050db2b9193
| 563
|
py
|
Python
|
pdb_chain_separate.py
|
naotohori/cafysis
|
9d8534121c01ea75ae965cf39a1e307052ff8523
|
[
"MIT"
] | 2
|
2022-02-25T17:32:41.000Z
|
2022-03-31T14:38:55.000Z
|
pdb_chain_separate.py
|
naotohori/cafysis
|
9d8534121c01ea75ae965cf39a1e307052ff8523
|
[
"MIT"
] | 2
|
2020-05-03T08:36:10.000Z
|
2021-01-27T12:40:50.000Z
|
pdb_chain_separate.py
|
naotohori/life-of-py
|
9d8534121c01ea75ae965cf39a1e307052ff8523
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
from .file_io.pdb import PdbFile
if len(sys.argv) != 3:
print ('\n Usage: SCRIPT [input PDB file] [output DIR (with/without prefix)]\n')
sys.exit(2)
f_pdb = PdbFile(sys.argv[1])
f_pdb.open_to_read()
chains = f_pdb.read_all()
f_pdb.close()
print('%i chains' % len(chains))
for c in chains :
chain_id = c.get_atom(0).chain_id
if chain_id == ' ' :
continue
filename = sys.argv[2] + chain_id + '.pdb'
    # Let PdbFile manage the output file itself; the separate plain open() that
    # used to be here only leaked a file handle.
    f = PdbFile(filename)
    f.open_to_write()
    f.write_all([c])
    f.close()
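# Illustrative invocation (hypothetical file names): running
#   python pdb_chain_separate.py complex.pdb out/model_
# writes one file per chain id found in the input, e.g. out/model_A.pdb and
# out/model_B.pdb, each containing only the atoms of that chain.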
| 22.52
| 84
| 0.626998
|
611245cb4ea59f93fbc0409929be4b23f234a324
| 4,046
|
py
|
Python
|
spotdl/metadata/embedder_base.py
|
khjxiaogu/spotify-downloader
|
a8dcb8d998da0769bbe210f2808d16b346453c23
|
[
"MIT"
] | 4,698
|
2017-06-20T22:37:10.000Z
|
2022-03-28T13:38:07.000Z
|
spotdl/metadata/embedder_base.py
|
Delgan/spotify-downloader
|
8adf3e8d6b98269b1538dd91c9a44ed345c77545
|
[
"MIT"
] | 690
|
2017-06-20T20:08:42.000Z
|
2022-02-26T23:36:07.000Z
|
spotdl/metadata/embedder_base.py
|
Delgan/spotify-downloader
|
8adf3e8d6b98269b1538dd91c9a44ed345c77545
|
[
"MIT"
] | 741
|
2017-06-21T23:32:51.000Z
|
2022-03-07T12:11:54.000Z
|
import mutagen
import os
from abc import ABC
from abc import abstractmethod
import urllib.request
from spotdl.metadata import BadMediaFileError
class EmbedderBase(ABC):
"""
The subclass must define the supported media file encoding
formats here using a static variable - such as:
>>> supported_formats = ("mp3", "m4a", "flac", "ogg", "opus")
"""
supported_formats = ()
@abstractmethod
def __init__(self):
"""
For every supported format, there must be a corresponding
method that applies metadata on this format.
Such as if mp3 is supported, there must exist a method named
`as_mp3` on this class that applies metadata on mp3 files.
"""
        # Map each supported format to its handler method, e.g. "mp3" -> self.as_mp3.
        # getattr is a safer equivalent of the eval-based lookup originally used here.
        self.targets = {}
        for fmt in self.supported_formats:
            self.targets[fmt] = getattr(self, "as_" + fmt)
def get_encoding(self, path):
"""
This method must determine the encoding for a local
audio file. Such as "mp3", "wav", "m4a", etc.
"""
_, extension = os.path.splitext(path)
# Ignore the initial dot from file extension
return extension[1:]
def apply_metadata(self, path, metadata, cached_albumart=None, encoding=None):
"""
This method must automatically detect the media encoding
format from file path and embed the corresponding metadata
on the given file by calling an appropriate submethod.
"""
if cached_albumart is None:
cached_albumart = urllib.request.urlopen(
metadata["album"]["images"][0]["url"],
).read()
if encoding is None:
encoding = self.get_encoding(path)
if encoding not in self.supported_formats:
raise BadMediaFileError(
'The input format ("{}") is not supported.'.format(
encoding,
))
embed_on_given_format = self.targets[encoding]
try:
embed_on_given_format(path, metadata, cached_albumart=cached_albumart)
except (mutagen.id3.error, mutagen.flac.error, mutagen.oggopus.error):
raise BadMediaFileError(
'Cannot apply metadata as "{}" is badly encoded as '
'"{}".'.format(path, encoding)
)
def as_mp3(self, path, metadata, cached_albumart=None):
"""
Method for mp3 support. This method might be defined in
a subclass.
Other methods for additional supported formats must also
be declared here.
"""
raise NotImplementedError
def as_m4a(self, path, metadata, cached_albumart=None):
"""
Method for m4a support. This method might be defined in
a subclass.
Other methods for additional supported formats must also
be declared here.
"""
raise NotImplementedError
def as_flac(self, path, metadata, cached_albumart=None):
"""
Method for flac support. This method might be defined in
a subclass.
Other methods for additional supported formats must also
be declared here.
"""
raise NotImplementedError
def as_ogg(self, path, metadata, cached_albumart=None):
"""
Method for ogg support. This method might be defined in
a subclass.
Other methods for additional supported formats must also
be declared here.
"""
raise NotImplementedError
def as_opus(self, path, metadata, cached_albumart=None):
"""
Method for opus support. This method might be defined in
a subclass.
Other methods for additional supported formats must also
be declared here.
"""
raise NotImplementedError
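# Hypothetical example subclass (illustration only; not part of spotdl) showing how
# the `supported_formats` / `as_<fmt>` contract defined above fits together. The
# EasyID3 tag names and metadata keys used here are assumptions of this sketch.
class _ExampleMp3Embedder(EmbedderBase):
    supported_formats = ("mp3",)
    def __init__(self):
        super().__init__()
    def as_mp3(self, path, metadata, cached_albumart=None):
        # Minimal sketch: write only title and artist tags with mutagen's EasyID3.
        from mutagen.easyid3 import EasyID3
        tags = EasyID3(path)
        tags["title"] = metadata["name"]
        tags["artist"] = metadata["artists"][0]["name"]
        tags.save()
# Usage sketch: _ExampleMp3Embedder().apply_metadata("song.mp3", metadata) would
# dispatch to as_mp3 via self.targets["mp3"].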
| 32.894309
| 82
| 0.61221
|
6ea205edc357c8153620b9167e73cbe93292f6d2
| 3,964
|
py
|
Python
|
parkings/migrations/0001_initial.py
|
klemmari1/parkkihubi
|
93218c6046c0910e8a4c723dc7128c6eec085b8c
|
[
"MIT"
] | 12
|
2016-11-29T15:13:10.000Z
|
2021-06-12T06:45:38.000Z
|
parkings/migrations/0001_initial.py
|
niuzhipeng123/parkkihubi
|
93218c6046c0910e8a4c723dc7128c6eec085b8c
|
[
"MIT"
] | 154
|
2016-11-30T09:07:58.000Z
|
2022-02-12T08:29:36.000Z
|
parkings/migrations/0001_initial.py
|
niuzhipeng123/parkkihubi
|
93218c6046c0910e8a4c723dc7128c6eec085b8c
|
[
"MIT"
] | 15
|
2016-11-29T19:32:48.000Z
|
2022-01-05T11:31:39.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-05 14:40
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.gis.db.models.fields
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='time created')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='time modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('city', models.CharField(blank=True, max_length=80, verbose_name='city')),
('postal_code', models.CharField(blank=True, max_length=20, verbose_name='postal code')),
('street', models.CharField(blank=True, max_length=128, verbose_name='street address')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Operator',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='time created')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='time modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=80, verbose_name='name')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Parking',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='time created')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='time modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('device_identifier', models.CharField(db_index=True, max_length=128, verbose_name='device identifier')),
('location', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name='location')),
('registration_number', models.CharField(db_index=True, max_length=10, validators=[django.core.validators.RegexValidator('^[A-Z0-9-]+$')], verbose_name='registration number')),
('resident_code', models.CharField(blank=True, max_length=1, validators=[django.core.validators.RegexValidator('^[A-Z]{1}$')], verbose_name='resident parking code')),
('special_code', models.CharField(blank=True, max_length=10, verbose_name='special parking code')),
('time_end', models.DateTimeField(db_index=True, verbose_name='parking end time')),
('time_start', models.DateTimeField(db_index=True, verbose_name='parking start time')),
('zone', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)], verbose_name='zone number')),
('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='parkings', to='parkings.Address', verbose_name='address')),
('operator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='parkings', to='parkings.Operator', verbose_name='operator')),
],
options={
'abstract': False,
},
),
]
| 55.830986
| 192
| 0.637235
|
269b52fc88fb7b518cdcd44990b916bed1d683d7
| 4,097
|
py
|
Python
|
src/algorithm/stp.py
|
ShuhuaGao/bcn_ss
|
7f0a46e1ed94e6c4d9df114e8d241b61dc551d85
|
[
"MIT"
] | null | null | null |
src/algorithm/stp.py
|
ShuhuaGao/bcn_ss
|
7f0a46e1ed94e6c4d9df114e8d241b61dc551d85
|
[
"MIT"
] | null | null | null |
src/algorithm/stp.py
|
ShuhuaGao/bcn_ss
|
7f0a46e1ed94e6c4d9df114e8d241b61dc551d85
|
[
"MIT"
] | null | null | null |
"""
Semi-tensor product.
This is an (incomplete) Python & numpy implementation of Dr. Cheng Daizhan's STP toolbox (http://lsc.amss.ac.cn/~dcheng/).
We try to follow the implementation details of that toolbox as much as possible.
Please refer to the documentation of that toolbox for more details.
"""
import numpy as np
from typing import Iterable
def _left_sp(A, B):
m, n = A.shape
p, q = B.shape
k = n // p
C = np.empty((m, k * q), dtype=np.result_type(A, B))
for i in range(m):
for j in range(q):
            C[i, j * k: (j + 1) * k] = B[:, j].reshape((1, p)) @ A[i].reshape((p, k))
return C
def sp(A: np.ndarray, B: np.ndarray) -> np.ndarray:
"""
Semi-Tensor Product of Matrices using Kronecker product.
This function combines `sp` and `sp1` of Cheng's STP toolbox.
Note: if either A or B is a Boolean matrix, then the other will be converted to a Boolean matrix automatically.
In such a case, the STP will be a Boolean one, i.e., AND for multiplication and OR for addition.
    Time complexity:
        Suppose A is an m x n matrix and B is a p x q matrix.
        (1) If n = p, STP degrades to the normal matrix product with complexity O(mnq)
        (2) If n = pt, the complexity is O(mpqt) with Cheng's Definition 1.1 (`sp1` in Cheng's toolbox)
        (3) If p = nt, the complexity is O(mnqt) with Cheng's Definition 1.1 (`sp1` in Cheng's toolbox)
        (4) Otherwise, STP is computed by Cheng's Definition 1.2 using the Kronecker product. Supposing z = lcm(n, p),
            the complexity is O(mqz^3/(np))
Note that the complexity of product of two nxn matrices is assumed to be O(n^3) in the above time complexity analysis,
though the highly optimized implementation in `numpy` generally runs in time between O(n^2) and O(n^3).
See https://en.wikipedia.org/wiki/Matrix_multiplication#Computational_complexity for more details.
"""
assert A.ndim == 2, 'Only 2d array (matrix) is allowed'
assert B.ndim == 2, 'Only 2d array (matrix) is allowed'
m, n = A.shape
p, q = B.shape
if np.issubdtype(A.dtype, np.bool_) or np.issubdtype(B.dtype, np.bool_):
A = A.astype(np.bool_, copy=False)
B = B.astype(np.bool_, copy=False)
if n == p:
return A @ B
# special matrices: to speed up
if n % p == 0:
return _left_sp(A, B)
if p % n == 0:
return _left_sp(B.T, A.T).T
# general matrices
z = np.lcm(n, p)
d = np.result_type(A, B)
return np.kron(A, np.eye(z // n, dtype=d)) @ np.kron(B, np.eye(z // p, dtype=d))
def logical_matrix_from(L: Iterable, n: int, dtype=np.int8) -> np.ndarray:
"""
Reconstruct a 2d logical matrix from a 1d representation.
An item `i` in `Lm` represents `\delta_n^i`.
The type of the matrix is specified by `dtype`, which can be or a sub-type of `np.number` or `np.bool_`
"""
m = np.full((n, len(L)), False, dtype=dtype)
one = True if np.issubdtype(dtype, np.bool_) else 1
for j, i in enumerate(L):
m[i - 1, j] = one
return m
def swap_matrix(m: int, n: int) -> np.ndarray:
"""
Construct a swap matrix W_{[m, n]}, whose size is mn-by-mn.
Complexity: O(m^2n^2)
:param m: int
:param n: int
:return: a swap matrix (a Boolean/logical matrix)
"""
assert m > 0 and n > 0
W = np.zeros((m * n, m * n), dtype=np.bool_)
for i in range(m):
for j in range(n):
c = i * n + j
r = j * m + i
W[r, c] = 1
return W
if __name__ == '__main__':
X = np.array([1, 2, 3, -1]).reshape((1, 4))
Y = np.array([1, 2]).reshape((2, 1))
Z = sp(X, Y)
print(Z)
X = np.array([[1, 2, 1, 1], [2, 3, 1, 2], [3, 2, 1, 0]])
Y = np.array([[1, -2], [2, -1]])
print(sp(X, Y))
A = np.array([[1], [2]])
B = np.array([[2, 1], [3, 5]])
print(sp(A, B))
C = np.array([[1, 1, 0], [0, 1, 0]], dtype=np.bool_)
E = np.array([[0, 1], [0, 0], [1, 1]], dtype=np.bool_)
D = np.array([[True], [False]])
print(sp(C, D))
print(sp(C, E))
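    # Illustrative extra checks (a sketch added here; not part of Cheng's toolbox demo):
    # logical_matrix_from([2, 1, 3], 3) stacks the columns delta_3^2, delta_3^1, delta_3^3.
    print(logical_matrix_from([2, 1, 3], 3))
    # A swap matrix satisfies W_{[m, n]} (x kron y) == (y kron x) for column vectors
    # x (m x 1) and y (n x 1).
    W = swap_matrix(2, 3)
    x = np.array([[1], [2]])
    y = np.array([[3], [4], [5]])
    print(np.array_equal(W @ np.kron(x, y), np.kron(y, x)))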
| 36.90991
| 123
| 0.581401
|
b2f4a0df44a85cb973b50765b292b8df12ef8ced
| 43,780
|
py
|
Python
|
minecraft_discord_bridge/minecraft_discord_bridge.py
|
Tominous/minecraft-discord-bridge
|
6aa4ecec22cb7317591b6e7d6a1622264a6c915e
|
[
"Apache-2.0"
] | 1
|
2019-12-12T12:09:50.000Z
|
2019-12-12T12:09:50.000Z
|
minecraft_discord_bridge/minecraft_discord_bridge.py
|
Tominous/minecraft-discord-bridge
|
6aa4ecec22cb7317591b6e7d6a1622264a6c915e
|
[
"Apache-2.0"
] | null | null | null |
minecraft_discord_bridge/minecraft_discord_bridge.py
|
Tominous/minecraft-discord-bridge
|
6aa4ecec22cb7317591b6e7d6a1622264a6c915e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2018 Tristan Gosselin-Hane.
#
# This file is part of minecraft-discord-bridge
# (see https://github.com/starcraft66/minecraft-discord-bridge).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
import re
import json
import time
import logging
import random
import string
import uuid
import asyncio
import signal
from threading import Thread
from datetime import datetime, timedelta, timezone
from requests import RequestException
from minecraft import authentication
from minecraft.exceptions import YggdrasilError
from minecraft.networking.connection import Connection
from minecraft.networking.packets import clientbound, serverbound
import discord
from mcstatus import MinecraftServer
from bidict import bidict
from requests_futures.sessions import FuturesSession
import _thread
import minecraft_discord_bridge
from .database_session import DatabaseSession
from .elasticsearch_logger import ElasticsearchLogger, ConnectionReason
from .config import Configuration
from .database import DiscordChannel, AccountLinkToken, DiscordAccount
class MinecraftDiscordBridge():
def __init__(self):
self.return_code = os.EX_OK
self.session_token = ""
self.uuid_cache = bidict()
self.webhooks = []
self.bot_username = ""
self.next_message_time = datetime.now(timezone.utc)
self.previous_message = ""
self.player_list = bidict()
self.previous_player_list = bidict()
self.accept_join_events = False
self.tab_header = ""
self.tab_footer = ""
# Initialize the discord part
self.discord_bot = discord.Client()
self.config = Configuration("config.json")
self.connection_retries = 0
self.auth_token = None
self.connection = None
self.setup_logging(self.config.logging_level)
self.database_session = DatabaseSession()
self.logger = logging.getLogger("bridge")
self.database_session.initialize(self.config)
self.bot_perms = discord.Permissions()
self.bot_perms.update(manage_messages=True, manage_webhooks=True)
# Async http request pool
self.req_future_session = FuturesSession(max_workers=100)
self.reactor_thread = Thread(target=self.run_auth_server, args=(self.config.auth_port,))
self.aioloop = asyncio.get_event_loop()
# We need to import twisted after setting up the logger because twisted hijacks our logging
from . import auth_server
auth_server.DATABASE_SESSION = self.database_session
if self.config.es_enabled:
if self.config.es_auth:
self.es_logger = ElasticsearchLogger(
self.req_future_session,
self.config.es_url,
self.config.es_username,
self.config.es_password)
else:
self.es_logger = ElasticsearchLogger(self.req_future_session, self.config.es_url)
@self.discord_bot.event
async def on_ready(): # pylint: disable=W0612
self.logger.info("Discord bot logged in as %s (%s)", self.discord_bot.user.name, self.discord_bot.user.id)
self.logger.info("Discord bot invite link: %s", discord.utils.oauth_url(
client_id=self.discord_bot.user.id, permissions=self.bot_perms))
await self.discord_bot.change_presence(activity=discord.Game("mc!help for help"))
self.webhooks = []
session = self.database_session.get_session()
channels = session.query(DiscordChannel).all()
session.close()
for channel in channels:
channel_id = channel.channel_id
discord_channel = self.discord_bot.get_channel(channel_id)
if discord_channel is None:
session = self.database_session.get_session()
session.query(DiscordChannel).filter_by(channel_id=channel_id).delete()
session.close()
continue
channel_webhooks = await discord_channel.webhooks()
found = False
for webhook in channel_webhooks:
if webhook.name == "_minecraft" and webhook.user == self.discord_bot.user:
self.webhooks.append(webhook.url)
found = True
self.logger.debug("Found webhook %s in channel %s", webhook.name, discord_channel.name)
if not found:
# Create the hook
await discord_channel.create_webhook(name="_minecraft")
@self.discord_bot.event
async def on_message(message): # pylint: disable=W0612
# We do not want the bot to reply to itself
if message.author == self.discord_bot.user:
return
this_channel = message.channel.id
# PM Commands
if message.content.startswith("mc!help"):
try:
send_channel = message.channel
if isinstance(message.channel, discord.abc.GuildChannel):
await message.delete()
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
send_channel = message.author.dm_channel
msg = self.get_discord_help_string()
await send_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
elif message.content.startswith("mc!register"):
try:
send_channel = message.channel
if isinstance(message.channel, discord.abc.GuildChannel):
await message.delete()
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
send_channel = message.author.dm_channel
session = self.database_session.get_session()
discord_account = session.query(DiscordAccount).filter_by(discord_id=message.author.id).first()
if not discord_account:
new_discord_account = DiscordAccount(message.author.id)
session.add(new_discord_account)
session.commit()
discord_account = session.query(DiscordAccount).filter_by(discord_id=message.author.id).first()
new_token = self.generate_random_auth_token(16)
account_link_token = AccountLinkToken(message.author.id, new_token)
discord_account.link_token = account_link_token
session.add(account_link_token)
session.commit()
msg = "Please connect your minecraft account to `{}.{}:{}` in order to link it to this bridge!"\
.format(new_token, self.config.auth_dns, self.config.auth_port)
session.close()
del session
await send_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
# Global Commands
elif message.content.startswith("mc!chathere"):
if isinstance(message.channel, discord.abc.PrivateChannel):
msg = "Sorry, this command is only available in public channels."
await message.channel.send(msg)
return
if message.author.id not in self.config.admin_users:
await message.delete()
try:
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
dm_channel = message.author.dm_channel
msg = "Sorry, you do not have permission to execute that command!"
await dm_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
session = self.database_session.get_session()
channels = session.query(DiscordChannel).filter_by(channel_id=this_channel).all()
if not channels:
new_channel = DiscordChannel(this_channel)
session.add(new_channel)
session.commit()
session.close()
del session
webhook = await message.channel.create_webhook(name="_minecraft")
self.webhooks.append(webhook.url)
msg = "The bot will now start chatting here! To stop this, run `mc!stopchathere`."
await message.channel.send(msg)
else:
msg = "The bot is already chatting in this channel! To stop this, run `mc!stopchathere`."
await message.channel.send(msg)
return
elif message.content.startswith("mc!stopchathere"):
if isinstance(message.channel, discord.abc.PrivateChannel):
msg = "Sorry, this command is only available in public channels."
await message.channel.send(msg)
return
if message.author.id not in self.config.admin_users:
await message.delete()
try:
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
dm_channel = message.author.dm_channel
msg = "Sorry, you do not have permission to execute that command!"
await dm_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
session = self.database_session.get_session()
deleted = session.query(DiscordChannel).filter_by(channel_id=this_channel).delete()
session.commit()
session.close()
for webhook in await message.channel.webhooks():
if webhook.name == "_minecraft" and webhook.user == self.discord_bot.user:
# Copy the list to avoid some problems since
                        # we're deleting indices from it as we loop
# through it
if webhook.url in self.webhooks[:]:
self.webhooks.remove(webhook.url)
await webhook.delete()
if deleted < 1:
msg = "The bot was not chatting here!"
await message.channel.send(msg)
return
else:
msg = "The bot will no longer chat here!"
await message.channel.send(msg)
return
elif message.content.startswith("mc!tab"):
send_channel = message.channel
try:
if isinstance(message.channel, discord.abc.GuildChannel):
await message.delete()
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
send_channel = message.author.dm_channel
player_list = ", ".join(list(map(lambda x: x[1], self.player_list.items())))
msg = "{}\n" \
"Players online: {}\n" \
"{}".format(self.escape_markdown(
self.strip_colour(self.tab_header)), self.escape_markdown(
self.strip_colour(player_list)), self.escape_markdown(
self.strip_colour(self.tab_footer)))
await send_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
elif message.content.startswith("mc!botlink"):
send_channel = message.channel
try:
if isinstance(message.channel, discord.abc.GuildChannel):
await message.delete()
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
send_channel = message.author.dm_channel
msg = "Use the following link to invite this bot to a guild:\n{}".format(discord.utils.oauth_url(
client_id=self.discord_bot.user.id, permissions=self.bot_perms))
await send_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
elif message.content.startswith("mc!about"):
send_channel = message.channel
try:
if isinstance(message.channel, discord.abc.GuildChannel):
await message.delete()
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
send_channel = message.author.dm_channel
msg = "This bot is running minecraft-discord-bridge version {}.\n" \
"The source code is available at https://github.com/starcraft66/minecraft-discord-bridge" \
.format(minecraft_discord_bridge.__version__)
await send_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
elif message.content.startswith("mc!"):
# Catch-all
send_channel = message.channel
try:
if isinstance(message.channel, discord.abc.GuildChannel):
await message.delete()
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
send_channel = message.author.dm_channel
msg = "Unknown command, type `mc!help` for a list of commands."
await send_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
elif not message.author.bot:
session = self.database_session.get_session()
channel_should_chat = session.query(DiscordChannel).filter_by(channel_id=this_channel).first()
if channel_should_chat:
await message.delete()
discord_user = session.query(DiscordAccount).filter_by(discord_id=message.author.id).first()
if discord_user:
if discord_user.minecraft_account:
minecraft_uuid = discord_user.minecraft_account.minecraft_uuid
session.close()
del session
minecraft_username = self.mc_uuid_to_username(minecraft_uuid)
# Max chat message length: 256, bot username does not count towards this
# Does not count|Counts
# <BOT_USERNAME> minecraft_username: message
padding = 2 + len(minecraft_username)
message_to_send = self.remove_emoji(
message.clean_content.encode('utf-8').decode('ascii', 'replace')).strip()
message_to_discord = self.escape_markdown(message.clean_content)
total_len = padding + len(message_to_send)
if total_len > 256:
message_to_send = message_to_send[:(256 - padding)]
message_to_discord = message_to_discord[:(256 - padding)]
elif not message_to_send:
return
session = self.database_session.get_session()
channels = session.query(DiscordChannel).all()
session.close()
del session
if message_to_send == self.previous_message or \
datetime.now(timezone.utc) < self.next_message_time:
send_channel = message.channel
try:
if isinstance(message.channel, discord.abc.GuildChannel):
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
send_channel = message.author.dm_channel
msg = "Your message \"{}\" has been rate-limited.".format(message.clean_content)
await send_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(
message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
self.previous_message = message_to_send
self.next_message_time = datetime.now(timezone.utc) + timedelta(
seconds=self.config.message_delay)
self.logger.info("Outgoing message from discord: Username: %s Message: %s",
minecraft_username, message_to_send)
for channel in channels:
discord_channel = self.discord_bot.get_channel(channel.channel_id)
if not discord_channel:
session = self.database_session.get_session()
session.query(DiscordChannel).filter_by(channel_id=channel.channel_id).delete()
session.close()
continue
webhooks = await discord_channel.webhooks()
for webhook in webhooks:
if webhook.name == "_minecraft":
await webhook.send(
username=minecraft_username,
avatar_url="https://visage.surgeplay.com/face/160/{}".format(
minecraft_uuid),
content=message_to_discord)
packet = serverbound.play.ChatPacket()
packet.message = "{}: {}".format(minecraft_username, message_to_send)
self.connection.write_packet(packet)
else:
send_channel = message.channel
try:
if isinstance(message.channel, discord.abc.GuildChannel):
dm_channel = message.author.dm_channel
if not dm_channel:
await message.author.create_dm()
send_channel = message.author.dm_channel
msg = "Unable to send chat message: there is no Minecraft account linked to this discord " \
"account, please run `mc!register`."
await send_channel.send(msg)
return
except discord.errors.Forbidden:
if isinstance(message.author, discord.abc.User):
msg = "{}, please allow private messages from this bot.".format(message.author.mention)
error_msg = await message.channel.send(msg)
await asyncio.sleep(3)
await error_msg.delete()
return
finally:
session.close()
del session
else:
session.close()
del session
def run(self):
self.logger.debug("Checking if the server {} is online before connecting.")
if not self.config.mc_online:
self.logger.info("Connecting in offline mode...")
while not self.is_server_online():
self.logger.info('Not connecting to server because it appears to be offline.')
time.sleep(15)
self.bot_username = self.config.mc_username
self.connection = Connection(
self.config.mc_server, self.config.mc_port, username=self.config.mc_username,
handle_exception=self.minecraft_handle_exception)
else:
self.auth_token = authentication.AuthenticationToken()
try:
self.auth_token.authenticate(self.config.mc_username, self.config.mc_password)
except YggdrasilError as ex:
self.logger.info(ex)
sys.exit(os.EX_TEMPFAIL)
self.bot_username = self.auth_token.profile.name
self.logger.info("Logged in as %s...", self.auth_token.profile.name)
while not self.is_server_online():
self.logger.info('Not connecting to server because it appears to be offline.')
time.sleep(15)
self.connection = Connection(
self.config.mc_server, self.config.mc_port, auth_token=self.auth_token,
handle_exception=self.minecraft_handle_exception)
self.register_handlers(self.connection)
self.connection_retries += 1
self.reactor_thread.start()
self.connection.connect()
try:
self.aioloop.run_until_complete(self.discord_bot.start(self.config.discord_token))
except (KeyboardInterrupt, SystemExit):
# log out of discord
self.aioloop.run_until_complete(self.discord_bot.logout())
# log out of minecraft
self.connection.disconnect()
# shut down auth server
from twisted.internet import reactor
reactor.callFromThread(reactor.stop)
# clean up auth server thread
self.reactor_thread.join()
finally:
# close the asyncio event loop discord uses
self.aioloop.close()
return self.return_code
def mc_uuid_to_username(self, mc_uuid: str):
if mc_uuid not in self.uuid_cache:
try:
short_uuid = mc_uuid.replace("-", "")
mojang_response = self.req_future_session.get("https://api.mojang.com/user/profiles/{}/names".format(
short_uuid)).result().json()
if len(mojang_response) > 1:
# Multiple name changes
player_username = mojang_response[-1]["name"]
else:
# Only one name
player_username = mojang_response[0]["name"]
self.uuid_cache[mc_uuid] = player_username
return player_username
except RequestException as ex:
self.logger.error(ex, exc_info=True)
self.logger.error("Failed to lookup %s's username using the Mojang API.", mc_uuid)
else:
return self.uuid_cache[mc_uuid]
def mc_username_to_uuid(self, username: str):
if username not in self.uuid_cache.inv:
try:
player_uuid = self.req_future_session.get(
"https://api.mojang.com/users/profiles/minecraft/{}".format(username)).result().json()["id"]
long_uuid = uuid.UUID(player_uuid)
self.uuid_cache.inv[username] = str(long_uuid)
return player_uuid
except RequestException:
self.logger.error("Failed to lookup %s's username using the Mojang API.", username)
else:
return self.uuid_cache.inv[username]
def get_discord_help_string(self):
help_str = ("Admin commands:\n"
"`mc!chathere`: Starts outputting server messages in this channel\n"
"`mc!stopchathere`: Stops outputting server messages in this channel\n"
"User commands:\n"
"`mc!tab`: Sends you the content of the server's player/tab list\n"
"`mc!register`: Starts the minecraft account registration process\n"
"`mc!botlink`: Sends you the link to invite this bot to a guild\n"
"`mc!about`: Sends you information about the running bridge\n"
"To start chatting on the minecraft server, please register your account using `mc!register`.")
return help_str
# https://stackoverflow.com/questions/33404752/removing-emojis-from-a-string-in-python
def remove_emoji(self, dirty_string):
emoji_pattern = re.compile(
"["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U0001F900-\U0001FAFF" # CJK Compatibility Ideographs
# u"\U00002702-\U000027B0"
# u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', dirty_string)
def escape_markdown(self, md_string):
# Don't mess with urls
url_regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
escaped_string = ""
        # Split the message into pieces; each whitespace-separated "word" is a piece.
        # Discord ignores formatting characters in urls, so we can't just escape the
        # whole string: we need to go through the words one by one to find out what
        # is a url (don't escape) and what isn't (escape).
for piece in md_string.split(" "):
if url_regex.match(piece):
escaped_string += "{} ".format(piece)
continue
# Absolutely needs to go first or it will replace our escaping slashes!
piece = piece.replace("\\", "\\\\")
piece = piece.replace("_", "\\_")
piece = piece.replace("*", "\\*")
escaped_string += "{} ".format(piece)
if escaped_string.startswith(">"):
escaped_string = "\\" + escaped_string
        escaped_string = escaped_string.strip()
return escaped_string
def strip_colour(self, dirty_string):
colour_pattern = re.compile(
u"\U000000A7" # selection symbol
".", flags=re.UNICODE)
return colour_pattern.sub(r'', dirty_string)
def setup_logging(self, level):
if level.lower() == "debug":
log_level = logging.DEBUG
else:
log_level = logging.INFO
log_format = "%(asctime)s:%(name)s:%(levelname)s:%(message)s"
logging.basicConfig(filename="bridge_log.log", format=log_format, level=log_level)
stdout_logger = logging.StreamHandler(sys.stdout)
stdout_logger.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(stdout_logger)
def run_auth_server(self, port):
# We need to import twisted after setting up the logger because twisted hijacks our logging
from twisted.internet import reactor
from .auth_server import AuthFactory
# Create factory
factory = AuthFactory()
# Listen
self.logger.info("Starting authentication server on port %d", port)
factory.listen("", port)
reactor.run(installSignalHandlers=False)
def generate_random_auth_token(self, length):
letters = string.ascii_lowercase + string.digits + string.ascii_uppercase
return ''.join(random.choice(letters) for i in range(length))
def handle_disconnect(self, json_data=""):
self.logger.info('Disconnected.')
if json_data:
self.logger.info("Disconnect json data: %s", json_data)
if self.connection_retries >= self.config.failsafe_retries:
self.logger.info("Failed to join the server %s times in a row. Exiting.", self.connection_retries)
self.logger.info("Use a process supervisor if you wish to automatically restart the bridge.")
# This is possibly a huge hack... Since we cannot reliably raise exceptions on this thread
# for them to be caught on the main thread, we call interrupt_main to raise a KeyboardInterrupt
# on main and tell it to shut the bridge down.
self.return_code = os.EX_TEMPFAIL
_thread.interrupt_main()
return
self.previous_player_list = self.player_list.copy()
self.accept_join_events = False
self.player_list = bidict()
if self.connection.connected:
self.logger.info("Forced a disconnection because the connection is still connected.")
self.connection.disconnect(immediate=True)
time.sleep(15)
while not self.is_server_online():
self.logger.info('Not reconnecting to server because it appears to be offline.')
time.sleep(15)
self.logger.info('Reconnecting.')
self.connection_retries += 1
self.connection.connect()
def handle_disconnect_packet(self, disconnect_packet):
self.handle_disconnect(disconnect_packet.json_data)
def minecraft_handle_exception(self, exception, exc_info):
self.logger.error("A minecraft exception occured! %s:", exception, exc_info=exc_info)
self.handle_disconnect()
def is_server_online(self):
server = MinecraftServer.lookup("{}:{}".format(self.config.mc_server, self.config.mc_port))
try:
status = server.status()
del status
return True
except ConnectionRefusedError:
return False
# AttributeError: 'TCPSocketConnection' object has no attribute 'socket'
# This might not be required as it happens upstream
except AttributeError:
return False
def register_handlers(self, connection):
connection.register_packet_listener(
self.handle_join_game, clientbound.play.JoinGamePacket)
connection.register_packet_listener(
self.handle_chat, clientbound.play.ChatMessagePacket)
connection.register_packet_listener(
self.handle_health_update, clientbound.play.UpdateHealthPacket)
connection.register_packet_listener(
self.handle_disconnect_packet, clientbound.play.DisconnectPacket)
connection.register_packet_listener(
self.handle_tab_list, clientbound.play.PlayerListItemPacket)
connection.register_packet_listener(
self.handle_player_list_header_and_footer_update, clientbound.play.PlayerListHeaderAndFooterPacket)
def handle_player_list_header_and_footer_update(self, header_footer_packet):
self.logger.debug("Got Tablist H/F Update: header=%s", header_footer_packet.header)
self.logger.debug("Got Tablist H/F Update: footer=%s", header_footer_packet.footer)
self.tab_header = json.loads(header_footer_packet.header)["text"]
self.tab_footer = json.loads(header_footer_packet.footer)["text"]
def handle_tab_list(self, tab_list_packet):
self.logger.debug("Processing tab list packet")
for action in tab_list_packet.actions:
if isinstance(action, clientbound.play.PlayerListItemPacket.AddPlayerAction):
self.logger.debug(
"Processing AddPlayerAction tab list packet, name: %s, uuid: %s", action.name, action.uuid)
username = action.name
player_uuid = action.uuid
if action.name not in self.player_list.inv:
self.player_list.inv[action.name] = action.uuid
else:
                    # Sometimes we get a duplicate add packet on join; the reason is unknown.
return
if action.name not in self.uuid_cache.inv:
self.uuid_cache.inv[action.name] = action.uuid
# Initial tablist backfill
if self.accept_join_events:
webhook_payload = {
'username': username,
'avatar_url': "https://visage.surgeplay.com/face/160/{}".format(player_uuid),
'content': '',
'embeds': [{'color': 65280, 'title': '**Joined the game**'}]
}
for webhook in self.webhooks:
self.req_future_session.post(webhook, json=webhook_payload)
if self.config.es_enabled:
self.es_logger.log_connection(
uuid=action.uuid, reason=ConnectionReason.CONNECTED, count=len(self.player_list))
return
else:
# The bot's name is sent last after the initial back-fill
if action.name == self.bot_username:
self.accept_join_events = True
if self.config.es_enabled:
diff = set(self.previous_player_list.keys()) - set(self.player_list.keys())
for idx, player_uuid in enumerate(diff):
self.es_logger.log_connection(
uuid=player_uuid, reason=ConnectionReason.DISCONNECTED,
count=len(self.previous_player_list) - (idx + 1))
# Don't bother announcing the bot's own join message (who cares) but log it for analytics still
if self.config.es_enabled:
self.es_logger.log_connection(
uuid=action.uuid, reason=ConnectionReason.CONNECTED, count=len(self.player_list))
if self.config.es_enabled:
self.es_logger.log_connection(uuid=action.uuid, reason=ConnectionReason.SEEN)
if isinstance(action, clientbound.play.PlayerListItemPacket.RemovePlayerAction):
self.logger.debug("Processing RemovePlayerAction tab list packet, uuid: %s", action.uuid)
username = self.mc_uuid_to_username(action.uuid)
player_uuid = action.uuid
webhook_payload = {
'username': username,
'avatar_url': "https://visage.surgeplay.com/face/160/{}".format(player_uuid),
'content': '',
'embeds': [{'color': 16711680, 'title': '**Left the game**'}]
}
for webhook in self.webhooks:
self.req_future_session.post(webhook, json=webhook_payload)
del self.uuid_cache[action.uuid]
if action.uuid in self.player_list:
del self.player_list[action.uuid]
if self.config.es_enabled:
self.es_logger.log_connection(
uuid=action.uuid, reason=ConnectionReason.DISCONNECTED, count=len(self.player_list))
def handle_join_game(self, join_game_packet):
self.logger.info('Connected and joined game as entity id %d', join_game_packet.entity_id)
self.player_list = bidict()
self.connection_retries = 0
def handle_chat(self, chat_packet):
json_data = json.loads(chat_packet.json_data)
if "extra" not in json_data:
return
chat_string = ""
for chat_component in json_data["extra"]:
chat_string += chat_component["text"]
# Handle chat message
regexp_match = re.match("<(.*?)> (.*)", chat_string, re.M | re.I)
if regexp_match:
username = regexp_match.group(1)
original_message = regexp_match.group(2)
player_uuid = self.mc_username_to_uuid(username)
if username.lower() == self.bot_username.lower():
# Don't relay our own messages
if self.config.es_enabled:
bot_message_match = re.match("<{}> (.*?): (.*)".format(
self.bot_username.lower()), chat_string, re.M | re.I)
if bot_message_match:
self.es_logger.log_chat_message(
uuid=self.mc_username_to_uuid(bot_message_match.group(1)),
display_name=bot_message_match.group(1),
message=bot_message_match.group(2),
message_unformatted=chat_string)
self.es_logger.log_raw_message(
msg_type=chat_packet.Position.name_from_value(chat_packet.position),
message=chat_packet.json_data)
return
self.logger.info("Incoming message from minecraft: Username: %s Message: %s", username, original_message)
self.logger.debug("msg: %s", repr(original_message))
message = self.escape_markdown(self.remove_emoji(original_message.strip().replace(
"@", "@\N{zero width space}")))
webhook_payload = {
'username': username,
'avatar_url': "https://visage.surgeplay.com/face/160/{}".format(player_uuid),
'content': '{}'.format(message)
}
for webhook in self.webhooks:
self.req_future_session.post(webhook, json=webhook_payload)
if self.config.es_enabled:
self.es_logger.log_chat_message(
uuid=player_uuid, display_name=username, message=original_message, message_unformatted=chat_string)
if self.config.es_enabled:
self.es_logger.log_raw_message(
msg_type=chat_packet.Position.name_from_value(chat_packet.position),
message=chat_packet.json_data)
def handle_health_update(self, health_update_packet):
if health_update_packet.health <= 0:
self.logger.debug("Respawned the player because it died")
packet = serverbound.play.ClientStatusPacket()
packet.action_id = serverbound.play.ClientStatusPacket.RESPAWN
self.connection.write_packet(packet)
def handle_sigterm(*args, **kwargs):
raise KeyboardInterrupt
def main():
signal.signal(signal.SIGTERM, handle_sigterm)
bridge = MinecraftDiscordBridge()
return_code = bridge.run()
sys.exit(return_code)
if __name__ == "__main__":
main()
| 51.264637
| 120
| 0.555688
|
ea810793cf7893c43a0a2d045596d3ac121df09e
| 44,767
|
py
|
Python
|
backend/hqlib/metric_source/sonar.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 25
|
2016-11-25T10:41:24.000Z
|
2021-07-03T14:02:49.000Z
|
backend/hqlib/metric_source/sonar.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 783
|
2016-09-19T12:10:21.000Z
|
2021-01-04T20:39:15.000Z
|
backend/hqlib/metric_source/sonar.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 15
|
2015-03-25T13:52:49.000Z
|
2021-03-08T17:17:56.000Z
|
"""
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import functools
import logging
import json
import re
from typing import List, Tuple, Dict, Optional, Union, Sequence
from distutils.version import LooseVersion
from . import url_opener
from .. import utils, metric_source
from ..typing import DateTime, Number
def extract_branch_decorator(func):
""" Checks if product name has to be splitted into product and branch and performs the splitting."""
def _branch_param(self, product: str, *args, **kwargs) -> (str, str):
""" Return the branch url parameter. """
if self.is_branch_name_included(product):
prod = product.rsplit(":", 1)
if len(prod) == 2:
return func(self, prod[0], None if prod[1] == '' else prod[1], *args, **kwargs)
logging.warning(
"A branch name is not defined in '%s' and no component with corresponding name is found.", product)
return func(self, product, None, *args, **kwargs)
return _branch_param
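# Illustrative use of the decorator (hypothetical component key): on SonarQube >= 6.7
# with the branch plugin installed and no component named exactly "my:product:feature-x",
# a call such as sonar.code_smells("my:product:feature-x") reaches the decorated method
# as product="my:product", branch="feature-x".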
class Sonar(metric_source.TestReport):
""" Class representing the Sonar facade. """
metric_source_name = 'SonarQube'
suppression_rules = ("squid:NoSonar", "csharpsquid:S1309", "squid:S1309", "squid:S1310", "squid:S1315",
"Pylint:I0011", "Pylint:I0020")
def __init__(self, sonar_url: str, *args, **kwargs) -> None:
self._url_opener = \
url_opener.UrlOpener(username=kwargs.get("username", ""), password=kwargs.get("password", ""))
super().__init__(url=sonar_url, *args, **kwargs)
self._version_number_url = sonar_url + 'api/server/version'
version_number = LooseVersion(self.version_number()) if self.version_number() else None
self.__stuff_right_sonar_version_class(version_number, sonar_url)
def __stuff_right_sonar_version_class(self, version_number: LooseVersion, sonar_url: str):
if version_number is not None and version_number >= LooseVersion('7.0'):
self.__class__ = Sonar7
else:
self.__class__ = Sonar6
self._init_from_facade(sonar_url=sonar_url) # pylint: disable=no-member
self.__log_version_messages(version_number)
@classmethod
def __log_version_messages(cls, version_number: LooseVersion):
if version_number is not None:
if version_number < LooseVersion('5.4'):
logging.warning(
"SonarQube version lower than 5.4 is not supported. Version %s detected.", version_number)
elif version_number >= LooseVersion('9.0'):
logging.warning(
"SonarQube version %s is not supported. Supported versions are from 6.0 to 9.0(excluding).",
version_number)
@functools.lru_cache(maxsize=4096)
def _get_json(self, url: str, *args, **kwargs) -> \
Union[Dict[str, Dict], List[Dict[str, Union[str, List[Dict[str, str]]]]]]:
""" Get and evaluate the json from the url. """
json_string = self._url_opener.url_read(url, *args, **kwargs)
return utils.eval_json(json_string)
@classmethod
def _add_branch_param_to_url(cls, url: str, branch: str) -> str:
""" Adds branch url query param to the url, if defined. """
return url + "&branch=" + branch if branch else url
@functools.lru_cache(maxsize=1024)
def version_number(self) -> Optional[str]:
""" Return the version number of Sonar. """
try:
version_number = self._url_opener.url_read(self._version_number_url)
version_number = version_number.rstrip() if version_number is not None else None
logging.info("Sonar Qube server version retrieved: %s", version_number)
return version_number
except self._url_opener.url_open_exceptions:
logging.warning("Error retrieving Sonar Qube server version!")
return None
@functools.lru_cache(maxsize=1024)
def violation_sorts(self) -> List[Tuple]:
""" Returns violation sorts, depending on sonar version """
sorts = [('BUG', 'Bugs'), ('VULNERABILITY', 'Vulnerabilities'), ('CODE_SMELL', 'Code Smell')]
if self.is_security_hotspots_available():
sorts.append(('SECURITY_HOTSPOT', 'Security Hotspot'))
return sorts
@functools.lru_cache(maxsize=1024)
def is_security_hotspots_available(self):
""" Returns if the security hotspot violations are available, based on sonar version """
return self.version_number() >= LooseVersion('7.3')
def _report_datetime(self, metric_source_id: str) -> DateTime: # pragma: no cover
""" Formal overriding of an abstract method. It is never used."""
pass
class Sonar6(Sonar):
""" Class representing the Sonar instance, for apis supported in versions 5.x and 6.x. """
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
metric_source_name = 'SonarQube'
def is_branch_name_included(self, product: str) -> bool:
""" Checks if the component name includes the branch name. """
return self.version_number() \
and LooseVersion(self.version_number()) >= LooseVersion('6.7') \
and self.is_branch_plugin_installed() \
and self.is_component_absent(product)
def _init_from_facade(self, sonar_url: str):
# pylint: disable=attribute-defined-outside-init
# pylint: disable=invalid-name
self._base_dashboard_url = sonar_url + 'dashboard?id={project}'
self._base_violations_url = sonar_url + 'issues/search#resolved=false|componentRoots={component}'
self._suppressions_url = sonar_url + f"issues/search#rules={','.join(self.suppression_rules)}" + \
"|componentRoots={component}"
self._violations_type_severity_url = sonar_url + \
'project/issues?id={component}&resolved=false&types={type}&severities={severities}'
self._issues_api_url = sonar_url + 'api/issues/search?componentRoots={component}&resolved=false&rules={rule}'
self._issues_by_type_api_url = sonar_url + \
'api/issues/search?componentRoots={component}&resolved=false&types={type}'
self._issues_by_type_and_severity_api_url = sonar_url + \
'api/issues/search?componentRoots={component}&resolved=false&types={type}&severities={severities}'
self._analyses_api_url = sonar_url + 'api/project_analyses/search?project={project}&format=json&ps=1'
self._components_show_api_url = sonar_url + 'api/components/show?component={component}'
self._components_search_api_url = sonar_url + 'api/components/search?qualifiers=BRC,TRK&q={component}'
self._resource_api_url = sonar_url + 'api/resources?resource={resource}&format=json'
self._projects_api_url = sonar_url + 'api/projects/index?subprojects=true'
self._measures_api_url = sonar_url + 'api/measures/component?componentKey={component}&metricKeys={metric}'
self._false_positives_api_url = sonar_url + \
'api/issues/search?resolutions=FALSE-POSITIVE&componentRoots={resource}'
self._false_positives_url = sonar_url + 'issues/search#resolutions=FALSE-POSITIVE|componentRoots={resource}'
self._wont_fix_api_url = sonar_url + 'api/issues/search?resolutions=WONTFIX&componentRoots={resource}'
self._wont_fix_url = sonar_url + 'issues/search#resolutions=WONTFIX|componentRoots={resource}'
self._plugin_api_url = sonar_url + 'api/updatecenter/installed_plugins?format=json'
self._quality_profiles_api_url = sonar_url + 'api/qualityprofiles/search?format=json'
self._old_quality_profiles_api_url = sonar_url + 'api/profiles/list?format=json'
logging.info("Sonar class instantiated as Sonar6.")
# Coverage report API
@extract_branch_decorator
def maintainability_bugs(self, product: str, branch: str) -> int:
""" Return the number of maintainability bugs detected by sonar, for the product. """
return self.__number_of_issues(product, branch,
self._issues_by_type_api_url.format(component=product, type='BUG'), 0)
@extract_branch_decorator
def violations_type_severity(self, product: str, branch: str, violation_type: str, severity: str)\
-> (str, int, str):
""" Return the number of violations of a given type and severity, detected by sonar, for the product. """
return (
self._violations_type_severity_url.format(
component=product, type=violation_type.upper(), severities=severity.upper()),
self.__number_of_issues(
product, branch,
self._issues_by_type_and_severity_api_url.format(
component=product, type=violation_type.upper(), severities=severity.upper()), 0),
self.__time_to_fix(
branch,
self._issues_by_type_and_severity_api_url.format(
component=product, type=violation_type.upper(), severities=severity.upper()), '-')
)
@extract_branch_decorator
def vulnerabilities(self, product: str, branch: str) -> int:
""" Return the number of vulnerabilities detected by sonar, for the product. """
return self.__number_of_issues(product, branch,
self._issues_by_type_api_url.format(component=product, type='VULNERABILITY'), 0)
@extract_branch_decorator
def code_smells(self, product: str, branch: str) -> int:
""" Return the number of code smells detected by sonar, for the product. """
return self.__number_of_issues(product, branch,
self._issues_by_type_api_url.format(component=product, type='CODE_SMELL'), 0)
@extract_branch_decorator
def security_hotspots(self, product: str, branch: str) -> int:
""" Return the number of code smells detected by sonar, for the product. """
return self.__number_of_issues(
product, branch, self._issues_by_type_api_url.format(component=product, type='SECURITY_HOTSPOT'), 0)
def has_branch_coverage(self, metric_source_id: str) -> bool:
""" Determines if the branch coverage is defined on Sonar. """
# pylint: disable=no-value-for-parameter
return self.version_number() is None or self.unittest_branch_coverage(metric_source_id) != -1
def statement_coverage(self, metric_source_id: str) -> float:
""" Return the statement coverage for a specific product. """
return self.unittest_line_coverage(metric_source_id)
def branch_coverage(self, metric_source_id: str) -> float:
""" Return the branch coverage for a specific product. """
return self.unittest_branch_coverage(metric_source_id)
# Test report API
def _passed_tests(self, metric_source_id: str) -> int:
""" Return the number of passed tests as reported by the test report. """
return self.unittests(metric_source_id) - self.failing_unittests(metric_source_id)
def _failed_tests(self, metric_source_id: str) -> int:
""" Return the number of failed tests as reported by the test report. """
return self.failing_unittests(metric_source_id)
# Coverage report and test report API
def metric_source_urls(self, *metric_source_ids: str) -> Sequence[str]:
""" Return the metric source urls for human users. """
return [self.dashboard_url(metric_source_id) for metric_source_id in metric_source_ids]
# Sonar
@extract_branch_decorator
def version(self, product: str, branch: str) -> str:
""" Return the version of the product. """
url = self._add_branch_param_to_url(
self._analyses_api_url.format(project=product) + '&category=VERSION', branch)
try:
analyses_json = self._get_json(url, log_error=False)
try:
return analyses_json['analyses'][0]['events'][0]['name']
except (KeyError, IndexError) as reason:
logging.warning("Couldn't get version number of %s from JSON %s (retrieved from %s): %s",
product, analyses_json, url, reason)
return '?'
except self._url_opener.url_open_exceptions:
# Try older API:
url = self._add_branch_param_to_url(self._resource_api_url.format(resource=product), branch)
try:
analyses_json = self._get_json(url)
except self._url_opener.url_open_exceptions:
return '?'
try:
return analyses_json[0]['version']
except (KeyError, IndexError) as reason:
logging.warning("Couldn't get version number of %s from JSON %s (retrieved from %s): %s",
product, analyses_json, url, reason)
return '?'
@classmethod
def __normalize_version_build(cls, version: str) -> str:
if '(build' in version:
version_parts = version.split("(build")
return version_parts[0].strip() + '.' + version_parts[1].split(")")[0].strip()
return version
@functools.lru_cache(maxsize=4096)
def _is_commercialy_licensed(self) -> bool:
try:
return LooseVersion(self.version_number()) >= LooseVersion('7.3')\
and self._get_valid_license_json()["isValidLicense"]
except KeyError as reason:
logging.warning('Error parsing json license information response: %s.', reason)
return False
except self._url_opener.url_open_exceptions:
logging.warning('Error retrieving commercial license information.')
return False
def plugin_version(self, plugin: str) -> str:
""" Return the version of the SonarQube plugin. """
try:
plugins = self._get_plugins_json()
except self._url_opener.url_open_exceptions:
return '0.0'
mapping = dict((plugin['key'], self.__normalize_version_build(plugin['version'])) for plugin in plugins)
return mapping.get(plugin, '0.0')
def plugins_url(self) -> str:
""" Return the url to the plugin update center. """
return self.url() + 'updatecenter/'
def default_quality_profile(self, language: str) -> str:
""" Return the default quality profile for the language. """
url = self._quality_profiles_api_url
try:
profiles = self._get_json(url)['profiles']
        except self._url_opener.url_open_exceptions + (KeyError, TypeError):  # pylint: disable=wrong-exception-operation
# Try old API
url = self._old_quality_profiles_api_url
try:
profiles = self._get_json(url)
except self._url_opener.url_open_exceptions:
return '' # Give up
for profile in profiles:
if profile.get("language") == language:
for keyword in ('isDefault', 'default'):
if profile.get(keyword):
return profile['name']
logging.warning("Couldn't find a default quality profile for %s in %s, retrieved from %s", language, profiles,
url)
return ''
def quality_profiles_url(self) -> str:
""" Return the quality profiles url. """
return self.url() + 'profiles/'
@functools.lru_cache(maxsize=4096)
def is_branch_plugin_installed(self) -> bool:
""" Return whether SonarQube has the branch plugin installed, which is needed for interpreting Sonar keys. """
try:
plugins = self._get_plugins_json()
if "branch" in [item["key"] for item in plugins]:
return True
logging.info("Branch plugin not installed.")
except self._url_opener.url_open_exceptions as reason:
logging.warning("Couldn't open %s: %s", self._plugin_api_url, reason)
except (TypeError, ValueError) as reason:
logging.error("Error parsing response from %s: '%s'. "
"Assume the branch plugin is not installed.", self._plugin_api_url, reason)
return False
def _get_plugins_json(self):
return self._get_json(self._plugin_api_url)
def _get_valid_license_json(self):
return self._get_json(self._is_valid_license_api_url)
@functools.lru_cache(maxsize=4096)
def is_component_absent(self, product: str) -> bool:
""" Checks if the component with complete name, including branch, is defined """
url = self._components_show_api_url.format(component=product)
try:
if json.loads(self._url_opener.url_read(url, log_error=False))["component"]:
logging.info("Component '%s' found. No branch is defined.", product)
return False
except (ValueError, KeyError):
pass
except self._url_opener.url_open_exceptions:
pass
return True
# Sonar projects
def _has_project(self, project: str, branch) -> bool:
""" Return whether Sonar has the project (analysis). """
version = self.version_number()
if not version:
return False
if version >= "6.3":
# We use the components/search API and not the project_analyses/search API because the former supports
# searching for subprojects and the latter does not.
url = self._add_branch_param_to_url(self._components_search_api_url.format(component=project), branch)
try:
return self._has_paging_total(project, url)
            # pylint: disable=wrong-exception-operation
except self._url_opener.url_open_exceptions + (KeyError, IndexError, TypeError, ValueError) as reason:
logging.warning("Sonar has no analysis of %s: %s", project, reason)
return False
else:
found = project in self.__projects(branch)
if not found:
logging.warning("Sonar has no analysis of %s", project)
return found
def _has_paging_total(self, project, url) -> bool:
count = int(self._get_json(url)["paging"]["total"])
if count == 0:
logging.warning("Sonar has no analysis of %s", project)
return count > 0
def __projects(self, branch) -> List[str]:
""" Return all projects in Sonar. """
try:
projects_json = self._get_json(self._add_branch_param_to_url(self._projects_api_url, branch))
return [project['k'] for project in projects_json]
except self._url_opener.url_open_exceptions:
return []
# Metrics
@extract_branch_decorator
def ncloc(self, product: str, branch: str) -> int:
""" Non-comment lines of code. """
return int(self._metric(product, 'ncloc', branch))
@extract_branch_decorator
def lines(self, product: str, branch: str) -> int:
""" Bruto lines of code, including comments, whitespace, javadoc. """
return int(self._metric(product, 'lines', branch))
@extract_branch_decorator
def major_violations(self, product: str, branch: str) -> int:
""" Return the number of major violations for the product. """
return int(self._metric(product, 'major_violations', branch))
@extract_branch_decorator
def critical_violations(self, product: str, branch: str) -> int:
""" Return the number of critical violations for the product. """
return int(self._metric(product, 'critical_violations', branch))
@extract_branch_decorator
def blocker_violations(self, product: str, branch: str) -> int:
""" Return the number of blocker violations for the product. """
return int(self._metric(product, 'blocker_violations', branch))
@extract_branch_decorator
def duplicated_lines(self, product: str, branch: str) -> int:
""" Return the number of duplicated lines for the product. """
return int(self._metric(product, 'duplicated_lines', branch))
@extract_branch_decorator
def unittest_line_coverage(self, product: str, branch: str) -> float:
""" Return the line coverage of the unit tests for the product. """
return float(self._metric(product, 'line_coverage', branch))
@extract_branch_decorator
def unittest_branch_coverage(self, product: str, branch: str) -> float:
""" Return the branch coverage of the unit tests for the product. """
return float(self._metric(product, 'branch_coverage', branch))
@extract_branch_decorator
def unittests(self, product: str, branch: str) -> int:
""" Return the number of unit tests for the product. """
return int(self._metric(product, 'tests', branch))
@extract_branch_decorator
def failing_unittests(self, product: str, branch: str) -> int:
""" Return the number of failing unit tests for the product. """
failures = int(self._metric(product, 'test_failures', branch))
errors = int(self._metric(product, 'test_errors', branch))
return failures + errors if failures >= 0 and errors >= 0 else -1
@extract_branch_decorator
def methods(self, product: str, branch: str) -> int:
""" Return the number of methods/functions in the product. """
return int(self._metric(product, 'functions', branch))
@extract_branch_decorator
def dashboard_url(self, product: str, branch: str) -> str:
""" Return the url for the Sonar dashboard for the product. """
return self._add_branch_param_to_url(self._base_dashboard_url.format(project=product), branch)
# Violations
@extract_branch_decorator
def complex_methods(self, product: str, branch: str) -> int:
""" Return the number of methods that violate the Cyclomatic complexity threshold. """
rule_names = ('checkstyle:com.puppycrawl.tools.checkstyle.checks.metrics.CyclomaticComplexityCheck',
'pmd:CyclomaticComplexity',
'squid:MethodCyclomaticComplexity',
'csharpsquid:S1541',
'csharpsquid:FunctionComplexity',
'javascript:FunctionComplexity',
'Web:ComplexityCheck',
'python:FunctionComplexity',
'vbnet:S1541',
'tslint:cyclomatic-complexity')
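        # Different SonarQube language plugins report cyclomatic-complexity violations under different
        # rule keys; the loop below returns the count from the first rule that yields a non-zero result.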
for rule_name in rule_names:
nr_complex_methods = self._rule_violation(product, rule_name, 0, branch)
if nr_complex_methods:
return nr_complex_methods
return 0
@extract_branch_decorator
def long_methods(self, product: str, branch: str) -> int:
""" Return the number of methods in the product that have to many non-comment statements. """
# NB: There is no long methods rule for C# and VB.NET. How to deal with this? FIXME
rule_names = ('squid:S138',
'checkstyle:com.puppycrawl.tools.checkstyle.checks.metrics.JavaNCSSCheck',
'Pylint:R0915',
'Web:LongJavaScriptCheck')
for rule_name in rule_names:
nr_long_methods = self._rule_violation(product, rule_name, 0, branch)
if nr_long_methods:
return nr_long_methods
return 0
@extract_branch_decorator
def many_parameters_methods(self, product: str, branch: str) -> int:
""" Return the number of methods in the product that have too many parameters. """
rule_names = ('checkstyle:com.puppycrawl.tools.checkstyle.checks.metrics.ParameterNumberCheck',
'pmd:ExcessiveParameterList',
'csharpsquid:S107',
'squid:S00107',
'javascript:ExcessiveParameterList',
'python:S107')
for rule_name in rule_names:
nr_many_parameters = self._rule_violation(product, rule_name, 0, branch)
if nr_many_parameters:
return nr_many_parameters
return 0
@extract_branch_decorator
def commented_loc(self, product: str, branch: str) -> int:
""" Return the number of commented out lines in the source code of the product. """
rule_names = ('csharpsquid:S125', 'csharpsquid:CommentedCode', 'squid:CommentedOutCodeLine',
'javascript:CommentedCode', 'python:S125', 'Web:AvoidCommentedOutCodeCheck')
for rule_name in rule_names:
nr_commented_loc = self._rule_violation(product, rule_name, 0, branch)
if nr_commented_loc:
return nr_commented_loc
return 0
@extract_branch_decorator
def suppressions(self, product: str, branch: str) -> int:
""" Return the number of violation suppressions in the source code of the product. """
total = 0
for rule_name in self.suppression_rules:
suppressions = self._rule_violation(product, rule_name, -1, branch)
if suppressions == -1:
return -1
total += suppressions
return total
@extract_branch_decorator
def suppressions_url(self, product: str, branch: str) -> str:
""" Return the url for the suppression of rules in the source code. """
return self._add_branch_param_to_url(self._suppressions_url.format(component=product), branch)
@extract_branch_decorator
def violations_url(self, product: str, branch: str) -> str:
""" Return the url for the violations of the product. """
return self._add_branch_param_to_url(self._base_violations_url.format(component=product), branch)
# Issues
@extract_branch_decorator
def false_positives(self, product: str, branch: str) -> int:
""" Return the number of false positives listed for the product. """
return self.__number_of_issues(product, branch, self._false_positives_api_url.format(resource=product), 0)
@extract_branch_decorator
def false_positives_url(self, product: str, branch: str) -> str:
""" Return the url to the list of false positives. """
return self._add_branch_param_to_url(self._false_positives_url.format(resource=product), branch)
@extract_branch_decorator
def wont_fix(self, product: str, branch: str) -> int:
""" Return the number of won't fix issues listed for the product. """
return self.__number_of_issues(product, branch, self._wont_fix_api_url.format(resource=product), 0)
@extract_branch_decorator
def wont_fix_url(self, product: str, branch: str) -> str:
""" Return the url to the list of won't fix issues. """
return self._add_branch_param_to_url(self._wont_fix_url.format(resource=product), branch)
# Meta data
def datetime(self, *products: str) -> DateTime:
""" Return the date and time of the last analysis of the product. """
if not products:
return datetime.datetime.min
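        # Reuse the branch-extraction decorator on a pass-through lambda so the combined metric source id
        # is split into its product and branch parts.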
split_branch = extract_branch_decorator(lambda this, x, a: (x, a))
product, branch = split_branch(self, products[0])
server_version = self.version_number()
if server_version and LooseVersion(server_version) >= LooseVersion('6.4'):
# Use the components API, it should contain the analysis date both for projects and components
url = self._add_branch_param_to_url(self._components_show_api_url.format(component=product), branch)
try:
components_json = self._get_json(url)
try:
datetime_string = components_json['component']['analysisDate']
datetime_string = datetime_string.split('+')[0] # Ignore timezone
return datetime.datetime.strptime(datetime_string, '%Y-%m-%dT%H:%M:%S')
except (TypeError, KeyError, IndexError) as reason:
logging.error("Couldn't get date of last analysis of %s from JSON %s (retrieved from %s): %s",
product, components_json, url, reason)
except self._url_opener.url_open_exceptions:
pass
return datetime.datetime.min
# Use analyses API:
url = self._add_branch_param_to_url(self._analyses_api_url.format(project=product), branch)
try:
components_json = self._get_json(url, log_error=False)['analyses']
except self._url_opener.url_open_exceptions:
# Try older API:
url = self._add_branch_param_to_url(self._resource_api_url.format(resource=product), branch)
try:
components_json = self._get_json(url)
except self._url_opener.url_open_exceptions:
return datetime.datetime.min
try:
datetime_string = components_json[0]['date']
except (TypeError, KeyError, IndexError) as reason:
logging.error("Couldn't get date of last analysis of %s from JSON %s (retrieved from %s): %s",
product, components_json, url, reason)
return datetime.datetime.min
datetime_string = datetime_string.split('+')[0] # Ignore timezone
return datetime.datetime.strptime(datetime_string, '%Y-%m-%dT%H:%M:%S')
# Helper methods
def _get_measure_value(self, url: str, metric_name: str, product: str):
""" Gets measures. """
measures_json = self._get_json(url, log_error=False)
try:
for measure in measures_json['component']['measures']:
if measure['metric'] == metric_name:
return float(measure['value'])
failure_reason = 'metric not found in component measures'
except (TypeError, KeyError, IndexError, ValueError) as reason:
failure_reason = reason
logging.warning("Can't get %s value for %s from %s (retrieved from %s): %s", metric_name, product,
measures_json, url, failure_reason)
return -1
@functools.lru_cache(maxsize=4096)
def _metric(self, product: str, metric_name: str, branch: str) -> Number:
""" Return a specific metric value for the product. """
try:
return self._get_measure_of_product(branch, metric_name, product)
except self._url_opener.url_open_exceptions:
pass # Keep going, and try the old API
url = self._add_branch_param_to_url(
self._resource_api_url.format(resource=product) + '&metrics=' + metric_name, branch)
try:
measures_json = self._get_json(url)
try:
return float(measures_json[0]["msr"][0]["val"])
except (TypeError, KeyError, IndexError, ValueError) as reason:
logging.warning("Can't get %s value for %s from %s (retrieved from %s): %s", metric_name, product,
measures_json, url, reason)
return -1
except self._url_opener.url_open_exceptions:
return -1
def _get_measure_of_product(self, branch, metric_name, product):
if not self._has_project(product, branch):
return -1
url = self._add_branch_param_to_url(
self._measures_api_url.format(component=product, metric=metric_name), branch)
return self._get_measure_value(url, metric_name, product)
def _rule_violation(self, product: str, rule_name: str, default: int = 0, branch: str = None) -> int:
""" Return a specific violation value for the product. """
if not self._has_project(product, branch):
return -1
try:
issues_json = self._get_json(
self._add_branch_param_to_url(self._issues_api_url.format(component=product, rule=rule_name), branch))
except self._url_opener.url_open_exceptions:
return default
return int(issues_json['paging']['total'])
def __number_of_issues(self, product: str, branch: str, url: str, default: int = 0) -> int:
""" Return the number of issues retrieved by given url. """
if not self._has_project(product, branch):
return -1
try:
issues_json = self._get_json(self._add_branch_param_to_url(url, branch))
except self._url_opener.url_open_exceptions:
return default
return int(issues_json['total'])
def __time_to_fix(self, branch: str, url: str, default: str = '-') -> str:
""" Return the number of issues retrieved by given url. """
try:
total_minutes = 0
issues = self.__get_all_issues(url, branch)
for issue in issues:
total_minutes += self.__add_effort(issue['effort']) if 'effort' in issue else 0
return self.__format_time_to_fix(total_minutes)
except self._url_opener.url_open_exceptions:
pass
return default
def __get_all_issues(self, url: str, branch: str) -> List:
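        # Page through the issues API, accumulating the 'issues' of each response until
        # page_index * pageSize covers the total reported by the server.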
url += '&pageSize=-1&pageIndex={page_index}'
page_index = 1
result_list = []
while True:
current_json = self._get_json(self._add_branch_param_to_url(url.format(page_index=page_index), branch))
result_list.extend(current_json['issues'])
if page_index * current_json['paging']['pageSize'] >= current_json['paging']['total']:
break
page_index += 1
return result_list
@staticmethod
def __format_time_to_fix(total_minutes: int) -> str:
hours, minutes = divmod(total_minutes, 60)
return '{0}h {1:02}min'.format(hours, minutes) if hours > 0 else '{0:2}min'.format(minutes)
@staticmethod
def __add_effort(effort: str) -> int:
# Use a regex pattern to capture days, hours and minutes
pattern = r'^((?P<days>\d+)d)? *((?P<hours>\d+)h)? *((?P<minutes>\d+)min)?$'
match = re.match(pattern, effort)
if not match:
logging.warning('Invalid format of field effort: %s', effort)
return 0
multipliers = {'days': 24 * 60, 'hours': 60, 'minutes': 1}
match_dict = match.groupdict()
return sum([multipliers[key] * int(match_dict[key])
for key in match_dict if match_dict[key]])
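# Illustrative example (not part of the original source): with the multipliers above, an effort string
# such as "1d 2h 30min" parses to 1 * 1440 + 2 * 60 + 30 = 1590 minutes, which __format_time_to_fix
# renders as '26h 30min'.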
class Sonar7(Sonar6):
""" Class representing the Sonar instance, for apis supported in versions 5.x and 6.x. """
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
metric_source_name = 'SonarQube'
def _init_from_facade(self, sonar_url: str):
# pylint: disable=attribute-defined-outside-init
# pylint: disable=invalid-name
self._base_dashboard_url = sonar_url + 'dashboard?id={project}'
self._base_violations_url = sonar_url + 'project/issues?id={component}&resolved=false'
self._suppressions_url = sonar_url + "project/issues?id={component}&" + \
f"rules={','.join(self.suppression_rules)}"
self._violations_type_severity_url = sonar_url + \
'project/issues?id={component}&resolved=false&types={type}&severities={severities}'
self._issues_api_url = sonar_url + 'api/issues/search?componentKeys={component}&resolved=false&rules={rule}'
self._issues_by_type_api_url = sonar_url + \
'api/issues/search?componentKeys={component}&resolved=false&types={type}'
self._issues_by_type_and_severity_api_url = sonar_url + \
'api/issues/search?componentKeys={component}&resolved=false&types={type}&severities={severities}'
self._analyses_api_url = sonar_url + 'api/project_analyses/search?project={project}&format=json&ps=1'
self._components_show_api_url = sonar_url + 'api/components/show?component={component}'
self._components_search_api_url = sonar_url + 'api/components/search?qualifiers=BRC,TRK&q={component}'
self._measures_api_url = sonar_url + 'api/measures/component?component={component}&metricKeys={metric}'
self._false_positives_api_url = sonar_url + \
'api/issues/search?resolutions=FALSE-POSITIVE&componentKeys={resource}'
self._false_positives_url = sonar_url + 'project/issues?id={resource}&resolutions=FALSE-POSITIVE'
self._wont_fix_api_url = sonar_url + 'api/issues/search?resolutions=WONTFIX&componentKeys={resource}'
self._wont_fix_url = sonar_url + 'project/issues?id={resource}&resolutions=WONTFIX'
self._plugin_api_url = sonar_url + 'api/plugins/installed'
self._is_valid_license_api_url = sonar_url + 'api/editions/is_valid_license'
self._quality_profiles_api_url = sonar_url + 'api/qualityprofiles/search?format=json'
logging.info("Sonar class instantiated as Sonar7.")
def is_branch_name_included(self, product: str) -> bool:
""" Checks if the component name includes the branch name. """
return self._is_commercialy_licensed() or\
(self.is_branch_plugin_installed() and self.is_component_absent(product))
def _get_plugins_json(self):
return self._get_json(self._plugin_api_url)['plugins']
@extract_branch_decorator
def version(self, product: str, branch: str) -> str:
""" Return the version of the product. """
url = self._add_branch_param_to_url(
self._analyses_api_url.format(project=product) + '&category=VERSION', branch)
try:
analyses_json = self._get_json(url, log_error=False)
try:
return analyses_json['analyses'][0]['events'][0]['name']
except (KeyError, IndexError) as reason:
logging.warning("Couldn't get version number of %s from JSON %s (retrieved from %s): %s",
product, analyses_json, url, reason)
return '?'
except self._url_opener.url_open_exceptions:
return '?'
def datetime(self, *products: str) -> DateTime:
""" Return the date and time of the last analysis of the product. """
if products:
split_branch = extract_branch_decorator(lambda this, x, a: (x, a))
product, branch = split_branch(self, products[0])
if product:
url = self._add_branch_param_to_url(self._components_show_api_url.format(component=product), branch)
try:
components_json = self._get_json(url)
try:
datetime_string = components_json['component']['analysisDate']
datetime_string = datetime_string.split('+')[0] # Ignore timezone
return datetime.datetime.strptime(datetime_string, '%Y-%m-%dT%H:%M:%S')
except (ValueError, KeyError) as reason:
logging.error("Couldn't get date of last analysis of %s from JSON %s (retrieved from %s): %s",
product, components_json, url, reason)
except self._url_opener.url_open_exceptions:
pass
return datetime.datetime.min
def _has_project(self, project: str, branch) -> bool:
""" Return whether Sonar has the project (analysis). """
if self.version_number():
url = self._add_branch_param_to_url(self._components_search_api_url.format(component=project), branch)
try:
return self._has_paging_total(project, url)
            # pylint: disable=wrong-exception-operation
except self._url_opener.url_open_exceptions + (KeyError, IndexError, TypeError, ValueError) as reason:
logging.warning("Sonar has no analysis of %s: %s", project, reason)
return False
def __get_rule_violations(self, product, branch, rule_names, function_name):
try:
return self._rule_violation(product, rule_names, -1, branch)
except KeyError as reason:
logging.error("Error parsing json response in %s: %s", function_name, reason)
return -1
@extract_branch_decorator
def many_parameters_methods(self, product: str, branch: str) -> int:
""" Return the number of methods in the product that have too many parameters. """
rule_names = 'checkstyle:com.puppycrawl.tools.checkstyle.checks.metrics.ParameterNumberCheck,' \
'pmd:ExcessiveParameterList,' \
'csharpsquid:S107,' \
'squid:S00107,' \
'javascript:ExcessiveParameterList,' \
'python:S107'
return self.__get_rule_violations(product, branch, rule_names, 'many_parameters_methods')
@extract_branch_decorator
def long_methods(self, product: str, branch: str) -> int:
""" Return the number of methods in the product that have to many non-comment statements. """
# NB: There is no long methods rule for C# and VB.NET. How to deal with this? FIXME
rule_names = 'squid:S138,' \
'checkstyle:com.puppycrawl.tools.checkstyle.checks.metrics.JavaNCSSCheck,' \
'Pylint:R0915,' \
'Web:LongJavaScriptCheck'
return self.__get_rule_violations(product, branch, rule_names, 'long_methods')
@extract_branch_decorator
def complex_methods(self, product: str, branch: str) -> int:
""" Return the number of methods that violate the Cyclomatic complexity threshold. """
rule_names = 'checkstyle:com.puppycrawl.tools.checkstyle.checks.metrics.CyclomaticComplexityCheck,' \
'pmd:CyclomaticComplexity,' \
'squid:MethodCyclomaticComplexity,' \
'csharpsquid:S1541,' \
'csharpsquid:FunctionComplexity,' \
'javascript:FunctionComplexity,' \
'Web:ComplexityCheck,' \
'python:FunctionComplexity,' \
'vbnet:S1541,' \
'tslint:cyclomatic-complexity'
return self.__get_rule_violations(product, branch, rule_names, 'complex_methods')
@extract_branch_decorator
def commented_loc(self, product: str, branch: str) -> int:
""" Return the number of commented out lines in the source code of the product. """
rule_names = 'csharpsquid:S125,' \
'csharpsquid:CommentedCode,' \
'squid:CommentedOutCodeLine,' \
'javascript:CommentedCode,' \
'python:S125,' \
'Web:AvoidCommentedOutCodeCheck'
return self.__get_rule_violations(product, branch, rule_names, 'commented_loc')
@extract_branch_decorator
def suppressions(self, product: str, branch: str) -> int:
""" Return the number of suppressions in the source code of the product. """
return self.__get_rule_violations(product, branch, ','.join(self.suppression_rules), 'suppressions')
@functools.lru_cache(maxsize=4096)
def _metric(self, product: str, metric_name: str, branch: str) -> Number:
""" Return a specific metric value for the product. """
try:
return self._get_measure_of_product(branch, metric_name, product)
except self._url_opener.url_open_exceptions:
return -1
def default_quality_profile(self, language: str) -> str:
""" Return the default quality profile for the language. """
url = self._quality_profiles_api_url
try:
profiles = self._get_json(url)['profiles']
        except self._url_opener.url_open_exceptions + (KeyError, TypeError):  # pylint: disable=wrong-exception-operation
return '' # Give up
for profile in profiles:
if profile.get("language") == language:
for keyword in ('isDefault', 'default'):
if profile.get(keyword):
return profile['name']
logging.warning("Couldn't find a default quality profile for %s in %s, retrieved from %s", language, profiles,
url)
return ''
| 49.63082
| 119
| 0.643644
|
761ecae0961721822835283f6d97e540c224bf59
| 1,579
|
py
|
Python
|
pyston/order/parsers.py
|
zamazaljiri/django-pyston
|
38adee8e96fc807e5b529f6f18093bf2a920e8d5
|
[
"BSD-3-Clause"
] | 7
|
2017-03-23T15:00:30.000Z
|
2021-02-24T17:41:15.000Z
|
pyston/order/parsers.py
|
zamazaljiri/django-pyston
|
38adee8e96fc807e5b529f6f18093bf2a920e8d5
|
[
"BSD-3-Clause"
] | 36
|
2016-05-11T07:51:08.000Z
|
2022-02-24T14:12:48.000Z
|
pyston/order/parsers.py
|
zamazaljiri/django-pyston
|
38adee8e96fc807e5b529f6f18093bf2a920e8d5
|
[
"BSD-3-Clause"
] | 29
|
2016-05-01T21:26:30.000Z
|
2022-03-18T17:17:04.000Z
|
from pyston.utils import LOOKUP_SEP
from .utils import DIRECTION
class OrderParserError(Exception):
"""
Exception that is raised if order input is invalid.
"""
pass
class OrderTerm:
"""
    Simple order term that contains the list of order identifiers, the direction and the source input value,
    which is used to assemble error messages.
"""
def __init__(self, identifiers, direction, source):
self.identifiers = identifiers
self.direction = direction
self.source = source
class OrderParser:
"""
Abstract order parser.
"""
def parse(self, request):
"""
:param request: Django HTTP request.
:return: returns list of order terms.
"""
raise NotImplementedError
class DefaultOrderParser(OrderParser):
"""
    Default order parser that accepts an order filter from the request context.
    E.g.:
/api/user?order=first_name,-created_at
"""
def _clean_order_term(self, ordering_string):
ordering_string = ordering_string.strip()
if ordering_string.startswith('-'):
direction = DIRECTION.DESC
ordering_string = ordering_string[1:]
else:
direction = DIRECTION.ASC
identifiers = ordering_string.split(LOOKUP_SEP)
return OrderTerm(identifiers, direction, ordering_string)
def parse(self, request):
order_fields = request._rest_context.get('order')
if order_fields:
return (self._clean_order_term(ordering_string) for ordering_string in order_fields.split(','))
else:
return None
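# Minimal usage sketch (assumes a request whose _rest_context contains {'order': 'first_name,-created_at'}):
# DefaultOrderParser().parse(request) then yields an OrderTerm(['first_name'], DIRECTION.ASC, ...) and an
# OrderTerm(['created_at'], DIRECTION.DESC, ...).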
| 25.467742
| 119
| 0.648512
|
0b48b19a97bde11781e95b510d2c54d7f94b1437
| 1,616
|
py
|
Python
|
display.py
|
weightan/SchrodingerWellPython
|
ceed86cdbe935f523be61616c1b81877c9d5d74f
|
[
"MIT"
] | 21
|
2021-03-22T08:26:44.000Z
|
2022-02-15T02:10:43.000Z
|
display.py
|
weightan/SchrodingerWellPython
|
ceed86cdbe935f523be61616c1b81877c9d5d74f
|
[
"MIT"
] | 1
|
2021-03-28T02:28:21.000Z
|
2021-03-28T02:28:21.000Z
|
display.py
|
weightan/SchrodingerWellPython
|
ceed86cdbe935f523be61616c1b81877c9d5d74f
|
[
"MIT"
] | 2
|
2021-03-24T23:35:20.000Z
|
2021-03-28T02:22:13.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
e_vec = np.load('data_E_vectors_circle200x200e120.npy')
N = 300
print('done load')
def cMap1():
v = 10
k = 256
vals = np.ones((k, 4))
vals[:, 0] = np.array([(i % v)/v for i in range(k)])
vals[:, 1] = np.array([((i + 5) % v)/v for i in range(k)])
vals[:, 2] = np.array([((i + 7) % v)/v for i in range(k)])
newcmp = ListedColormap(vals)
return newcmp
def cMap2():
colors = [(234/255, 230/255, 202/255),
(114/255, 0, 0),
(234/255, 230/255, 202/255),
(114/255, 0, 0),
(234/255, 230/255, 202/255),
(114/255, 0, 0),
(30/255, 23/255, 20/255),
(234/255, 230/255, 202/255),
(114/255, 0, 0),
(30/255, 23/255, 20/255),
(234/255, 230/255, 202/255),
(30/255, 23/255, 20/255),
(114/255, 0, 0)] # R -> G -> B
cmap = LinearSegmentedColormap.from_list('my_list', colors, N=30)
return cmap
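# Note (descriptive, not from the original source): cMap1 builds a 256-entry ListedColormap from modular
# color ramps, while cMap2 interpolates through the listed RGB tuples in 30 discrete steps.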
for i in range(100, 120):
figure(num = None, figsize=(6, 6), dpi=300)
plt.axis('off')
    temp = np.absolute(e_vec[:, i].reshape(N, N)) ** 2  # probability density |psi|^2 of eigenstate i
newcmp = cMap2()
#newcmp = 'flag_r'
plot = plt.imshow(temp, cmap = newcmp, interpolation='lanczos')
plt.savefig( 'A' + str(i) + 'test' + '.png', bbox_inches = 'tight')
print(' + ' + str(i))
plt.show()
plt.close()
print('done saving')
| 22.760563
| 71
| 0.530322
|
7ac77ed90febbec89de9ce4985c76b4be6ae614b
| 6,832
|
py
|
Python
|
scripts/us_epa/superfund/site_hazards/process_sites_hazards.py
|
rpatil524/data
|
9e76c7f22a75ad4e52522444a080ed3f5c6da7dd
|
[
"Apache-2.0"
] | null | null | null |
scripts/us_epa/superfund/site_hazards/process_sites_hazards.py
|
rpatil524/data
|
9e76c7f22a75ad4e52522444a080ed3f5c6da7dd
|
[
"Apache-2.0"
] | null | null | null |
scripts/us_epa/superfund/site_hazards/process_sites_hazards.py
|
rpatil524/data
|
9e76c7f22a75ad4e52522444a080ed3f5c6da7dd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exposure and risks of superfund sites to natural hazards
This dataset is associated with the following publication: Summers, K., A. Lamaper, and K. Buck. National Hazards Vulnerability and the Remediation, Restoration and Revitalization of Contaminated Sites – 1. Superfund. ENVIRONMENTAL MANAGEMENT. Springer-Verlag, New York, NY, USA, 14, (2021).
This script processes the file:
- ./data/SF_CRSI_OLEM.xlsx
The dataset lists all active and upcoming Superfund sites and their vulnerability to 12 natural hazards using a vulnerability score between 0 and 100. Additional risk/exposure metrics are also imported.
"""
import os
from absl import app, flags
import pandas as pd
FLAGS = flags.FLAGS
flags.DEFINE_string('site_hazard_input_path', './data',
'Path to the directory with input files')
flags.DEFINE_string(
'site_hazard_output_path', './data/output',
'Path to the directory where generated files are to be stored.')
_RISK_TEMPLATE_MCF = """Node: E:SuperfundSite->E0
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_HurricaneEvent
value: C:SuperfundSite->HURR_EXP
Node: E:SuperfundSite->E1
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_TornadoEvent
value: C:SuperfundSite->TORN_EXP
Node: E:SuperfundSite->E2
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_LandslideEvent
value: C:SuperfundSite->LSLD_EXP
Node: E:SuperfundSite->E3
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_ExtremeColdWindChillEvent
value: C:SuperfundSite->LTMP_EXP
Node: E:SuperfundSite->E4
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_ExcessiveHeatEvent
value: C:SuperfundSite->HTMP_EXP
Node: E:SuperfundSite->E5
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_HailEvent
value: C:SuperfundSite->HAIL_EXP
Node: E:SuperfundSite->E6
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_WildfireEvent
value: C:SuperfundSite->FIRE_EXP
Node: E:SuperfundSite->E7
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_EarthquakeEvent
value: C:SuperfundSite->EQ_EXP
Node: E:SuperfundSite->E8
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_DroughtEvent
value: C:SuperfundSite->DRGH_EXP
Node: E:SuperfundSite->E9
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_FloodEvent
value: C:SuperfundSite->IFLD_EXP
Node: E:SuperfundSite->E10
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_CoastalFloodEvent
value: C:SuperfundSite->CFLD_EXP
Node: E:SuperfundSite->E11
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_HighWindEvent
value: C:SuperfundSite->WIND_EXP
Node: E:SuperfundSite->E12
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardExposureScore_SuperfundSite
value: C:SuperfundSite->EXPOSURE_SCORE
Node: E:SuperfundSite->E13
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite
value: C:SuperfundSite->RISK_SCORE
Node: E:SuperfundSite->E14
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:CrsiScore_SuperfundSite
value: C:SuperfundSite->CRSI_SCORE
"""
_DATASET_NAME = "./SF_CRSI_OLEM.xlsx"
_DATA_COLS = [
'Site_EPA_ID', 'CFLD_EXP', 'IFLD_EXP', 'DRGH_EXP', 'EQ_EXP', 'FIRE_EXP',
'HAIL_EXP', 'HTMP_EXP', 'LTMP_EXP', 'HURR_EXP', 'LSLD_EXP', 'TORN_EXP',
'WIND_EXP', 'EXPOSURE_SCORE', 'RISK_SCORE', 'CRSI_SCORE'
]
def process_site_hazards(input_path: str, output_path: str) -> int:
"""
Processes ./SF_CRSI_OLEM.xlsx to generate clean csv and tmcf files
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
risk_score = pd.read_excel(os.path.join(input_path, _DATASET_NAME),
usecols=_DATA_COLS)
risk_score[
'Site_EPA_ID'] = 'epaSuperfundSiteId/' + risk_score['Site_EPA_ID']
risk_score['observationDate'] = 2021
risk_score.to_csv(os.path.join(output_path, 'superfund_hazardExposure.csv'),
index=False)
f = open(os.path.join(output_path, 'superfund_hazardExposure.tmcf'), 'w')
f.write(_RISK_TEMPLATE_MCF)
f.close()
site_count = len(risk_score['Site_EPA_ID'].unique())
return int(site_count)
def main(_) -> None:
site_count = process_site_hazards(FLAGS.site_hazard_input_path,
FLAGS.site_hazard_output_path)
print(f"Processing of {site_count} superfund sites is complete.")
if __name__ == '__main__':
app.run(main)
| 37.745856
| 292
| 0.804157
|
337ac7e6c1ff4b6607d6b890ba189eb16076ed20
| 8,174
|
py
|
Python
|
config/settings/production.py
|
mabdullahadeel/facegram
|
f0eaa42008e876ae892b50f9f621a25b17cc70d5
|
[
"MIT"
] | 1
|
2021-09-26T13:37:22.000Z
|
2021-09-26T13:37:22.000Z
|
config/settings/production.py
|
mabdullahadeel/facegram
|
f0eaa42008e876ae892b50f9f621a25b17cc70d5
|
[
"MIT"
] | 1
|
2021-08-08T22:04:39.000Z
|
2021-08-08T22:04:39.000Z
|
config/settings/production.py
|
mabdullahadeel/facegram
|
f0eaa42008e876ae892b50f9f621a25b17cc70d5
|
[
"MIT"
] | null | null | null |
import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["facegram.io"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcached behavior.
# https://github.com/jazzband/django-redis#memcached-exceptions-behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#cloudfront
AWS_S3_CUSTOM_DOMAIN = env("DJANGO_AWS_S3_CUSTOM_DOMAIN", default=None)
aws_s3_domain = AWS_S3_CUSTOM_DOMAIN or f"{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com"
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "facegram.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{aws_s3_domain}/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="facegram <noreply@facegram.io>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX",
default="[facegram]",
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
ANYMAIL = {}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
integrations = [
sentry_logging,
DjangoIntegration(),
CeleryIntegration(),
RedisIntegration(),
]
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=integrations,
environment=env("SENTRY_ENVIRONMENT", default="production"),
traces_sample_rate=env.float("SENTRY_TRACES_SAMPLE_RATE", default=0.0),
)
# Your stuff...
# ------------------------------------------------------------------------------
| 40.26601
| 87
| 0.628334
|
6a511fe67362b107ddbfac5c119c5fb20941b8d7
| 903
|
py
|
Python
|
setup.py
|
mmadsen/axelrod-ct
|
90ea4319dd571546888c4d2a50255514e7d7fb94
|
[
"Apache-2.0"
] | 5
|
2015-05-03T08:49:11.000Z
|
2022-03-23T11:44:00.000Z
|
setup.py
|
mmadsen/axelrod-ct
|
90ea4319dd571546888c4d2a50255514e7d7fb94
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
mmadsen/axelrod-ct
|
90ea4319dd571546888c4d2a50255514e7d7fb94
|
[
"Apache-2.0"
] | null | null | null |
from distutils.core import setup
setup(name="axelrod-ct",
version="1.0",
packages=['madsenlab',
'madsenlab.axelrod',
'madsenlab.axelrod.utils',
'madsenlab.axelrod.analysis',
'madsenlab.axelrod.data',
'madsenlab.axelrod.traits',
'madsenlab.axelrod.population',
'madsenlab.axelrod.rules'],
author='Mark E. Madsen',
author_email='mark@madsenlab.org',
url='https://github.com/mmadsen/axelrod-ct',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
]
)
| 34.730769
| 63
| 0.548173
|
4f6e69c6756cd12dd7a118773d9ca39b338646db
| 419
|
py
|
Python
|
syphon/errors.py
|
tektronix/syphon
|
04460a1196c3e5a211d01cd1f02ab307b46d5932
|
[
"MIT"
] | 3
|
2019-03-05T15:36:00.000Z
|
2019-08-01T18:33:40.000Z
|
syphon/errors.py
|
tektronix/syphon
|
04460a1196c3e5a211d01cd1f02ab307b46d5932
|
[
"MIT"
] | 32
|
2019-02-27T15:12:52.000Z
|
2020-10-04T17:39:45.000Z
|
syphon/errors.py
|
tektronix/syphon
|
04460a1196c3e5a211d01cd1f02ab307b46d5932
|
[
"MIT"
] | 3
|
2019-09-26T16:47:17.000Z
|
2020-03-18T14:38:31.000Z
|
"""syphon.errors.py
Copyright Keithley Instruments, LLC.
Licensed under MIT (https://github.com/tektronix/syphon/blob/master/LICENSE)
"""
class InconsistentMetadataError(BaseException):
def __init__(self, column: str):
super().__init__()
self.column = column
class MalformedLineError(BaseException):
def __init__(self, line: str):
super().__init__()
self.line = line
| 22.052632
| 79
| 0.682578
|
0c1e202b2d58dddaff8ed8314e6008fc15d64f0c
| 1,128
|
py
|
Python
|
api/tacticalrmm/checks/tasks.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | 1
|
2020-08-14T20:42:31.000Z
|
2020-08-14T20:42:31.000Z
|
api/tacticalrmm/checks/tasks.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | 5
|
2021-04-08T19:44:31.000Z
|
2021-09-22T19:34:33.000Z
|
api/tacticalrmm/checks/tasks.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | null | null | null |
import datetime as dt
import random
from time import sleep
from tacticalrmm.celery import app
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone as djangotime
from agents.models import Agent
from clients.models import Client, Site
@app.task
def handle_check_email_alert_task(pk):
from .models import Check
check = Check.objects.get(pk=pk)
# first time sending email
if not check.email_sent:
sleep(random.randint(1, 10))
check.send_email()
check.email_sent = djangotime.now()
check.save(update_fields=["email_sent"])
else:
# send an email only if the last email sent is older than 24 hours
delta = djangotime.now() - dt.timedelta(hours=24)
if check.email_sent < delta:
sleep(random.randint(1, 10))
check.send_email()
check.email_sent = djangotime.now()
check.save(update_fields=["email_sent"])
return "ok"
@app.task
def run_checks_task(pk):
agent = Agent.objects.get(pk=pk)
agent.salt_api_async(func="win_agent.run_manual_checks")
return "ok"
| 26.857143
| 74
| 0.687943
|
7dc97b3c9db29759eb1727328f78dbe5138e90c4
| 403
|
py
|
Python
|
maccorcyclingdata/.ipynb_checkpoints/validation_check_charging-checkpoint.py
|
jasonkuo88/maccorcyclingdata
|
dffcc5bbb4135f025b44303243928f8f0b121af9
|
[
"MIT"
] | 2
|
2021-03-29T15:34:22.000Z
|
2022-03-12T13:52:40.000Z
|
maccorcyclingdata/.ipynb_checkpoints/validation_check_charging-checkpoint.py
|
jasonkuo88/maccorcyclingdata
|
dffcc5bbb4135f025b44303243928f8f0b121af9
|
[
"MIT"
] | 10
|
2020-08-25T22:25:59.000Z
|
2021-08-23T20:51:10.000Z
|
maccorcyclingdata/.ipynb_checkpoints/validation_check_charging-checkpoint.py
|
jasonkuo88/maccorcyclingdata
|
dffcc5bbb4135f025b44303243928f8f0b121af9
|
[
"MIT"
] | 2
|
2020-10-12T20:48:35.000Z
|
2021-10-02T00:11:26.000Z
|
from datetime import datetime

def validation_check_charging(validation_df, df, charge_steps, cell_id):
    # Flag every row whose step is a charging step but whose measured current is not positive.
    for i in range(len(df)):
        if df['step'][i] in charge_steps:
            if df['current_ma'][i] <= 0:
                validation_df = validation_df.append({'time': datetime.now().strftime("%d/%m/%Y %H:%M:%S"), 'run': 'in progress', 'cell_num': cell_id, 'row_number': i, 'error': 'error - current is negative during charging'}, ignore_index=True)
    return validation_df
| 67.166667
| 238
| 0.669975
|
582270d824a02b5a07038271aeb40ee9f3234408
| 864
|
py
|
Python
|
examples/subscribe_topic_testnet.py
|
wensheng/hcs-grpc-api-py-client
|
8c393b8ad145f11f8d1a6e5881fd51de8a9ee0a3
|
[
"Apache-2.0"
] | null | null | null |
examples/subscribe_topic_testnet.py
|
wensheng/hcs-grpc-api-py-client
|
8c393b8ad145f11f8d1a6e5881fd51de8a9ee0a3
|
[
"Apache-2.0"
] | null | null | null |
examples/subscribe_topic_testnet.py
|
wensheng/hcs-grpc-api-py-client
|
8c393b8ad145f11f8d1a6e5881fd51de8a9ee0a3
|
[
"Apache-2.0"
] | 1
|
2021-09-12T17:51:04.000Z
|
2021-09-12T17:51:04.000Z
|
import sys
import logging
import grpc
from hcs_grpc_client import ConsensusTopicQuery, TopicID, ConsensusServiceStub
def run(tid: TopicID):
request = ConsensusTopicQuery(topicID=tid)
channel = grpc.insecure_channel("hcs.testnet.mirrornode.hedera.com:5600")
stub = ConsensusServiceStub(channel)
stream = stub.subscribeTopic(request)
try:
for resp in stream:
# print(resp)
timestamp = "{}@{}".format(resp.consensusTimestamp.seconds,
resp.consensusTimestamp.nanos)
print("timestamp", timestamp)
print("message:", resp.message.decode())
print("sequence#:", resp.sequenceNumber)
except grpc.RpcError as e:
print(e)
if __name__ == '__main__':
tid = TopicID(topicNum=int(sys.argv[1]))
logging.basicConfig()
run(tid)
| 30.857143
| 78
| 0.643519
|
0d8782a15884ce8e103cb0a2d2940de09a9d2310
| 3,673
|
py
|
Python
|
python/svm.py
|
DEVESHTARASIA/thundersvm
|
b61f858c4e46962566ba12fadf92fc06aa343852
|
[
"Apache-2.0"
] | 1
|
2021-03-08T22:49:34.000Z
|
2021-03-08T22:49:34.000Z
|
python/svm.py
|
DEVESHTARASIA/thundersvm
|
b61f858c4e46962566ba12fadf92fc06aa343852
|
[
"Apache-2.0"
] | null | null | null |
python/svm.py
|
DEVESHTARASIA/thundersvm
|
b61f858c4e46962566ba12fadf92fc06aa343852
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
from os import path
import sys
dirname = path.dirname(path.abspath(__file__))
libsvm = CDLL(path.join(dirname, '../build/lib/libthundersvm.so'))
dataset_path = dirname + '/../dataset/'
class dataset(object):
def __init__(self):
        self.obj = libsvm.DataSet_new()
def load_from_python(self, arg1, arg2, arg3):
        libsvm.DataSet_load_from_python(self.obj, arg1, arg2, arg3)
'''
def svm_train(param):
param_list = param.split()
param_list.insert(0, 'thundersvm-train')
param_array = (c_char_p * len(param_list))()
param_array[:] = param_list
libsvm.thundersvm_train(len(param_list), param_array)
def svm_predict(param):
param_list = param.split()
param_list.insert(0, 'thundersvm-predict')
param_array = (c_char_p * len(param_list))()
param_array[:] = param_list
libsvm.thundersvm_predict(len(param_list), param_array)
'''
def svm_read_problem(data_file_name):
"""
svm_read_problem(data_file_name) -> [y, x]
Read LIBSVM-format data from data_file_name and return labels y
and data instances x.
"""
prob_y = []
prob_x = []
file_path = dataset_path + data_file_name
for line in open(file_path):
line = line.split(None, 1)
# In case an instance with all zero features
if len(line) == 1: line += ['']
label, features = line
xi = features.encode('utf-8')[:-1]
#for e in features.split():
# ind, val = e.split(":")
# xi[int(ind)] = float(val)
prob_y += [float(label)]
prob_x += [xi]
return (prob_y, prob_x)
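# Illustrative example (not from the original source): a LIBSVM-format line such as "1 3:0.5 10:2.0"
# yields a label of 1.0 and the raw encoded feature text b"3:0.5 10:2.0"; features are kept as text here
# rather than being expanded into an index -> value mapping.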
def svm_train(arg1, arg2 = None, arg3 = None, arg4 = None):
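    # Two call modes (inferred from the branches below): pass labels and encoded instances already loaded
    # in Python together with a parameter string, or pass a single command-line style parameter string.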
if arg2:
arg1_array = (c_float * len(arg1))()
arg1_array[:] = arg1
#arg2_string_list = [str(d).encode('utf-8')[1:-1] for d in arg2]
arg2_string_list = arg2
arg2_array = (c_char_p * len(arg2_string_list))()
arg2_array[:] = arg2_string_list
#print(arg1_array[0])
#print(arg2_array[0])
arg4_list = arg4.encode('utf-8').split()
arg4_array = (c_char_p * len(arg4_list))()
arg4_array[:] = arg4_list
#dataset_python = dataset();
#dataset_python.load_from_python(arg1, arg2, arg3)
#print(dataset_python)
libsvm.load_from_python_interface(arg1_array, arg2_array, len(arg1_array))
libsvm.thundersvm_train_after_parse(arg4_array, len(arg4_array), arg3.encode('utf-8'))
else:
param_list = arg1.encode('utf-8').split()
param_list.insert(0, 'thundersvm-train')
param_array = (c_char_p * len(param_list))()
param_array[:] = param_list
#print(param_array[0])
libsvm.thundersvm_train(len(param_list), param_array)
def svm_predict(arg1, arg2 = None, arg3 = None, arg4 = None, arg5 = None):
if arg2:
arg1_array = (c_float * len(arg1))()
arg1_array[:] = arg1
arg2_array = (c_char_p * len(arg2))()
arg2_array[:] = arg2
libsvm.load_from_python_interface(arg1_array, arg2_array, len(arg1_array))
if arg5:
arg5_list = arg5.encode('utf-8').split()
arg5_array = (c_char_p * len(arg5_list))()
arg5_array[:] = arg5_list
libsvm.thundersvm_predict_after_parse(arg3.encode('utf-8'), arg4.encode('utf-8'), arg5_array, len(arg5_array))
else :
arg5_array = None
libsvm.thundersvm_predict_after_parse(arg3.encode('utf-8'), arg4.encode('utf-8'), arg5_array, 0)
else:
param_list = arg1.split()
param_list.insert(0, 'thundersvm-predict')
param_array = (c_char_p * len(param_list))()
param_array[:] = param_list
libsvm.thundersvm_predict(len(param_list), param_array)
#libsvm.thundersvm_train(15, "./thundersvm-train -s 1 -t 2 -g 0.5 -c 100 -n 0.1 -e 0.001 dataset/test_dataset.txt dataset/test_dataset.txt.model");
| 34.650943
| 148
| 0.685543
|
8c1124cb326a13425ce5ebc5e86e2fa464497745
| 3,409
|
py
|
Python
|
tests/knowledge/rules/aws/non_context_aware/encryption_enforcement_rules/encrypt_at_rest/test_ensure_docdb_clusters_encrypted_customer_managed_cmk_rule.py
|
my-devops-info/cloudrail-knowledge
|
b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e
|
[
"MIT"
] | null | null | null |
tests/knowledge/rules/aws/non_context_aware/encryption_enforcement_rules/encrypt_at_rest/test_ensure_docdb_clusters_encrypted_customer_managed_cmk_rule.py
|
my-devops-info/cloudrail-knowledge
|
b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e
|
[
"MIT"
] | null | null | null |
tests/knowledge/rules/aws/non_context_aware/encryption_enforcement_rules/encrypt_at_rest/test_ensure_docdb_clusters_encrypted_customer_managed_cmk_rule.py
|
my-devops-info/cloudrail-knowledge
|
b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e
|
[
"MIT"
] | null | null | null |
import unittest
from cloudrail.knowledge.context.aws.docdb.docdb_cluster import DocumentDbCluster
from cloudrail.knowledge.context.aws.kms.kms_key import KmsKey
from cloudrail.knowledge.context.aws.kms.kms_key_manager import KeyManager
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.context.terraform_state import TerraformState
from cloudrail.knowledge.rules.aws.non_context_aware.encryption_enforcement_rules.encrypt_at_rest.ensure_docdb_clusters_encrypted_customer_managed_cmk_rule import \
EnsureDocdbClustersEncryptedCustomerManagedCmkRule
from cloudrail.knowledge.rules.base_rule import RuleResultType
from cloudrail.dev_tools.rule_test_utils import create_empty_entity
class TestEnsureDocdbClustersEncryptedCustomerManagedCmkRule(unittest.TestCase):
def setUp(self):
self.rule = EnsureDocdbClustersEncryptedCustomerManagedCmkRule()
def test_not_car_docdb_cluster_encrypted_at_rest_using_customer_managed_cmk__kms_key_is_not_customer__fail(self):
# Arrange
document_db_cluster: DocumentDbCluster = create_empty_entity(DocumentDbCluster)
terraform_state = create_empty_entity(TerraformState)
document_db_cluster.terraform_state = terraform_state
document_db_cluster.terraform_state.is_new = True
document_db_cluster.storage_encrypted = True
kms_key: KmsKey = create_empty_entity(KmsKey)
kms_key.key_manager = KeyManager.AWS
document_db_cluster.kms_data = kms_key
context = AwsEnvironmentContext(docdb_cluster=[document_db_cluster])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.FAILED, result.status)
self.assertEqual(1, len(result.issues))
def test_not_car_docdb_cluster_encrypted_at_rest_using_customer_managed_cmk__kms_key_is_missing__fail(self):
# Arrange
document_db_cluster: DocumentDbCluster = create_empty_entity(DocumentDbCluster)
terraform_state = create_empty_entity(TerraformState)
document_db_cluster.terraform_state = terraform_state
document_db_cluster.terraform_state.is_new = True
document_db_cluster.storage_encrypted = True
document_db_cluster.kms_data = None
context = AwsEnvironmentContext(docdb_cluster=[document_db_cluster])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.FAILED, result.status)
self.assertEqual(1, len(result.issues))
def test_not_car_docdb_cluster_encrypted_at_rest_using_customer_managed_cmk_pass(self):
# Arrange
document_db_cluster: DocumentDbCluster = create_empty_entity(DocumentDbCluster)
terraform_state = create_empty_entity(TerraformState)
document_db_cluster.terraform_state = terraform_state
document_db_cluster.terraform_state.is_new = True
document_db_cluster.storage_encrypted = True
kms_key: KmsKey = create_empty_entity(KmsKey)
kms_key.key_manager = KeyManager.CUSTOMER
document_db_cluster.kms_data = kms_key
context = AwsEnvironmentContext(docdb_cluster=[document_db_cluster])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.SUCCESS, result.status)
self.assertEqual(0, len(result.issues))
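# Not part of the original file: a conventional entry point (assumed, not confirmed by the
# repository) so the test module can also be run directly as a script.
if __name__ == '__main__':
    unittest.main()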
| 49.405797
| 164
| 0.777647
|
4a49a72848dfa5f254ce70ca46e6cd45be3819fe
| 47,845
|
py
|
Python
|
data/AnomalDataLoader.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | 4
|
2021-04-21T05:09:49.000Z
|
2022-01-17T13:02:45.000Z
|
data/AnomalDataLoader.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | null | null | null |
data/AnomalDataLoader.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | 1
|
2021-07-08T02:20:43.000Z
|
2021-07-08T02:20:43.000Z
|
# -*- coding: utf-8 -*-
# @Time : 20/5/1 16:58
# @Author : qgking
# @Email : qgking@tju.edu.cn
# @Software: PyCharm
# @Desc : AnomalDataLoader.py
from skimage.transform import resize
from torch.utils.data import Dataset
from common.base_utls import *
from common.data_utils import *
import torch
from torchvision import transforms
from torchvision.utils import make_grid
from data.data_augmentation import *
# --------------5fold start-------------
class CovidInf5foldDatasetBase(Dataset):
def __init__(self, root_dir, img_list, input_size, generate_each, mean, std, pos):
self.input_x = input_size[0]
self.input_y = input_size[1]
self.input_z = input_size[2]
self.root_dir = root_dir
self.pos = pos
self.generate_each = generate_each
self.img_list = []
self.minindex_list = []
self.maxindex_list = []
self.infidx = []
self.inflines = []
self.mean = mean
self.std = std
print('mean %.8f std %.8f' % (self.mean, self.std))
for idx in range(len(img_list)):
# if idx > 1:
# break
file_name = basename(img_list[idx])[:-4]
print(img_list[idx])
self.img_list.append(img_list[idx])
scans = np.load(img_list[idx])
txt_path = join(root_dir, file_name + '_inf.txt')
if not exists(txt_path):
txt_path = join(root_dir, file_name[1:] + '_inf.txt')
values = np.loadtxt(txt_path, delimiter=' ')
minindex = np.min(values, axis=0)
maxindex = np.max(values, axis=0)
minindex = np.array(minindex, dtype='int')
maxindex = np.array(maxindex, dtype='int')
minindex[0] = max(minindex[0] - 3, 0)
minindex[1] = max(minindex[1] - 3, 0)
minindex[2] = max(minindex[2] - 3, 0)
maxindex[0] = min(scans[0].shape[0], maxindex[0] + 3)
maxindex[1] = min(scans[0].shape[1], maxindex[1] + 3)
maxindex[2] = min(scans[0].shape[2], maxindex[2] + 3)
self.minindex_list.append(minindex)
self.maxindex_list.append(maxindex)
f2 = open(txt_path, 'r')
liverline = f2.readlines()
self.inflines.append(liverline)
self.infidx.append(len(liverline))
f2.close()
del scans
def __len__(self):
return int(self.generate_each * len(self.img_list))
def __getitem__(self, index):
return None
# resize 5 fold
class CovidInf5fold2dAugSegDataset(CovidInf5foldDatasetBase):
def __init__(self, root_dir, img_list, input_size, generate_each, mean, std, pos):
super(CovidInf5fold2dAugSegDataset, self).__init__(root_dir, img_list, input_size,
generate_each, mean, std, pos)
def __getitem__(self, index):
# while True:
count = index // self.generate_each
scans = np.load(self.img_list[count])
img = scans[0].copy()
if 'MosMedData' in self.root_dir:
lung = scans[2]
infection = scans[1]
elif 'COVID-19-CT' in self.root_dir:
lung = scans[1]
infection = scans[2]
minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
# tmp = (tmp - self.mean) / self.std
# img[minx: maxx, miny: maxy, minz: maxz] = tmp
pos = np.random.random()
if pos > self.pos:
# only inf region selected
# print('only inf region selected')
minindex = self.minindex_list[count]
maxindex = self.maxindex_list[count]
lines = self.inflines[count]
numid = self.infidx[count]
scale = np.random.uniform(0.8, 1.2)
cols = int(self.input_z)
sed = np.random.randint(1, numid)
cen = lines[sed - 1]
cen = np.fromstring(cen, dtype=int, sep=' ')
c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
else:
# inf region and none inf region selected
# print('inf region and none inf region selected')
scale = np.random.uniform(0.8, 1.2)
cols = int(self.input_z)
x = np.random.randint(minx, maxx)
y = np.random.randint(miny, maxy)
z = np.random.randint(minz, maxz)
cen = [x, y, z]
c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
flo = int(np.floor(cols / 2))
cropp_img = img[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
cropp_infection = infection[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
return agumentation_img_inf_2d(cropp_img, cropp_infection, self.input_x, self.input_y, self.mean, self.std)
# resize 5 fold
class CovidInf5fold2dResizeSegDataset(CovidInf5foldDatasetBase):
def __init__(self, root_dir, img_list, input_size, generate_each, mean, std, pos):
super(CovidInf5fold2dResizeSegDataset, self).__init__(root_dir, img_list, input_size,
generate_each, mean, std, pos)
def __getitem__(self, index):
# while True:
count = index // self.generate_each
scans = np.load(self.img_list[count])
img = scans[0].copy()
if 'MosMedData' in self.root_dir:
lung = scans[2]
infection = scans[1]
elif 'COVID-19-CT' in self.root_dir:
lung = scans[1]
infection = scans[2]
minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
# tmp = (tmp - self.mean) / self.std
# img[minx: maxx, miny: maxy, minz: maxz] = tmp
pos = np.random.random()
if pos > self.pos:
# only inf region selected
# print('only inf region selected')
minindex = self.minindex_list[count]
maxindex = self.maxindex_list[count]
lines = self.inflines[count]
numid = self.infidx[count]
scale = np.random.uniform(0.8, 1.2)
cols = int(self.input_z)
sed = np.random.randint(1, numid)
cen = lines[sed - 1]
cen = np.fromstring(cen, dtype=int, sep=' ')
c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
else:
# inf region and none inf region selected
# print('inf region and none inf region selected')
scale = np.random.uniform(0.8, 1.2)
cols = int(self.input_z)
x = np.random.randint(minx, maxx)
y = np.random.randint(miny, maxy)
z = np.random.randint(minz, maxz)
cen = [x, y, z]
c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
flo = int(np.floor(cols / 2))
cropp_img = img[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
cropp_infection = infection[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# crop 5 fold
class CovidInf5fold2dSegDataset(CovidInf5foldDatasetBase):
def __init__(self, root_dir, img_list, input_size, generate_each, mean, std, pos):
super(CovidInf5fold2dSegDataset, self).__init__(root_dir, img_list, input_size, generate_each, mean, std, pos)
def __getitem__(self, index):
# while True:
count = index // self.generate_each
scans = np.load(self.img_list[count])
img = scans[0].copy()
if 'MosMedData' in self.root_dir:
lung = scans[2]
infection = scans[1]
elif 'COVID-19-CT' in self.root_dir:
lung = scans[1]
infection = scans[2]
minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
# tmp = (tmp - self.mean) / self.std
# img[minx: maxx, miny: maxy, minz: maxz] = tmp
minindex = self.minindex_list[count]
maxindex = self.maxindex_list[count]
lines = self.inflines[count]
numid = self.infidx[count]
scale = np.random.uniform(0.8, 1.2)
deps = int(self.input_x * scale)
rows = int(self.input_y * scale)
cols = int(self.input_z)
sed = np.random.randint(1, numid)
cen = lines[sed - 1]
cen = np.fromstring(cen, dtype=int, sep=' ')
a = int(min(max(minindex[0] + deps / 2, cen[0]), maxindex[0] - deps / 2 - 1))
b = int(min(max(minindex[1] + rows / 2, cen[1]), maxindex[1] - rows / 2 - 1))
c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
flo = int(np.floor(cols / 2))
cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
c - flo: c + cols - flo].copy()
cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
c - flo: c + cols - flo].copy()
return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# --------------5fold end-------------
# --------------UnsuData start-------------
class CovidInfUnsuDatasetBase(Dataset):
def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
self.input_x = input_size[0]
self.input_y = input_size[1]
self.input_z = input_size[2]
self.root_dir = root_dir
self.generate_each = generate_each
self.pos = pos
self.img_list = []
self.minindex_list = []
self.maxindex_list = []
self.infidx = []
self.inflines = []
self.mean = mean
self.std = std
print('mean %.8f std %.8f' % (self.mean, self.std))
if 'MosMedData' in root_dir:
img_list = sorted(glob(join(root_dir, 'm*.npy')), reverse=True)
idx = []
np.random.seed(666)
indx = np.random.choice(range(len(img_list)), size=int(len(img_list) * 0.2), replace=False)
idx.extend(indx)
if split == 'train':
img_list = [img_list[ii] for ii in range(len(img_list)) if ii not in idx]
elif split == 'valid':
img_list = [img_list[ii] for ii in range(len(img_list)) if ii in idx]
            elif split is None:
                pass  # keep the full image list
elif 'COVID-19-CT' in root_dir:
img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
idx = []
np.random.seed(666)
indx = np.random.choice(range(10), size=2, replace=False)
idx.extend(indx)
np.random.seed(666)
indx = np.random.choice(range(10, 20), size=2, replace=False)
idx.extend(indx)
if split == 'train':
img_list = [img_list[ii] for ii in range(len(img_list)) if ii not in idx]
elif split == 'valid':
img_list = [img_list[ii] for ii in range(len(img_list)) if ii in idx]
            elif split is None:
                pass  # keep the full image list
for idx in range(len(img_list)):
# if idx > 1:
# break
file_name = basename(img_list[idx])[:-4]
print(img_list[idx])
self.img_list.append(img_list[idx])
scans = np.load(img_list[idx])
txt_path = join(root_dir, file_name + '_inf.txt')
if not exists(txt_path):
txt_path = join(root_dir, file_name[1:] + '_inf.txt')
values = np.loadtxt(txt_path, delimiter=' ')
minindex = np.min(values, axis=0)
maxindex = np.max(values, axis=0)
minindex = np.array(minindex, dtype='int')
maxindex = np.array(maxindex, dtype='int')
minindex[0] = max(minindex[0] - 3, 0)
minindex[1] = max(minindex[1] - 3, 0)
minindex[2] = max(minindex[2] - 3, 0)
maxindex[0] = min(scans[0].shape[0], maxindex[0] + 3)
maxindex[1] = min(scans[0].shape[1], maxindex[1] + 3)
maxindex[2] = min(scans[0].shape[2], maxindex[2] + 3)
self.minindex_list.append(minindex)
self.maxindex_list.append(maxindex)
f2 = open(txt_path, 'r')
liverline = f2.readlines()
self.inflines.append(liverline)
self.infidx.append(len(liverline))
f2.close()
del scans
def __len__(self):
return int(self.generate_each * len(self.img_list))
def __getitem__(self, index):
# while True:
return None
# resize unsupervised
class CovidInfUnsu2dResizeSegDataset(CovidInfUnsuDatasetBase):
def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
super(CovidInfUnsu2dResizeSegDataset, self).__init__(root_dir, split, input_size,
generate_each, mean, std, pos)
def __getitem__(self, index):
# while True:
count = index // self.generate_each
scans = np.load(self.img_list[count])
img = scans[0].copy()
if 'MosMedData' in self.root_dir:
lung = scans[2]
infection = scans[1]
elif 'COVID-19-CT' in self.root_dir:
lung = scans[1]
infection = scans[2]
minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
# tmp = (tmp - self.mean) / self.std
# img[minx: maxx, miny: maxy, minz: maxz] = tmp
# save_dir = '/home/qgking/COVID3DSeg/log/3DCOVIDCT/deeplab2d/inf_da_0_run_dapt_from_50_to_20_reszie_eeetest/tmp'
#
# minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
# cropp_pppp = torch.from_numpy(tmp)
# cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
# visual_batch(cropp_pppp, save_dir, "test_img_tmp", channel=1, nrow=8)
#
# tmp = (tmp - self.mean) / self.std
# img[minx: maxx, miny: maxy, minz: maxz] = tmp
#
# cropp_pppp = torch.from_numpy(tmp)
# cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
# visual_batch(cropp_pppp, save_dir, "test_img", channel=1, nrow=8)
#
# cropp_pppp = torch.from_numpy((img * self.std + self.mean)[minx: maxx, miny: maxy, minz: maxz])
# cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
# visual_batch(cropp_pppp, save_dir, "test_img_process_back_crop", channel=1, nrow=8)
pos = np.random.random()
if pos > self.pos:
# only inf region selected
# print('only inf region selected')
minindex = self.minindex_list[count]
maxindex = self.maxindex_list[count]
lines = self.inflines[count]
numid = self.infidx[count]
scale = np.random.uniform(0.8, 1.2)
cols = int(self.input_z)
sed = np.random.randint(1, numid)
cen = lines[sed - 1]
cen = np.fromstring(cen, dtype=int, sep=' ')
c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
else:
# inf region and none inf region selected
# print('inf region and none inf region selected')
scale = np.random.uniform(0.8, 1.2)
cols = int(self.input_z)
x = np.random.randint(minx, maxx)
y = np.random.randint(miny, maxy)
z = np.random.randint(minz, maxz)
cen = [x, y, z]
c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
flo = int(np.floor(cols / 2))
cropp_img = img[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
cropp_infection = infection[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
# nbb = agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# cropp_pppp = np.expand_dims(np.transpose(nbb['image_patch'], (2, 0, 1)), axis=0)
# visual_batch(torch.from_numpy(cropp_pppp), save_dir, "test_img_process_back_crop_cc", channel=1, nrow=8)
return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# resize unsupervised slice
class CovidInfUnsu2dAugSegDataset(CovidInfUnsuDatasetBase):
def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
super(CovidInfUnsu2dAugSegDataset, self).__init__(root_dir, split, input_size,
generate_each, mean, std, pos)
def __getitem__(self, index):
# while True:
count = index // self.generate_each
scans = np.load(self.img_list[count])
img = scans[0].copy()
if 'MosMedData' in self.root_dir:
lung = scans[2]
infection = scans[1]
elif 'COVID-19-CT' in self.root_dir:
lung = scans[1]
infection = scans[2]
minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
# tmp = (tmp - self.mean) / self.std
# img[minx: maxx, miny: maxy, minz: maxz] = tmp
# save_dir = '/home/qgking/COVID3DSeg/log/3DCOVIDCT/deeplab2d/inf_da_0_run_dapt_from_50_to_20_reszie_eeetest/tmp'
#
# minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
# cropp_pppp = torch.from_numpy(tmp)
# cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
# visual_batch(cropp_pppp, save_dir, "test_img_tmp", channel=1, nrow=8)
#
# tmp = (tmp - self.mean) / self.std
# img[minx: maxx, miny: maxy, minz: maxz] = tmp
#
# cropp_pppp = torch.from_numpy(tmp)
# cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
# visual_batch(cropp_pppp, save_dir, "test_img", channel=1, nrow=8)
#
# cropp_pppp = torch.from_numpy((img * self.std + self.mean)[minx: maxx, miny: maxy, minz: maxz])
# cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
# visual_batch(cropp_pppp, save_dir, "test_img_process_back_crop", channel=1, nrow=8)
pos = np.random.random()
if pos > self.pos:
# only inf region selected
# print('only inf region selected')
minindex = self.minindex_list[count]
maxindex = self.maxindex_list[count]
lines = self.inflines[count]
numid = self.infidx[count]
scale = np.random.uniform(0.8, 1.2)
cols = int(self.input_z)
sed = np.random.randint(1, numid)
cen = lines[sed - 1]
cen = np.fromstring(cen, dtype=int, sep=' ')
c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
else:
# inf region and none inf region selected
# print('inf region and none inf region selected')
scale = np.random.uniform(0.8, 1.2)
cols = int(self.input_z)
x = np.random.randint(minx, maxx)
y = np.random.randint(miny, maxy)
z = np.random.randint(minz, maxz)
cen = [x, y, z]
c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
flo = int(np.floor(cols / 2))
cropp_img = img[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
cropp_infection = infection[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
# nbb = agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# cropp_pppp = np.expand_dims(np.transpose(nbb['image_patch'], (2, 0, 1)), axis=0)
# visual_batch(torch.from_numpy(cropp_pppp), save_dir, "test_img_process_back_crop_cc", channel=1, nrow=8)
return agumentation_img_inf_2d(cropp_img, cropp_infection, self.input_x, self.input_y, self.mean, self.std)
# resize unsupervised slice
class CovidInfValidUnsu2dAugSegDataset(CovidInfUnsuDatasetBase):
def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
super(CovidInfValidUnsu2dAugSegDataset, self).__init__(root_dir, split, input_size,
generate_each, mean, std, pos)
def __len__(self):
return int(len(self.img_list))
def __getitem__(self, index):
# while True:
count = index
scans = np.load(self.img_list[count])
img = scans[0].copy()
if 'MosMedData' in self.root_dir:
lung = scans[2]
infection = scans[1]
elif 'COVID-19-CT' in self.root_dir:
lung = scans[1]
infection = scans[2]
minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
cropp_img = img[minx: maxx, miny: maxy, minz: maxz].copy()
cropp_infection = infection[minx: maxx, miny: maxy, minz: maxz].copy()
return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, (maxz - minz))
# crop unsupervised
class CovidInfUnsu2dSegDataset(CovidInfUnsuDatasetBase):
def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
super(CovidInfUnsu2dSegDataset, self).__init__(root_dir, split, input_size, generate_each, mean, std, pos)
def __getitem__(self, index):
# while True:
count = index // self.generate_each
scans = np.load(self.img_list[count])
img = scans[0].copy()
if 'MosMedData' in self.root_dir:
lung = scans[2]
infection = scans[1]
elif 'COVID-19-CT' in self.root_dir:
lung = scans[1]
infection = scans[2]
# print(cen)
# cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
# c - cols // 2: c + cols // 2].copy()
# cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
# c - cols // 2:c + cols // 2].copy()
#
# minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# cropped_im = img[minx: maxx, miny: maxy, minz: maxz]
# cropped_if = infection[minx: maxx, miny: maxy, minz: maxz]
# sed = np.random.randint(1, numid)
# cen = lines[sed - 1]
# cen = np.fromstring(cen, dtype=int, sep=' ')
# c = cen[2] - minz
# cols = int(self.input_z)
# maxz = cropped_if.shape[2]
# minz = 0
# # c = np.random.randint(minz, maxz - cols - 1)
# flo = int(np.floor(cols / 2))
# cel = int(np.ceil(cols // 2))
# c = int(min(max(minz + flo, c), maxz - cel - 1))
# cropp_img = cropped_im[:, :, c - flo: c + cols - cel].copy()
# cropp_infection = cropped_if[:, :, c - flo: c + cols - cel].copy()
# if not (c >= minz and c < maxz):
# print('shape:', img.shape)
# print('min max:', (minx, maxx, miny, maxy, minz, maxz))
# print('cropped shape:', cropp_img.shape)
# print(self.img_list[count])
# print('min c %d, max c %d' % (c - flo, c + cols - cel))
# print(cen)
# exit(0)
# save_dir = '/home/qgking/COVID3DSeg/log/3DCOVIDCT/deeplabdilate2d/inf_seg_0_run_unsu_mos_covid_0/tmp'
# cropp_pppp = torch.from_numpy(cropp_img)
# cropp_pppp=cropp_pppp.unsqueeze(0).unsqueeze(0)
# cropp_iiii = torch.from_numpy(cropp_infection)
# cropp_iiii = cropp_iiii.unsqueeze(0).unsqueeze(0)
# visual_batch(cropp_pppp, save_dir, "test_img", channel=1, nrow=8)
# visual_batch(cropp_iiii, save_dir, "test_gt", channel=1, nrow=8)
minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
# tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
# tmp = (tmp - self.mean) / self.std
# img[minx: maxx, miny: maxy, minz: maxz] = tmp
pos = np.random.random()
if pos > self.pos:
# only inf region selected
# print('only inf region selected')
minindex = self.minindex_list[count]
maxindex = self.maxindex_list[count]
lines = self.inflines[count]
numid = self.infidx[count]
scale = np.random.uniform(0.8, 1.2)
deps = int(self.input_x * scale)
rows = int(self.input_y * scale)
cols = int(self.input_z)
sed = np.random.randint(1, numid)
cen = lines[sed - 1]
cen = np.fromstring(cen, dtype=int, sep=' ')
a = int(min(max(minindex[0] + deps / 2, cen[0]), maxindex[0] - deps / 2 - 1))
b = int(min(max(minindex[1] + rows / 2, cen[1]), maxindex[1] - rows / 2 - 1))
c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
else:
# inf region and none inf region selected
# print('inf region and none inf region selected')
minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
scale = np.random.uniform(0.8, 1.2)
deps = int(self.input_x * scale)
rows = int(self.input_y * scale)
cols = int(self.input_z)
x = np.random.randint(minx, maxx)
y = np.random.randint(miny, maxy)
z = np.random.randint(minz, maxz)
cen = [x, y, z]
a = int(min(max(minx + deps / 2, cen[0]), maxx - deps / 2 - 1))
b = int(min(max(miny + rows / 2, cen[1]), maxy - rows / 2 - 1))
c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
flo = int(np.floor(cols / 2))
cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
c - flo: c + cols - flo].copy()
cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
c - flo: c + cols - flo].copy()
return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# --------------UnsuData end-------------
# --------------2D slice start-------------
class CovidInfUnsu2dDatasetBase(Dataset):
def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
self.input_x = input_size[0]
self.input_y = input_size[1]
self.input_z = input_size[2]
self.root_dir = root_dir
self.generate_each = generate_each
self.pos = pos
self.img_list = []
self.lung_list = []
self.inf_list = []
self.total_slices = 0
self.mean = mean
self.std = std
print('mean %.8f std %.8f' % (self.mean, self.std))
if 'MosMedData' in root_dir:
img_list = sorted(glob(join(root_dir, 'm*.npy')), reverse=True)
elif 'COVID-19-CT' in root_dir:
img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
elif 'Italy' in root_dir:
# TODO need to be modified
img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
for idx in range(len(img_list)):
print(img_list[idx])
scans = np.load(img_list[idx])
img = scans[0].copy()
if 'MosMedData' in img_list[idx]:
lung = scans[2].copy()
infection = scans[1].copy()
# use lung and inf
# sums = np.sum(lung, axis=(0, 1))
# if np.sum(lung) == 0:
# continue
sums = np.sum(infection, axis=(0, 1))
inf_sli = np.where(sums > 1)[0]
elif 'COVID-19-CT' in img_list[idx]:
lung = scans[1].copy()
infection = scans[2].copy()
# use lung and inf
# sums = np.sum(lung, axis=(0, 1))
sums = np.sum(infection, axis=(0, 1))
inf_sli = np.where(sums > 1)[0]
elif 'Italy' in img_list[idx]:
lung = scans[1].copy()
infection = scans[2].copy()
# GGO and Consolidation
infection[np.where(infection == 3)] = 0
infection[np.where(infection > 0)] = 1
# use inf
sums = np.sum(infection, axis=(0, 1))
inf_sli = np.where(sums > 1)[0]
s_img = img[:, :, inf_sli]
s_lung = lung[:, :, inf_sli]
s_infection = infection[:, :, inf_sli]
for ii in range(s_img.shape[-1]):
# if 'Italy' in img_list[idx]:
# semi_inf = os.listdir('../../log/3DCOVIDCT/Semi-Inf-Net/')
# if str(ii) + '.png' not in semi_inf:
# continue
self.img_list.append(s_img[:, :, ii])
self.lung_list.append(s_lung[:, :, ii])
self.inf_list.append(s_infection[:, :, ii])
del scans
# if 'MosMedData' in root_dir:
# idx = []
# np.random.seed(666)
# indx = np.random.choice(range(len(self.img_list)), size=int(len(self.img_list) * 0.2), replace=False)
# idx.extend(indx)
# if split == 'train':
# self.img_list = [self.img_list[ii] for ii in range(len(self.img_list)) if ii not in idx]
# self.lung_list = [self.lung_list[ii] for ii in range(len(self.lung_list)) if ii not in idx]
# self.inf_list = [self.inf_list[ii] for ii in range(len(self.inf_list)) if ii not in idx]
# elif split == 'valid':
# self.img_list = [self.img_list[ii] for ii in range(len(self.img_list)) if ii in idx]
# self.lung_list = [self.lung_list[ii] for ii in range(len(self.lung_list)) if ii in idx]
# self.inf_list = [self.inf_list[ii] for ii in range(len(self.inf_list)) if ii in idx]
# elif 'COVID-19-CT' in root_dir:
# # img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
# idx = []
# np.random.seed(666)
# indx = np.random.choice(range(10), size=2, replace=False)
# idx.extend(indx)
# np.random.seed(666)
# indx = np.random.choice(range(10, 20), size=2, replace=False)
# idx.extend(indx)
# if split == 'train':
# self.img_list = [self.img_list[ii] for ii in range(len(self.img_list)) if ii not in idx]
# self.lung_list = [self.lung_list[ii] for ii in range(len(self.lung_list)) if ii not in idx]
# self.inf_list = [self.inf_list[ii] for ii in range(len(self.inf_list)) if ii not in idx]
# elif split == 'valid':
# self.img_list = [self.img_list[ii] for ii in range(len(self.img_list)) if ii in idx]
# self.lung_list = [self.lung_list[ii] for ii in range(len(self.lung_list)) if ii in idx]
# self.inf_list = [self.inf_list[ii] for ii in range(len(self.inf_list)) if ii in idx]
# elif 'Italy' in root_dir:
# idx = []
# np.random.seed(666)
# indx = np.random.choice(range(len(self.img_list)), size=int(len(self.img_list) * 0.2), replace=False)
# idx.extend(indx)
# # TODO need to be modified
# if split == 'train':
# self.img_list = [self.img_list[ii] for ii in range(len(self.img_list)) if ii not in idx]
# self.lung_list = [self.lung_list[ii] for ii in range(len(self.lung_list)) if ii not in idx]
# self.inf_list = [self.inf_list[ii] for ii in range(len(self.inf_list)) if ii not in idx]
# elif split == 'valid':
# self.img_list = [self.img_list[ii] for ii in range(len(self.img_list)) if ii in idx]
# self.lung_list = [self.lung_list[ii] for ii in range(len(self.lung_list)) if ii in idx]
# self.inf_list = [self.inf_list[ii] for ii in range(len(self.inf_list)) if ii in idx]
def __len__(self):
return len(self.img_list)
def __getitem__(self, index):
# while True:
return None
# resize unsupervised slice
class CovidInfValidUnsu2dDatasetBase(CovidInfUnsu2dDatasetBase):
def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
super(CovidInfValidUnsu2dDatasetBase, self).__init__(root_dir, split, input_size,
generate_each, mean, std, pos)
def __len__(self):
return int(len(self.img_list))
def __getitem__(self, index):
im = self.img_list[index]
lung = self.lung_list[index]
inf = self.inf_list[index]
minx, maxx, miny, maxy = min_max_voi_2d(lung, superior=5, inferior=5)
cropp_img = im[minx: maxx, miny: maxy].copy()
cropp_infection = inf[minx: maxx, miny: maxy].copy()
cropp_img = np.tile(np.expand_dims(cropp_img, axis=-1), self.input_z)
cropp_infection = np.tile(np.expand_dims(cropp_infection, axis=-1), self.input_z)
return agumentation_img_inf_2d(cropp_img, cropp_infection, self.input_x, self.input_y, self.mean, self.std,
num=4)
# simple 2D slices
class CovidInfUnsu2dSliceSegDataset(CovidInfUnsu2dDatasetBase):
def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
super(CovidInfUnsu2dSliceSegDataset, self).__init__(root_dir, split, input_size,
generate_each, mean, std, pos)
def __getitem__(self, index):
im = self.img_list[index]
lung = self.lung_list[index]
# print(np.unique(lung))
# print(index)
inf = self.inf_list[index]
minx, maxx, miny, maxy = min_max_voi_2d(lung, superior=5, inferior=5)
cropp_img = im[minx: maxx, miny: maxy].copy()
cropp_infection = inf[minx: maxx, miny: maxy].copy()
cropp_img = np.tile(np.expand_dims(cropp_img, axis=-1), self.input_z)
cropp_infection = np.tile(np.expand_dims(cropp_infection, axis=-1), self.input_z)
return agumentation_img_inf_2d(cropp_img, cropp_infection, self.input_x, self.input_y, self.mean, self.std,
num=4)
# --------------2D slice end-------------
class CovidInf20SegDataset(Dataset):
def __init__(self, root_dir, split='train', input_size=(256, 256, 64), generate_each=6):
self.input_x = input_size[0]
self.input_y = input_size[1]
self.input_z = input_size[2]
self.root_dir = root_dir
self.generate_each = generate_each
self.img_list = []
self.minindex_list = []
self.maxindex_list = []
self.infidx = []
self.inflines = []
img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
idx = []
np.random.seed(666)
indx = np.random.choice(range(10), size=2, replace=False)
idx.extend(indx)
np.random.seed(666)
indx = np.random.choice(range(10, 20), size=2, replace=False)
idx.extend(indx)
if split == 'train':
img_list = [img_list[ii] for ii in range(len(img_list)) if ii not in idx]
elif split == 'valid':
img_list = [img_list[ii] for ii in range(len(img_list)) if ii in idx]
for idx in range(len(img_list)):
file_name = basename(img_list[idx])[:-4]
# if idx > 3:
# break
print(img_list[idx])
self.img_list.append(img_list[idx])
scans = np.load(img_list[idx])
values = np.loadtxt(join(root_dir, file_name + '_inf.txt'), delimiter=' ')
minindex = np.min(values, axis=0)
maxindex = np.max(values, axis=0)
minindex = np.array(minindex, dtype='int')
maxindex = np.array(maxindex, dtype='int')
minindex[0] = max(minindex[0] - 3, 0)
minindex[1] = max(minindex[1] - 3, 0)
minindex[2] = max(minindex[2] - 3, 0)
maxindex[0] = min(scans[0].shape[0], maxindex[0] + 3)
maxindex[1] = min(scans[0].shape[1], maxindex[1] + 3)
maxindex[2] = min(scans[0].shape[2], maxindex[2] + 3)
self.minindex_list.append(minindex)
self.maxindex_list.append(maxindex)
f2 = open(join(root_dir, file_name + '_inf.txt'), 'r')
liverline = f2.readlines()
self.inflines.append(liverline)
self.infidx.append(len(liverline))
f2.close()
del scans
def __len__(self):
return int(self.generate_each * len(self.img_list))
def __getitem__(self, index):
# while True:
count = index // self.generate_each
scans = np.load(self.img_list[count])
img = scans[0]
infection = scans[2]
minindex = self.minindex_list[count]
maxindex = self.maxindex_list[count]
lines = self.inflines[count]
numid = self.infidx[count]
# randomly scale
scale = np.random.uniform(0.8, 1.2)
deps = int(self.input_x * scale)
rows = int(self.input_y * scale)
cols = int(self.input_z)
sed = np.random.randint(1, numid)
cen = lines[sed - 1]
cen = np.fromstring(cen, dtype=int, sep=' ')
a = int(min(max(minindex[0] + deps / 2, cen[0]), maxindex[0] - deps / 2 - 1))
b = int(min(max(minindex[1] + rows / 2, cen[1]), maxindex[1] - rows / 2 - 1))
c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
# print(c)
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
# print(c)
# print(minindex)
# print(maxindex)
# print(cen)
cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
c - cols // 2: c + cols // 2].copy()
cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
c - cols // 2:c + cols // 2].copy()
# print(img.shape)
# print(cropp_infection.shape)
# print('a %d,b %d,c %d' % (a, b, c))
return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
class CovidInfDegDataset(Dataset):
def __init__(self, img_list, split='train', input_size=(256, 256, 64)):
self.input_x = input_size[0]
self.input_y = input_size[1]
self.input_z = input_size[2]
self.img_list = []
self.minindex_list = []
self.maxindex_list = []
self.infidx = []
self.inflines = []
self.img_list = []
self.split = split
for img_path in img_list:
st_index = img_path.rfind('_')
end_index = img_path.rfind('.')
label = int(img_path[st_index + 1:end_index])
if label >= 3:
self.img_list.extend([img_path, img_path, img_path, img_path, img_path])
else:
self.img_list.append(img_path)
# print('Total dataset %d: ' % (len(self.img_list)))
def __len__(self):
return len(self.img_list)
def __getitem__(self, index):
# while True:
img_path = self.img_list[index]
# print(img_path)
scans = np.load(img_path)
img = scans[0]
coarse_seg = scans[1]
minx, maxx, miny, maxy, minz, maxz = min_max_voi(coarse_seg, superior=3, inferior=3)
patch = img[minx: maxx, miny: maxy, minz: maxz]
bagging_imgs = agumentation_img_3d(patch, self.input_x, self.input_y, self.input_z)
bagging_imgs = torch.from_numpy(np.expand_dims(bagging_imgs, 0))
st_index = img_path.rfind('_')
end_index = img_path.rfind('.')
image_label = int(img_path[st_index + 1:end_index])
# if image_label >= 3:
# l = 1
# else:
# l = 0
if image_label >= 3:
l = 2
else:
l = image_label - 1
return {
"image_patch": bagging_imgs,
'image_label': l,
}
class CovidInfDegDatasetMIL(Dataset):
def __init__(self, img_list, input_size=(256, 256, 64), generate_bag=6, seg_bagging_aug=None):
self.input_x = input_size[0]
self.input_y = input_size[1]
self.input_z = input_size[2]
self.generate_bag = generate_bag
self.img_list = []
self.minindex_list = []
self.maxindex_list = []
self.infidx = []
self.inflines = []
self.img_list = img_list
self.seg_bagging_aug = seg_bagging_aug
def __len__(self):
return len(self.img_list)
def __getitem__(self, index):
# while True:
img_path = self.img_list[index]
# print(img_path)
scans = np.load(img_path)
img = scans[0]
coarse_seg = scans[1]
bagging_imgs = self.seg_bagging_aug(img.copy(), coarse_seg, self.generate_bag, self.input_x, self.input_y,
self.input_z)
st_index = img_path.rfind('_')
end_index = img_path.rfind('.')
image_label = int(img_path[st_index + 1:end_index])
# print(img.shape)
# print(cropp_infection.shape)
# print('a %d,b %d,c %d' % (a, b, c))
return {
"image_patch": bagging_imgs,
'image_label': image_label,
}
class CovidInf50CoarseSegDataset(Dataset):
def __init__(self, root_dir, input_size=(256, 256, 64), generate_each=6):
self.input_x = input_size[0]
self.input_y = input_size[1]
self.input_z = input_size[2]
self.root_dir = root_dir
self.generate_each = generate_each
self.img_list = []
self.minindex_list = []
self.maxindex_list = []
self.infidx = []
self.inflines = []
img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
for idx in range(len(img_list)):
file_name = basename(img_list[idx])[:-4]
# if idx > 3:
# break
print(img_list[idx])
self.img_list.append(img_list[idx])
scans = np.load(img_list[idx])
values = np.loadtxt(join(root_dir, file_name + '_inf.txt'), delimiter=' ')
minindex = np.min(values, axis=0)
maxindex = np.max(values, axis=0)
minindex = np.array(minindex, dtype='int')
maxindex = np.array(maxindex, dtype='int')
minindex[0] = max(minindex[0] - 3, 0)
minindex[1] = max(minindex[1] - 3, 0)
minindex[2] = max(minindex[2] - 3, 0)
maxindex[0] = min(scans[0].shape[0], maxindex[0] + 3)
maxindex[1] = min(scans[0].shape[1], maxindex[1] + 3)
maxindex[2] = min(scans[0].shape[2], maxindex[2] + 3)
self.minindex_list.append(minindex)
self.maxindex_list.append(maxindex)
f2 = open(join(root_dir, file_name + '_inf.txt'), 'r')
liverline = f2.readlines()
self.inflines.append(liverline)
self.infidx.append(len(liverline))
f2.close()
del scans
def __len__(self):
return int(self.generate_each * len(self.img_list))
def __getitem__(self, index):
# while True:
count = index // self.generate_each
scans = np.load(self.img_list[count])
img = scans[0]
infection = scans[1]
minindex = self.minindex_list[count]
maxindex = self.maxindex_list[count]
lines = self.inflines[count]
numid = self.infidx[count]
# randomly scale
scale = np.random.uniform(0.8, 1.2)
deps = int(self.input_x * scale)
rows = int(self.input_y * scale)
cols = int(self.input_z)
sed = np.random.randint(1, numid)
cen = lines[sed - 1]
cen = np.fromstring(cen, dtype=int, sep=' ')
a = int(min(max(minindex[0] + deps / 2, cen[0]), maxindex[0] - deps / 2 - 1))
b = int(min(max(minindex[1] + rows / 2, cen[1]), maxindex[1] - rows / 2 - 1))
c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
# print(c)
c = c if c - cols // 2 >= 0 else cols // 2
c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
# print(c)
# print(minindex)
# print(maxindex)
# print(cen)
cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
c - cols // 2: c + cols // 2].copy()
cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
c - cols // 2:c + cols // 2].copy()
# print(img.shape)
# print(cropp_infection.shape)
# print('a %d,b %d,c %d' % (a, b, c))
return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
def CovidInfDegData(root_dir, npy_prefix='mstudy*'):
img_list = sorted(glob(join(root_dir, npy_prefix + '.npy')), reverse=False)
labels = []
imgs = []
for img_path in img_list:
st_index = img_path.rfind('_')
end_index = img_path.rfind('.')
label = int(img_path[st_index + 1:end_index])
if label == 0:
continue
# if label >= 3:
# l = 1
# else:
# l = 0
if label >= 3:
l = 2
else:
l = label - 1
labels.append(l)
imgs.append(img_path)
print('total imgs %d' % (len(imgs)))
return imgs, labels
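# Illustrative usage sketch, not part of the original module. The dataset root, the
# normalisation statistics and the loader settings below are hypothetical placeholders;
# only the class and its constructor arguments are taken from the code above, and the
# sketch assumes the preprocessed *.npy volumes and matching *_inf.txt files exist.
if __name__ == '__main__':
    from glob import glob
    from os.path import join
    from torch.utils.data import DataLoader
    example_root = './data/COVID-19-CT'  # hypothetical path
    example_list = sorted(glob(join(example_root, '*.npy')), reverse=True)
    example_dataset = CovidInf5fold2dAugSegDataset(example_root, example_list,
                                                   input_size=(256, 256, 64),
                                                   generate_each=6, mean=0.0, std=1.0,
                                                   pos=0.5)
    example_loader = DataLoader(example_dataset, batch_size=2, shuffle=True, num_workers=0)
    for example_batch in example_loader:
        print(type(example_batch))  # exact structure depends on agumentation_img_inf_2d
        break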
| 44.673203
| 121
| 0.549943
|
6dcce1cdb21534569ec7f201da65d36a51f36c07
| 4,865
|
py
|
Python
|
statdyn/simulation/equilibrate.py
|
malramsay64/MD-Molecules-Hoomd
|
c3f2e83404cc9be6731ddd40983c6fa5b2ca9a2d
|
[
"MIT"
] | 1
|
2017-09-04T15:01:10.000Z
|
2017-09-04T15:01:10.000Z
|
statdyn/simulation/equilibrate.py
|
malramsay64/MD-Molecules-Hoomd
|
c3f2e83404cc9be6731ddd40983c6fa5b2ca9a2d
|
[
"MIT"
] | null | null | null |
statdyn/simulation/equilibrate.py
|
malramsay64/MD-Molecules-Hoomd
|
c3f2e83404cc9be6731ddd40983c6fa5b2ca9a2d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""A series of methods for the equilibration of configurations."""
import logging
import hoomd
import hoomd.md
import numpy as np
from .helper import dump_frame, set_dump, set_integrator, set_thermo
from .initialise import initialise_snapshot, make_orthorhombic
from .params import SimulationParams, paramsContext
logger = logging.getLogger(__name__)
def equil_crystal(snapshot: hoomd.data.SnapshotParticleData,
sim_params: SimulationParams,
) -> hoomd.data.SnapshotParticleData:
"""Equilbrate crystal."""
logger.debug('Simulation steps: %d', sim_params.num_steps)
temp_context = hoomd.context.initialize(sim_params.hoomd_args)
sys = initialise_snapshot(
snapshot=snapshot,
context=temp_context,
molecule=sim_params.molecule
)
with temp_context:
set_integrator(
sim_params=sim_params,
prime_interval=307,
crystal=True,
)
set_dump(sim_params.filename(prefix='dump'),
dump_period=sim_params.output_interval,
group=sim_params.group,)
set_thermo(sim_params.filename(prefix='equil'),
thermo_period=int(np.ceil(sim_params.output_interval/10)),
rigid=False,)
logger.debug('Running crystal equilibration for %d steps.', sim_params.num_steps)
hoomd.run(sim_params.num_steps)
logger.debug('Crystal equilibration completed')
dump_frame(sim_params.outfile, group=sim_params.group, extension=False)
return make_orthorhombic(sys.take_snapshot())
def equil_interface(snapshot: hoomd.data.SnapshotParticleData,
sim_params: SimulationParams,
) -> hoomd.data.SnapshotParticleData:
"""Equilbrate an interface at the desired temperature.
This is first done by equilibrating the crystal phase, which once completed
the liquid phase is equilibrated.
"""
if getattr(sim_params, 'init_temp', None) is None:
with paramsContext(sim_params, num_steps=2000, tauP=8, tau=8):
logger.debug('sim_params Steps: %d', sim_params.num_steps)
snapshot = equil_crystal(snapshot, sim_params)
logger.debug('Hoomd Arguments: %s', sim_params.hoomd_args)
temp_context = hoomd.context.initialize(sim_params.hoomd_args)
sys = initialise_snapshot(
snapshot=snapshot,
context=temp_context,
molecule=sim_params.molecule,
)
with temp_context:
logger.debug('Entering temporary context')
interface = _interface_group(sys)
# Set mobile group for integrator
with paramsContext(sim_params, group=interface):
set_integrator(sim_params=sim_params, crystal=True)
set_dump(sim_params.filename(prefix='dump'),
dump_period=sim_params.output_interval,
group=sim_params.group)
set_thermo(sim_params.filename(prefix='equil'),
thermo_period=int(np.ceil(sim_params.output_interval/10)),
rigid=False)
hoomd.run(sim_params.num_steps)
dump_frame(sim_params.outfile, group=sim_params.group, extension=False)
return sys.take_snapshot(all=True)
def equil_liquid(snapshot: hoomd.data.SnapshotParticleData,
sim_params: SimulationParams,
) -> hoomd.data.SnapshotParticleData:
"""Equilibrate a liquid configuration."""
temp_context = hoomd.context.initialize(sim_params.hoomd_args)
sys = initialise_snapshot(
snapshot=snapshot,
context=temp_context,
molecule=sim_params.molecule
)
with temp_context:
set_integrator(sim_params=sim_params,)
set_thermo(sim_params.filename('log'),
thermo_period=sim_params.output_interval)
hoomd.run(sim_params.num_steps)
logger.debug('Outfile: %s', sim_params.outfile)
dump_frame(sim_params.outfile, group=sim_params.group, extension=False)
return sys.take_snapshot(all=True)
def _interface_group(sys: hoomd.data.system_data,
stationary: bool=False):
stationary_group = hoomd.group.cuboid(
name='stationary',
xmin=-sys.box.Lx/3,
xmax=sys.box.Lx/3,
ymin=-sys.box.Ly/3,
ymax=sys.box.Ly/3,
)
if stationary:
return hoomd.group.intersection(
'rigid_stationary',
stationary_group,
hoomd.group.rigid_center()
)
return hoomd.group.intersection(
'rigid_mobile',
hoomd.group.difference('mobile', hoomd.group.all(), stationary_group),
hoomd.group.rigid_center(),
)
| 34.260563
| 89
| 0.664748
|
9b4e915f229c05f26885449628da70f2da25386b
| 17,459
|
py
|
Python
|
sie_get_clients/sie_get_python/sie_get_py.py
|
farsightsec/blog-code
|
e0e46ed4b93debc3eaadec2c3019f9538c324aee
|
[
"ECL-2.0",
"Apache-2.0"
] | 11
|
2016-07-02T19:00:02.000Z
|
2021-03-06T20:48:13.000Z
|
sie_get_clients/sie_get_python/sie_get_py.py
|
farsightsec/blog-code
|
e0e46ed4b93debc3eaadec2c3019f9538c324aee
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-08-04T09:53:54.000Z
|
2021-08-10T22:32:42.000Z
|
sie_get_clients/sie_get_python/sie_get_py.py
|
farsightsec/blog-code
|
e0e46ed4b93debc3eaadec2c3019f9538c324aee
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2015-03-16T15:06:42.000Z
|
2019-04-08T16:07:58.000Z
|
#!/usr/bin/env python3
"""This script demonstrates use of the SIE-Batch API from Python3"""
# Lint with $ pylint sie_get_py.py (assumes .pylintrc file in dir)
import calendar
import datetime
from datetime import datetime
from io import BytesIO
import json
from os import path
from pathlib import Path
import re
import sys
import time
from time import strftime
import pycurl
endpoint = 'batch.sie-remote.net'
useproxy = False # note: 'false' will not work
# -----------------------------------------------------------------------------
#
def getkeyfromlocalfile():
"""Retrieves the SIE-Batch API key"""
filepath = str(Path.home()) + "/.sie-get-key.txt"
if not path.exists(filepath):
print("\nERROR:\n\n No SIE-Batch API keyfile at "+filepath)
sys.exit(1)
with open(filepath) as stream:
myapikey = stream.read().rstrip()
return myapikey
# -----------------------------------------------------------------------------
#
def make_query(url, useproxy, params, outputfilename):
"""make query"""
if outputfilename != '-999':
try:
f = open(outputfilename, "wb")
except IOError:
sys.exit("error opening output file for results")
else:
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json'])
c.setopt(pycurl.POST, True)
c.setopt(pycurl.POSTFIELDS, params)
c.setopt(pycurl.FOLLOWLOCATION, 1)
c.setopt(pycurl.CONNECTTIMEOUT, 300)
c.setopt(pycurl.TIMEOUT, 86400)
c.setopt(pycurl.USERAGENT, 'sie_get_py/1.0')
# we're going to write the actual data files directly to the outputfile
# other stuff (apikey check, channel listing, etc.) we're just going
# to write to a buffer (and then read that))
if outputfilename == '-999':
c.setopt(pycurl.WRITEDATA, buffer)
else:
c.setopt(pycurl.WRITEDATA, f)
if useproxy:
c.setopt(pycurl.PROXY, '127.0.0.1')
c.setopt(pycurl.PROXYPORT, 1080)
c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
tries = 3
rc = ''
while tries >= 1:
c.perform()
rc = c.getinfo(c.RESPONSE_CODE)
# if writing to a buffer, we need to extract results and change
# from a bytestring to a string
if outputfilename == '-999':
body = buffer.getvalue()
content = body.decode('iso-8859-1')
# successful transfer? if so, break out of the loop
# if not, try it again
#pylint: disable=no-else-break
if rc == 200:
break
        else:
            print('Problem in make_query: response code='+str(rc))
            tries -= 1
#pylint: disable=no-else-return
if outputfilename == '-999':
return content
else:
sys.exit(0)
#pylint: enable=no-else-break
# -----------------------------------------------------------------------------
def validateapikeyonline(endpoint, useproxy):
""" check the API key for validity on the live SIE-Batch API server """
myapikeyval = getkeyfromlocalfile()
params = {'apikey' : myapikeyval}
params2 = json.dumps(params)
queryURL = 'https://' + endpoint + '/siebatchd/v1/validate'
returned_content = make_query(queryURL, useproxy, params2, '-999')
returned_content_json_format = json.loads(returned_content)
status = returned_content_json_format['_status']
return status
# -----------------------------------------------------------------------------
#
def format_and_printout_the_chan_list(chan_list):
""" we have the channel data, now format and print it out in a report """
chan_list_json_format = json.loads(chan_list)
new_hash = {}
#pylint: disable=unused-variable
for k, v in chan_list_json_format.items():
#pylint: enable=unused-variable
keystring = k.replace('ch', '')
keystring = keystring.rjust(3, ' ')
actual_val = chan_list_json_format[k]['description']
new_hash[keystring] = actual_val
for k, v in sorted(new_hash.items()):
print(k, new_hash[k])
sys.exit(0)
# -----------------------------------------------------------------------------
#
def list_channels(endpoint, useproxy):
""" retrieve a list of channels from the server """
myapikeyval = getkeyfromlocalfile()
params = {'apikey' : myapikeyval}
params2 = json.dumps(params)
queryURL = 'https://' + endpoint + '/siebatchd/v1/validate'
returned_content = make_query(queryURL, useproxy, params2, '-999')
returned_content_json_format = json.loads(returned_content)
extract_bit = returned_content_json_format['profile']['siebatch']
json_query_object = json.dumps(extract_bit)
format_and_printout_the_chan_list(json_query_object)
sys.exit(0)
# -----------------------------------------------------------------------------
#
def format_and_printout_chan_time_limits(chan, earliest_time_string, \
latest_time_string, volume):
""" print a summary of available channel date range and volume """
# take the channel status parameters and print them out in a little report
chan = chan.rjust(4)
if int(volume) >= 4:
volume = '{:,d}'.format(int(volume))
volume = volume.rjust(16)
# could add a header, but it's pretty self-obvious, right?
# printf('chan earliest datetime latest datetime octets\n')
print(chan+' "'+earliest_time_string+'" '+\
'"'+latest_time_string+'" '+volume)
sys.exit(0)
# -----------------------------------------------------------------------------
#
def show_intervals(endpoint, useproxy, chan_to_check):
""" get the starting and stopping date range and volume """
# with square brackets
newchan_to_check = '[' + chan_to_check + ']'
# no brackets, but with ch literal prefix
chan_with_prefix = 'ch' + chan_to_check
myapikeyval = getkeyfromlocalfile()
params = {'apikey': myapikeyval, 'channels': newchan_to_check}
params2 = json.dumps(params)
# {"channels":"[212]","apikey":"blah"} needs to become
# {"channels":[212],"apikey":"blah"} (e.g., no quotes around [chan])
params2 = params2.replace('"[', '[')
params2 = params2.replace(']"', ']')
url = 'https://' + endpoint + '/siebatchd/v1/siebatch/chdetails'
response = make_query(url, useproxy, params2, '-999')
decoded_results = json.loads(response)
earliest_time_string = \
decoded_results['channels'][chan_with_prefix]['earliest']
latest_time_string = \
decoded_results['channels'][chan_with_prefix]['latest']
size_string = \
decoded_results['channels'][chan_with_prefix]['size']
return (earliest_time_string, latest_time_string, size_string)
# ----------------------------------------------------------------------------
#
def fixup_ending_datetime_in_the_future():
""" if the ending date is in the future, reel it back in! """
# Replace future times with the current GMT time.
# The following returns a datetime structure
epochseconds = time.gmtime()
enddatetime = time.strftime('%Y-%m-%d %H:%M:%S', epochseconds)
enddatetime2 = re.sub(r'..$', '00', enddatetime)
return enddatetime2
# ----------------------------------------------------------------------------
#
def string_fmt_time_to_seconds(string_format_time):
""" utility function to convert a string format time to epoch seconds """
dt = datetime.strptime(string_format_time, "%Y-%m-%d %H:%M:%S")
epoch_seconds = calendar.timegm(dt.utctimetuple())
return epoch_seconds
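# Worked example (added for illustration, not in the original script):
#   string_fmt_time_to_seconds("2020-01-07 00:13:00") -> 1578355980
# i.e. the quoted UTC datetime is 1578355980 seconds after the Unix epoch
# (1577836800 for 2020-01-01 00:00:00, plus six days and thirteen minutes).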
# -----------------------------------------------------------------------------
#
def check_channel(endpoint, useproxy, chanflagstring, startdatetime, \
enddatetime):
""" make sure that the channel is available and the dates are in-range """
# get the available datetime range for this channel
#pylint:disable=unused-variable
    (earliest_time_string, latest_time_string, size_string) = \
        show_intervals(endpoint, useproxy, chanflagstring)
#pylint:enable=unused-variable
# convert the requested and available start datetimes into Un*x seconds
requested_start_seconds = string_fmt_time_to_seconds(startdatetime)
earliest_date_seconds = string_fmt_time_to_seconds(earliest_time_string)
requested_stop_seconds = string_fmt_time_to_seconds(enddatetime)
latest_date_seconds = string_fmt_time_to_seconds(latest_time_string)
# start datetime must be earlier than stop date time
if (requested_stop_seconds - requested_start_seconds) < 0:
sys.exit('Start datetime must be earlier than stop datetime')
# start datetime may not be earlier than earliest data available
if (requested_start_seconds - earliest_date_seconds) < 0:
sys.exit('Start datetime out of range. Must be no earlier than ' + \
earliest_time_string)
# end datetime may not be in the future
if (requested_stop_seconds - latest_date_seconds) > 0:
enddatetime = fixup_ending_datetime_in_the_future()
return (startdatetime, enddatetime)
# ----------------------------------------------------------------------------
#
def validate_input_time_date_format(mydatetime):
""" make sure the user has followed the required datetime format """
# parameter is datetime to format check. if invalid, abort run.
# if valid, return the validated (but unchanged) datetime (could skip
# doing this for now, but at some point we might decide to fix up bad
# string formatting as a convenience to the user, so...)
# check the format with a regex
    if not re.match(r'\A\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\Z', mydatetime):
        print("bad datetime format -- must be \"YYYY-MM-DD HH:MM:SS\"\n")
sys.exit(1)
return mydatetime
# -----------------------------------------------------------------------------
#
def zero_unused_seconds(mydatetime):
""" if seconds are non-zero in the time stamps, zero them out """
# since SIE-Batch API does not care about seconds, we set them to zero
mydatetime2 = re.sub(r'..$', '00', mydatetime)
return mydatetime2
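# Worked example (added for illustration, not in the original script):
#   zero_unused_seconds("2020-01-07 00:13:45") -> "2020-01-07 00:13:00"
# only the trailing two characters (the seconds field) are rewritten to "00".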
# -----------------------------------------------------------------------------
#
def convert_relative_times_to_real_datetimes(minutesback):
""" one option is relative times; if we get one, make it a real time """
# in relative format, the initial "ending time" is actually the minutes
# worth of data we want to retrieve
# the "real" ending datetime will be created from the current GMT time
# we will be doing math on the epoch seconds
myformat = '%Y-%m-%d %H:%M:%S'
endingtime = time.gmtime()
epochseconds = calendar.timegm(endingtime)
# now compute the formatted ending date time in standard YYYY-MM-DD HH:MM:SS
enddatetime = strftime(myformat, endingtime)
# find just the seconds from that string
extraseconds = int(enddatetime[-2:])
# subtract the seconds from the full datetime to end up with 00 seconds
endingtime_seconds = int(epochseconds) - extraseconds
# let's now work on the starting time
# we compute the "real" starting datetime by offsetting backwards
# our to-be-modified datetime is in epoch seconds, so convert min to seconds
mysecondsback = int(minutesback) * 60
startseconds = endingtime_seconds - mysecondsback
startdatetime = strftime(myformat, time.gmtime(startseconds))
enddatetime = strftime(myformat, time.gmtime(endingtime_seconds))
return (startdatetime, enddatetime)
# -----------------------------------------------------------------------------
#
def fix_times():
""" handles calling the rest of the routines to fix up times """
# arguments come in from the command line so we don't pass them in
# chanflagstring = str(sys.argv[1])
startdatetime = str(sys.argv[2])
enddatetime = str(sys.argv[3])
# if relative times, replace the ending time with the current GMT time
# set the starting time back by the specified number of minutes
if startdatetime == 'now':
(startdatetime, enddatetime) = \
convert_relative_times_to_real_datetimes(enddatetime)
else:
# we have real timedate stamps for starting and ending datetimes
# process the starting datetime value...
# correctly written datetime value?
# also zero the seconds if present (SIE-Batch API doesn't use them)
validate_input_time_date_format(startdatetime)
startdatetime = zero_unused_seconds(startdatetime)
# repeat for the ending datetime value...
validate_input_time_date_format(enddatetime)
enddatetime = zero_unused_seconds(enddatetime)
return (startdatetime, enddatetime)
# ----------------------------------------------------------------------------
#
# https://stackoverflow.com/questions/1265665/how-can-i-check-if-a-string-represents-an-int-without-using-try-except
#
def isInt_try(v):
""" convenience function to see if a string might be integer-ish """
# pylint: disable=unused-variable,multiple-statements,bare-except
try: i = int(v)
except: return False
return True
# -----------------------------------------------------------------------------
#
def build_filename(chanflagstring, startdatetime, enddatetime):
"""construct the filename from the command line arguments and return it"""
string1 = startdatetime.replace(' ', '@')
string2 = enddatetime.replace(' ', '@')
nmsgchannels = ["204", "206", "207", "208", "221"]
if chanflagstring in nmsgchannels:
filetype = ".nmsg"
else:
filetype = ".jsonl"
outputfilename = "sie-ch" + chanflagstring + "-{" + string1 + \
"}-{" + string2 + "}" + filetype
return outputfilename
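# Worked example (illustrative only): build_filename("212",
# "2020-01-07 00:13:00", "2020-01-07 00:28:00") returns
# "sie-ch212-{2020-01-07@00:13:00}-{2020-01-07@00:28:00}.jsonl", while a channel
# listed in nmsgchannels, such as "204", would get the ".nmsg" extension instead.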
# -----------------------------------------------------------------------------
#
def print_usage_info():
""" deliver a succinct usage summary if needed """
print('''
Usage:
sie_get_py channel "now" minutesBack
Example: sie_get_py 212 now 15
OR
sie_get_py channel "startdatetime" "enddatetime"
Example: sie_get_py 212 "2020-01-07 00:13:00" "2020-01-07 00:28:00"
Convenience functions:
Check SIE-Batch API key: sie_get_py checkkey
Get a listing of channels: sie_get_py channels
Get datetime range and volume for a channel: sie_get_py 212
Notes:
Datetimes are UTC and must be quoted. (Current UTC datetime: $ date -u )
Zero pad any single digit months, days, hours, minutes or seconds.
Seconds must be entered as part of the UTC datetimes (but are ignored)
Ending datetime in the future? It will be clamped to current datetime.
''')
sys.exit(1)
# ----------------------------------------------------------------------------
#
def one_real_arg(endpoint, useproxy, first_arg):
""" sometimes we only see one option on the command line; process it """
defined_channels = {'24', '25', '27', '42', '80', '114', '115', \
'204', '206', '207', '208', '211', '212', '213', '214', '221'}
if first_arg == 'channels':
# list channels for the user
list_channels(endpoint, useproxy)
sys.exit(0)
elif first_arg == 'checkkey':
# check the user's key for validity
status = validateapikeyonline(endpoint, useproxy)
print("API key status is "+status)
sys.exit(0)
elif (isInt_try(first_arg) and (first_arg in defined_channels)):
# list details about the specified channel
(earliest, latest, datasize) = show_intervals(endpoint, \
useproxy, first_arg)
format_and_printout_chan_time_limits(first_arg, earliest, \
latest, datasize)
sys.exit(0)
elif (not(first_arg in defined_channels) and (isInt_try(first_arg))):
# the requested channel is not one we offer, so...
print("Channel not offered via this script")
sys.exit(0)
else:
print_usage_info()
sys.exit(0)
# ----------------------------------------------------------------------------
#
def three_real_args(endpoint, useproxy):
""" other times we may see three arguments on the command line... """
chanflagstring = str(sys.argv[1])
(startdatetime, enddatetime) = fix_times()
(startdatetime, enddatetime) = check_channel(endpoint, useproxy, \
chanflagstring, startdatetime, enddatetime)
outputfilename = build_filename(chanflagstring, startdatetime, enddatetime)
myapikey = getkeyfromlocalfile()
params = {"apikey": myapikey, "channel": int(chanflagstring), \
"start_time": startdatetime, "end_time": enddatetime}
params2 = json.dumps(params)
queryURL = "https://" + endpoint + "/siebatchd/v1/siebatch/chfetch"
make_query(queryURL, useproxy, params2, outputfilename)
sys.exit(0)
# ============================================================================
# main
if len(sys.argv) == 1:
print_usage_info()
sys.exit(0)
elif len(sys.argv) >= 2:
first_arg = sys.argv[1]
command_line_arg_count = len(sys.argv)-1
if command_line_arg_count == 1:
one_real_arg(endpoint, useproxy, first_arg)
sys.exit(0)
elif command_line_arg_count == 3:
three_real_args(endpoint, useproxy)
sys.exit(0)
elif (command_line_arg_count <= 0) or (command_line_arg_count >= 4) or \
(command_line_arg_count == 2):
print_usage_info()
sys.exit(0)
| 36.989407
| 116
| 0.616129
|
7ee610860134b77cbf38c732c6c184f56c814977
| 148
|
py
|
Python
|
recaptcha_form/account_backend/forms.py
|
ntucker/django-recaptcha-form
|
7d803d5ad7efb3cfa6b65b0c3a8a1289fefbcd9b
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
recaptcha_form/account_backend/forms.py
|
ntucker/django-recaptcha-form
|
7d803d5ad7efb3cfa6b65b0c3a8a1289fefbcd9b
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
recaptcha_form/account_backend/forms.py
|
ntucker/django-recaptcha-form
|
7d803d5ad7efb3cfa6b65b0c3a8a1289fefbcd9b
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from recaptcha_form.forms import RecaptchaForm
from account.forms import SignupForm
class RecaptchaSignupForm(RecaptchaForm, SignupForm):
pass
| 24.666667
| 53
| 0.844595
|
70822d2ee74a32ed71d9c11e5591a664f6cfeb7a
| 12,725
|
py
|
Python
|
sfs/core.py
|
rhino2rhonda/vfs
|
8f55ae36d34871fc649092f2a28b91465a495341
|
[
"MIT"
] | null | null | null |
sfs/core.py
|
rhino2rhonda/vfs
|
8f55ae36d34871fc649092f2a28b91465a495341
|
[
"MIT"
] | null | null | null |
sfs/core.py
|
rhino2rhonda/vfs
|
8f55ae36d34871fc649092f2a28b91465a495341
|
[
"MIT"
] | null | null | null |
import os
import collections
import itertools
import shutil
import sfs.file_system as fs
import sfs.log_utils as log
# Constants
constants = {
'SFS_DIR': 'sfs',
'SFS_META_FILE': 'meta',
'COLLECTION_DIR': 'cols',
'COLLECTION_STATS_DIR': 'stats',
'SFS_FILE_EXTENSION': '.sfs',
}
# Tuple of altered file system nodes
SfsUpdates = collections.namedtuple('SFSUpdates', 'added deleted updated')
class SFS:
"""
SFS - Symbolic File System
- This class encapsulates all operations and queries related to a single SFS
- An SFS is a managed directory which can contain symbolic links to various other directories, discs and removable
media
- A directory can be added to an SFS as a named collection which involves addition of symbolic links to each file
(and symbolic link) in the added directory to the SFS instead of adding the actual files
- SFS maintains the metadata of all files such that they can queried even when the actual disc or removable media
is unavailable. This is especially useful while organizing data or in situations where immediate access to files is
not important or relevant
- A created SFS instance can be obtained by specifying any path within the SFS using the method 'SFS.get_by_path'
- An SFS cannot be nested within another SFS
"""
def __init__(self, root):
self.root = root
self.collections = {}
@staticmethod
def init_sfs(path):
"""
Initialize an SFS in an empty directory
- Creates a hidden directory for all SFS metadata
- Persists an SFS metadata file
"""
fs.create_hidden_directory(constants['SFS_DIR'], path)
sfs = SFS(path)
sfs._save()
@staticmethod
def get_by_path(path):
"""
Check whether 'path' lies within an SFS, ie, if any ancestor is a valid SFS root directory
:return: SFS instance if found or None
"""
while path != '/':
if SFS._is_sfs_root(path):
# Create an instance and load persisted metadata
sfs = SFS(path)
sfs._load()
return sfs
path = os.path.dirname(path)
return None
@staticmethod
def get_sfs_dir(root):
"""Compute SFS directory given the path of an SFS root directory"""
return fs.get_hidden_directory_path(constants['SFS_DIR'], root)
@staticmethod
def get_collections_dir(root):
"""Compute path of collections metadata directory given the path of an SFS root directory"""
return os.path.join(SFS.get_sfs_dir(root), constants['COLLECTION_DIR'])
def _save(self):
"""Persist the metadata of the current SFS"""
# fs.save_pickled(self.meta, SFS.get_sfs_dir(self.root), constants['SFS_META_FILE'])
save_dict = {
'collections': self.collections
}
fs.save_pickled(save_dict, SFS.get_sfs_dir(self.root), constants['SFS_META_FILE'])
def _load(self):
"""Load the metadata of the current SFS"""
save_dict = fs.load_unpickled(SFS.get_sfs_dir(self.root), constants['SFS_META_FILE'])
if type(save_dict) is dict and 'collections' in save_dict:
self.collections = save_dict['collections']
else:
log.logger.warn('Invalid metadata for SFS with root at "%s"', self.root)
@staticmethod
def _is_sfs_root(sfs_root):
"""Check if a directory is a valid SFS root. It must contain an SFS directory and within it metadata"""
meta_path = os.path.join(SFS.get_sfs_dir(sfs_root), constants['SFS_META_FILE'])
return os.path.exists(meta_path)
def add_collection(self, name, base):
"""
Add a directory located at 'base' as a collection named 'name' to the current SFS
- Updates SFS metadata with new collection details
- Creates a collection metadata directory
- Adds links to all files in the directory
:return: A named tuple of type SfsUpdates indicating the number of files added
"""
col_dir = os.path.join(SFS.get_collections_dir(self.root), name)
col = Collection(name, base, self.root, col_dir)
os.makedirs(col_dir)
self.collections[name] = col.get_save_dict()
self._save()
return col.add_or_update()
def get_collection_by_name(self, name):
"""
Look up the SFS metadata for a collection with the specified name
:param name: Collection Name
:return: Instance of Collection if found else None
"""
return Collection.form_save_dict(
self.collections[name],
self.root,
os.path.join(SFS.get_collections_dir(self.root), name)
) if name in self.collections else None
def get_collection_by_path(self, path):
"""
Look up the SFS metadata for a collection whose source directory contains the specified path
:param path: A path within the source directory of a collection
:return: Instance of Collection if found else None
"""
path = fs.expand_path(path)
cols = self.get_all_collections()
while path != '/':
for col in cols.values():
if path == col.base:
return col
path = os.path.dirname(path)
return None
def get_all_collections(self):
"""Return all collections as a map of Collection Name to the corresponding Collection instance"""
return {name: self.get_collection_by_name(name) for name in self.collections.keys()}
def del_collection(self, name):
"""
Delete the metadata associated with a collection
- Removes collection details from SFS metadata
- Deletes the collection metadata directory
- Does NOT delete links corresponding to the deleted collection and any such links become orphan links
"""
col = self.get_collection_by_name(name)
self.collections.pop(name)
shutil.rmtree(col.col_dir)
self._save()
def del_orphans(self, col_root=None):
"""
Deletes orphan and foreign links from the current SFS
An orphan link is part of an existing collection but does not have associated metadata, for example, when a
collection is synced, metadata of deleted files is removed and associated links may become orphans
A foreign link is one that is not part of any collection in the current SFS
:param col_root: If not None, the deletion is restricted to links that point within the specified path
:return: A named tuple of type SfsUpdates indicating the number of links deleted
"""
def _del_cond_all(path):
"""Return True for a foreign or orphan link given its source path"""
col = self.get_collection_by_path(path)
return col is None or col.get_stats(path) is None
def _del_cond_by_root(path):
"""Return True for foreign or orphan links within the psecified collection root given the source path"""
return fs.is_parent_dir(path, col_root) and _del_cond_all(path)
_del_cond = _del_cond_all if col_root is None else _del_cond_by_root
deleted = 0
for root, files, dirs, links in SFS.walk(fs.walk_dfs, self.root):
# Check for foreign or orphan links and delete them
for lnk in links:
if _del_cond(os.readlink(lnk.path)):
deleted += 1
os.unlink(lnk.path)
return SfsUpdates(added=0, deleted=deleted, updated=0)
@staticmethod
def walk(walk_gen, start_dir):
"""
Enumerate paths inside an SFS by excluding SFS specific files and directories
Exclusions: SFS directory, Files with SFS specific extensions
:param walk_gen: A path generator, for example, fs.walk_bfs
:param start_dir: Directory within an SFS whose contents are to be enumerated
"""
sfs = SFS.get_by_path(start_dir)
_filter_dirs = {
fs.get_hidden_directory_path(constants['SFS_DIR'], sfs.root)
}
_filter_extensions = {
constants['SFS_FILE_EXTENSION']
}
for root, files, dirs, links in walk_gen(start_dir):
dirs[:] = list(filter(lambda n: n.path not in _filter_dirs, dirs))
files[:] = list(filter(lambda n: os.path.splitext(n.name)[1] not in _filter_extensions, files))
yield root, files, dirs, links
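# Illustrative usage sketch (not part of the library); the paths and collection
# name below are hypothetical and only show how the SFS API above fits together.
def _example_sfs_usage():
    SFS.init_sfs('/tmp/my_sfs')                       # turn an empty directory into an SFS root
    sfs = SFS.get_by_path('/tmp/my_sfs')              # look the SFS up from any path inside it
    added = sfs.add_collection('music', '/media/usb/music')  # link an external directory
    print(added)                                      # SfsUpdates(added=..., deleted=0, updated=0)
    for name, col in sfs.get_all_collections().items():
        print(name, col.base)
    sfs.del_orphans()                                 # drop links whose metadata no longer exists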
class Collection:
"""
SFS Collection
- This class encapsulates all operations and queries related to a single SFS collection
- A collection is a directory that has been added to an SFS; it comprises links to the contents of the
directory as well as their metadata
- Collection instances are associated to and accessible through an SFS instance
"""
def __init__(self, name, base, sfs_root, col_dir):
self.name = name
self.base = base
self.sfs_root = sfs_root
self.col_dir = col_dir
self.stats_base = os.path.join(self.col_dir, constants['COLLECTION_STATS_DIR'])
@staticmethod
def form_save_dict(col_dict, sfs_root, col_dir):
"""Initialize an instance from a persisted metadata dictionary"""
return Collection(col_dict['name'], col_dict['base'], sfs_root, col_dir)
def get_save_dict(self):
"""Return a dictionary representing the collection state to tbe persisted"""
return {
'name': self.name,
'base': self.base
}
def add_or_update(self, curr_stats=None):
"""
Adds or updates collection metadata and adds links to new collection files
:param curr_stats: A set of all paths to files that were previously added to the collection. Any path not in
this set is treated as a new collection file. If None, all files are treated as new
:return: A named tuple of type SfsUpdates indicating the number of files added or updated
"""
added = updated = 0
sfs_base = os.path.join(self.sfs_root, self.name)
for root, files, dirs, links in fs.walk_bfs(self.base):
# Compute metadata directory and SFS directory for current directory
root_rel = os.path.relpath(root, self.base)
stats_root = os.path.abspath(os.path.join(self.stats_base, root_rel))
sfs_root = os.path.abspath(os.path.join(sfs_base, root_rel))
os.makedirs(stats_root, exist_ok=True)
for node in itertools.chain(files, links):
col_file = node.path
if curr_stats is None or col_file not in curr_stats:
# Create links for new collection files that are not in curr_stats
added += 1
os.makedirs(sfs_root, exist_ok=True)
sfs_file = os.path.join(sfs_root, node.name)
fs.create_symlink(col_file, sfs_file)
else:
updated += 1
# Save metadata
stats_file = os.path.join(stats_root, node.name)
fs.save_pickled(node.stat, stats_file)
return SfsUpdates(added=added, deleted=0, updated=updated)
def update(self):
"""
Updates the metadata of the current collection
- Adds, deletes and updates collection metadata to synchronize with the actual source directory
- For new files in the collection (ones without pre-existing metadata) links are also added to the SFS
:return: A named tuple of type SfsUpdates indicating the number of files added and updated
"""
# Create a set of all existing source files in the collection
curr_stats = set(
[os.path.join(self.base, os.path.relpath(f.path, self.stats_base))
for root, files, dirs, links in fs.walk_bfs(self.stats_base) for f in files]
)
# Delete all collections metadata
shutil.rmtree(self.stats_base)
# Update metadata and links
sfs_updates = self.add_or_update(curr_stats=curr_stats)
return sfs_updates
def get_stats(self, col_path):
"""
Fetch the metadata of source file located at 'col_path' in the current SFS collection
:param col_path: Path of source file or link
:return: Instance of fs.FSNode.NodeStats if found else None
"""
rel_path = os.path.relpath(col_path, self.base)
meta_path = os.path.join(self.stats_base, rel_path)
return fs.load_unpickled(meta_path) if os.path.isfile(meta_path) else None
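# Illustrative sketch (hypothetical SFS root and collection name): re-synchronising
# a collection after its source directory has changed on disk.
def _example_collection_sync():
    sfs = SFS.get_by_path('/tmp/my_sfs')
    col = sfs.get_collection_by_name('music')
    changes = col.update()        # refresh metadata and add links for new files
    sfs.del_orphans(col.base)     # drop links for files deleted at the source
    return changes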
| 41.584967
| 119
| 0.650059
|
52a6ee70f41a8481901dfdb90a905cf9b9aa9eda
| 1,855
|
py
|
Python
|
populate/populator/common/populator_actions.py
|
jsanchezgr/lib-exo-populator
|
c8c9e7e983d91149f561b01680f2890cd2ca0629
|
[
"BSD-3-Clause"
] | null | null | null |
populate/populator/common/populator_actions.py
|
jsanchezgr/lib-exo-populator
|
c8c9e7e983d91149f561b01680f2890cd2ca0629
|
[
"BSD-3-Clause"
] | 5
|
2019-07-03T06:26:00.000Z
|
2019-08-02T10:24:46.000Z
|
populate/populator/common/populator_actions.py
|
jsanchezgr/lib-exo-populator
|
c8c9e7e983d91149f561b01680f2890cd2ca0629
|
[
"BSD-3-Clause"
] | 2
|
2019-07-31T10:54:28.000Z
|
2019-08-01T10:59:37.000Z
|
import os
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.db import connection
from django.conf import settings
from populate.define_signals import post_populate, post_migrate
from populate.populator.common.bulk_operations import BulkOperations
from populate.populator.status import PopulateStatus
from populate.apps import PopulateConfig
class PopulatorActions:
def __init__(self, stdout, stderr):
self.status = PopulateStatus()
self.cmd = BaseCommand(stdout=stdout, stderr=stderr)
def get_status(self):
return self.status.get_status()
def populate(self, items):
results = {}
for item in items:
results[item] = BulkOperations(entity=item, cmd=self.cmd).populate()
self.sql_sequence_reset()
post_populate.send(sender=PopulateConfig)
self.status.set_populated()
return results
def init(self):
self.status.set_initialized()
# Execute migrations
call_command('migrate', stdout=self.cmd.stdout, stderr=self.cmd.stderr)
post_migrate.send(sender=PopulateConfig)
def finish_flag(self):
self.cmd.stdout.write('\n\n\n\n\n\n')
self.cmd.stdout.write(
self.cmd.style.SUCCESS('Populator successfully finished!'))
def sql_sequence_reset(self):
if settings.POPULATE_REQUIRED_SEQUENCE_RESET_MODELS:
sql = call_command(
'sqlsequencereset',
*settings.POPULATE_REQUIRED_SEQUENCE_RESET_MODELS,
no_color=True,
stdout=self.cmd.stdout, stderr=self.cmd.stderr)
with connection.cursor() as cursor:
cursor.execute(sql)
def is_sql(self, file):
_, extension = os.path.splitext(file)
return extension == '.sql'
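# Illustrative sketch (not part of the package; the entity names are hypothetical):
# shows the intended call order of the PopulatorActions API defined above.
def _example_populate_run(stdout, stderr, entities=('users', 'projects')):
    actions = PopulatorActions(stdout=stdout, stderr=stderr)
    actions.init()                         # run migrations and emit post_migrate
    results = actions.populate(entities)   # bulk-load each entity, reset sequences, emit post_populate
    actions.finish_flag()                  # print the success banner
    return results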
| 33.727273
| 80
| 0.679784
|
1cf1cdf663354b7687a106b9c8b576dfc7188047
| 6,715
|
py
|
Python
|
test/functional/feature_alerts_reorg.py
|
lunatechza/bitcoinvault
|
3110c208b1946ee5f689e469e58f80926282b0f2
|
[
"MIT"
] | null | null | null |
test/functional/feature_alerts_reorg.py
|
lunatechza/bitcoinvault
|
3110c208b1946ee5f689e469e58f80926282b0f2
|
[
"MIT"
] | null | null | null |
test/functional/feature_alerts_reorg.py
|
lunatechza/bitcoinvault
|
3110c208b1946ee5f689e469e58f80926282b0f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Alerts changeover logic."""
import os
import shutil
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
connect_nodes,
get_datadir_path
)
class VaultReorgTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [
[
"-reindex",
"-txindex",
],
[
"-reindex",
"-txindex",
],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def find_address(self, listreceivedbyaddress, address):
for addr in listreceivedbyaddress:
if addr['address'] == address: return addr
def find_vout_n(self, rawtransaction, amount):
for vout in rawtransaction['vout']:
if vout['value'] == amount: return vout['n']
def setup_network(self):
self.setup_nodes()
def reset_blockchain(self):
self.stop_nodes(wait=1)
for i in range(self.num_nodes):
datadir = get_datadir_path(self.options.tmpdir, i)
if os.path.exists(datadir):
shutil.rmtree(datadir)
os.mkdir(datadir)
self.nodes = []
self.setup_chain()
self.start_nodes(extra_args=self.extra_args)
self.setup_network()
self.sync_all()
def reset_node(self, i):
self.stop_node(i, wait=1)
datadir = get_datadir_path(self.options.tmpdir, i)
if os.path.exists(datadir):
shutil.rmtree(datadir)
os.mkdir(datadir)
self.start_node(i, extra_args=self.extra_args)
def run_test(self):
self.alert_recovery_pubkey = "02ecec100acb89f3049285ae01e7f03fb469e6b54d44b0f3c8240b1958e893cb8c"
self.alert_recovery_privkey = "cRfYLWua6WcpGbxuv5rJgA2eDESWxqgzmQjKQuqDFMfgbnEpqhrP"
self.COINBASE_MATURITY = 100
self.COINBASE_AMOUNT = Decimal(175)
self.reset_blockchain()
self.log.info("Test node reorganize blocks with tx alerts")
self.test_node_reorganize_blocks_with_tx_alerts()
self.reset_blockchain()
self.log.info("Test node reorganize blocks with recovery tx")
self.test_node_reorganize_blocks_with_recovery_tx()
def test_node_reorganize_blocks_with_tx_alerts(self):
addr0 = self.nodes[0].getnewaddress()
alert_addr0 = self.nodes[0].getnewvaultalertaddress(self.alert_recovery_pubkey)
addr1 = self.nodes[1].getnewaddress()
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
# generate 1 block to addr0 and 109 blocks to addr1
self.nodes[0].generatetoaddress(1, addr0)
self.nodes[0].generatetoaddress(109, addr1) # 110
# send tx
self.nodes[0].sendtoaddress(alert_addr0['address'], 174.99)
self.nodes[0].generatetoaddress(10, addr1) # 120
# send atx
self.nodes[0].sendalerttoaddress(addr1, 174.98)
self.nodes[0].generatetoaddress(1, addr1) # 121
# confirm atx
self.nodes[0].generatetoaddress(144 + 35, addr1) # 300
# generate longer chain on node1
self.nodes[1].generatetoaddress(400, addr1) # 400
# pre-reorganization assert
assert self.nodes[0].getbestblock()['height'] == 300
assert self.nodes[0].getbalance() > 0 # coinbase (175) - spent (174.99) - fee
assert self.nodes[0].getbestblock() != self.nodes[1].getbestblock()
# synchronize nodes, which causes reorganization on node0
connect_nodes(self.nodes[1], 0)
self.sync_all()
# post-reorganization assert
assert self.nodes[1].getbestblock()['height'] == 400
assert self.nodes[0].getbalance() == 0
assert self.nodes[0].getbestblock() == self.nodes[1].getbestblock()
def test_node_reorganize_blocks_with_recovery_tx(self):
alert_addr0 = self.nodes[0].getnewvaultalertaddress(self.alert_recovery_pubkey)
other_addr0 = self.nodes[0].getnewaddress()
attacker_addr1 = self.nodes[1].getnewaddress()
# mine some coins to node0
self.nodes[0].generatetoaddress(200, alert_addr0['address']) # 200
assert self.nodes[0].getalertbalance() == (200 - self.COINBASE_MATURITY) * self.COINBASE_AMOUNT
# send atx to node1
atx_to_recover = self.nodes[0].sendalerttoaddress(attacker_addr1, 10)
atx_to_recover = self.nodes[0].gettransaction(atx_to_recover)['hex']
atx_to_recover = self.nodes[0].decoderawtransaction(atx_to_recover)
atx_fee = (200 - self.COINBASE_MATURITY) * self.COINBASE_AMOUNT - 10 - self.nodes[0].getalertbalance()
# generate block with atx above
self.nodes[0].generatetoaddress(1, alert_addr0['address']) # 201
# assert
assert self.nodes[0].getalertbalance() + 10 < (201 - self.COINBASE_MATURITY) * self.COINBASE_AMOUNT
assert atx_to_recover['txid'] in self.nodes[0].getbestblock()['atx']
# recover atx
amount_to_recover = sum([vout['value'] for vout in atx_to_recover['vout']])
assert atx_fee == self.COINBASE_AMOUNT - amount_to_recover
recovery_tx = self.nodes[0].createrecoverytransaction(atx_to_recover['txid'], {other_addr0: amount_to_recover})
recovery_tx = self.nodes[0].signrecoverytransaction(recovery_tx, [self.alert_recovery_privkey], alert_addr0['redeemScript'])
self.nodes[0].sendrawtransaction(recovery_tx['hex'])
self.nodes[0].generatetoaddress(144 + 5, alert_addr0['address']) # 350
# generate longer chain on node1
self.nodes[1].generatetoaddress(400, attacker_addr1) # 400
# pre-reorganization assert
assert self.nodes[0].getbestblock()['height'] == 350
assert self.nodes[0].getbalance() > 0 # coinbase (175) - spent (174.99) - fee
assert self.nodes[0].getbestblock() != self.nodes[1].getbestblock()
# synchronize nodes, which causes reorganization on node0
connect_nodes(self.nodes[1], 0)
self.sync_all()
self.restart_node(0, self.extra_args[0])
self.sync_all()
# post-reorganization assert
assert self.nodes[1].getbestblock()['height'] == 400
assert self.nodes[0].getbalance() == 0
assert self.nodes[0].getbestblock() == self.nodes[1].getbestblock()
if __name__ == '__main__':
VaultReorgTest().main()
| 38.371429
| 132
| 0.657632
|
0d55a3b0de938fc3a06229f94bd222710c79035d
| 9,228
|
py
|
Python
|
habitat/tasks/rearrange/multi_task/composite_sensors.py
|
jturner65/habitat-api
|
b54dd6eb4a7d99762f326d1a78035600c4e57929
|
[
"MIT"
] | 489
|
2019-02-21T21:47:40.000Z
|
2020-08-10T06:43:24.000Z
|
habitat/tasks/rearrange/multi_task/composite_sensors.py
|
jturner65/habitat-api
|
b54dd6eb4a7d99762f326d1a78035600c4e57929
|
[
"MIT"
] | 380
|
2019-02-26T00:50:48.000Z
|
2020-08-11T14:57:07.000Z
|
habitat/tasks/rearrange/multi_task/composite_sensors.py
|
jturner65/habitat-api
|
b54dd6eb4a7d99762f326d1a78035600c4e57929
|
[
"MIT"
] | 167
|
2019-02-26T00:38:30.000Z
|
2020-08-09T23:07:10.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from habitat.core.embodied_task import Measure
from habitat.core.registry import registry
from habitat.tasks.rearrange.rearrange_sensors import (
EndEffectorToObjectDistance,
ObjectToGoalDistance,
RearrangeReward,
)
from habitat.tasks.rearrange.utils import rearrange_logger
@registry.register_measure
class MoveObjectsReward(RearrangeReward):
"""
A reward based on L2 distances to object/goal.
"""
cls_uuid: str = "move_obj_reward"
@staticmethod
def _get_uuid(*args, **kwargs):
return MoveObjectsReward.cls_uuid
def __init__(self, *args, **kwargs):
self._cur_rearrange_step = 0
super().__init__(*args, **kwargs)
def reset_metric(self, *args, episode, task, observations, **kwargs):
self._cur_rearrange_step = 0
self._prev_holding_obj = False
self._did_give_pick_reward = {}
task.measurements.check_measure_dependencies(
self.uuid,
[
ObjectToGoalDistance.cls_uuid,
EndEffectorToObjectDistance.cls_uuid,
],
)
to_goal = task.measurements.measures[
ObjectToGoalDistance.cls_uuid
].get_metric()
to_obj = task.measurements.measures[
EndEffectorToObjectDistance.cls_uuid
].get_metric()
self._prev_measures = (to_obj, to_goal)
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, episode, task, observations, **kwargs):
super().update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
idxs, _ = self._sim.get_targets()
targ_obj_idx = idxs[self._cur_rearrange_step]
abs_targ_obj_idx = self._sim.scene_obj_ids[targ_obj_idx]
targ_obj_idx = str(targ_obj_idx)
num_targs = len(idxs)
to_goal = task.measurements.measures[
ObjectToGoalDistance.cls_uuid
].get_metric()
to_obj = task.measurements.measures[
EndEffectorToObjectDistance.cls_uuid
].get_metric()
is_holding_obj = self._sim.grasp_mgr.snap_idx == abs_targ_obj_idx
if is_holding_obj:
dist = to_goal[targ_obj_idx]
dist_diff = (
self._prev_measures[1][targ_obj_idx] - to_goal[targ_obj_idx]
)
else:
dist = to_obj[targ_obj_idx]
dist_diff = (
self._prev_measures[0][targ_obj_idx] - to_obj[targ_obj_idx]
)
if (
is_holding_obj
and not self._prev_holding_obj
and self._cur_rearrange_step not in self._did_give_pick_reward
):
self._metric += self._config.PICK_REWARD
self._did_give_pick_reward[self._cur_rearrange_step] = True
if (
dist < self._config.SUCCESS_DIST
and not is_holding_obj
and self._cur_rearrange_step < num_targs
):
self._metric += self._config.SINGLE_REARRANGE_REWARD
self._cur_rearrange_step += 1
self._cur_rearrange_step = min(
self._cur_rearrange_step, num_targs - 1
)
self._metric += self._config.DIST_REWARD * dist_diff
self._prev_measures = (to_obj, to_goal)
self._prev_holding_obj = is_holding_obj
@registry.register_measure
class CompositeReward(Measure):
"""
The reward based on where the agent currently is in the hand defined solution list.
"""
cls_uuid: str = "composite_reward"
@staticmethod
def _get_uuid(*args, **kwargs):
return CompositeReward.cls_uuid
def __init__(self, sim, config, *args, **kwargs):
super().__init__(**kwargs)
self._sim = sim
self._config = config
self._prev_node_idx = None
def reset_metric(self, *args, episode, task, observations, **kwargs):
task.measurements.check_measure_dependencies(
self.uuid,
[CompositeNodeIdx.cls_uuid],
)
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, episode, task, observations, **kwargs):
self._metric = 0.0
node_measure = task.measurements.measures[CompositeNodeIdx.cls_uuid]
node_idx = node_measure.get_metric()["node_idx"]
if self._prev_node_idx is None:
self._prev_node_idx = node_idx
elif node_idx > self._prev_node_idx:
self._metric += self._config.STAGE_COMPLETE_REWARD
cur_task = task.cur_task
if cur_task is None:
cur_task_cfg = task.get_inferrred_node_task()._config
else:
cur_task_cfg = cur_task._config
if "REWARD_MEASURE" not in cur_task_cfg:
raise ValueError(
f"Cannot find REWARD_MEASURE key in {list(cur_task_cfg.keys())}"
)
cur_task_reward = task.measurements.measures[
cur_task_cfg.REWARD_MEASURE
].get_metric()
self._metric += cur_task_reward
self._prev_node_idx = node_idx
@registry.register_measure
class CompositeSuccess(Measure):
"""
Did the agent satisfy all of the goal predicates?
"""
cls_uuid: str = "composite_success"
def __init__(self, sim, config, *args, **kwargs):
super().__init__(**kwargs)
self._sim = sim
self._config = config
@staticmethod
def _get_uuid(*args, **kwargs):
return CompositeSuccess.cls_uuid
def reset_metric(self, *args, episode, task, observations, **kwargs):
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, episode, task, observations, **kwargs):
does_action_want_stop = task.actions[
"REARRANGE_STOP"
].does_want_terminate
self._metric = task.is_goal_state_satisfied() and does_action_want_stop
if does_action_want_stop:
task.should_end = True
@registry.register_measure
class CompositeNodeIdx(Measure):
"""
Adds several keys to the metrics dictionary:
- `reached_i`: Did the agent succeed in sub-task at index `i` of the
sub-task `solution` list?
- `node_idx`: Index of the agent in completing the sub-tasks from
the `solution` list.
- `[TASK_NAME]_success`: Did the agent complete a particular stage
defined in `stage_goals`.
"""
cls_uuid: str = "composite_node_idx"
def __init__(self, sim, config, *args, **kwargs):
super().__init__(**kwargs)
self._sim = sim
self._config = config
self._stage_succ = []
@staticmethod
def _get_uuid(*args, **kwargs):
return CompositeNodeIdx.cls_uuid
def reset_metric(self, *args, episode, task, observations, **kwargs):
self._stage_succ = []
self.update_metric(
*args,
episode=episode,
task=task,
observations=observations,
**kwargs,
)
def update_metric(self, *args, episode, task, observations, **kwargs):
cur_task = task.cur_task
self._metric = {}
if cur_task is None:
inf_cur_task_cfg = task.get_inferrred_node_task()._config
if "SUCCESS_MEASURE" not in inf_cur_task_cfg:
raise ValueError(
f"SUCCESS_MEASURE key not found in config: {inf_cur_task_cfg}"
)
is_succ = task.measurements.measures[
inf_cur_task_cfg.SUCCESS_MEASURE
].get_metric()
if is_succ:
task.increment_inferred_solution_idx(episode)
rearrange_logger.debug(
f"Completed {inf_cur_task_cfg.TYPE}, incremented node to {task.get_inferrred_node_task()}"
)
node_idx = task.get_inferred_node_idx()
for i in range(task.num_solution_subtasks):
self._metric[f"reached_{i}"] = (
task.get_inferred_node_idx() >= i
)
else:
node_idx = task.cur_node
self._metric["node_idx"] = node_idx
self._update_info_stage_succ(task, self._metric)
def _update_info_stage_succ(self, task, info):
stage_goals = task.stage_goals
for k, preds in stage_goals.items():
succ_k = f"{k}_success"
if k in self._stage_succ:
info[succ_k] = 1.0
else:
if task.are_predicates_satisfied(preds):
info[succ_k] = 1.0
self._stage_succ.append(k)
else:
info[succ_k] = 0.0
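# Illustrative shape of the metrics emitted above (values are hypothetical): for a
# task with a three-step solution and a "stage_1" stage goal, CompositeNodeIdx might
# report
#   {"node_idx": 1, "reached_0": True, "reached_1": True, "reached_2": False,
#    "stage_1_success": 0.0}
# CompositeReward then adds STAGE_COMPLETE_REWARD whenever "node_idx" increases,
# plus the reward of the currently inferred sub-task, and CompositeSuccess reports
# a single boolean.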
| 31.930796
| 110
| 0.604356
|
c67e0a1ad84ccf2a2826d118f14d801f51785b85
| 768
|
py
|
Python
|
wdna_tests/unit/test_flask_app_phylo.py
|
dduleba/mtdna
|
e98b8d08df862dcb9baa8d3918d86bc428ad4342
|
[
"MIT"
] | null | null | null |
wdna_tests/unit/test_flask_app_phylo.py
|
dduleba/mtdna
|
e98b8d08df862dcb9baa8d3918d86bc428ad4342
|
[
"MIT"
] | null | null | null |
wdna_tests/unit/test_flask_app_phylo.py
|
dduleba/mtdna
|
e98b8d08df862dcb9baa8d3918d86bc428ad4342
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from wdna.www import flask_app
class FlaskAppTestPhyloCase(TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
flask_app.app.testing = True
cls.client = flask_app.app.test_client()
cls.rv = cls.client.get('/wdna/phylo')
def test_phylo(self):
self.assertEqual(self.rv.status_code, 200)
def test_phylo_removed(self):
rv_json = self.rv.json
for row in rv_json:
if row.get('position') == 16278 and row['haplogroup'] == 'L0a1b':
self.assertEqual('yes', row['back_mutation'])
if row.get('position') == 195 and row['haplogroup'] == 'L2a3':
self.assertEqual('double', row['back_mutation'])
| 30.72
| 77
| 0.617188
|
29b93abe3e6a13fde8de7495a80c2034c5fde8b3
| 14,583
|
py
|
Python
|
pytorch_lightning/core/datamodule.py
|
hobogalaxy/pytorch-lightning
|
46540ee260af5137ac38ff0ce1022290cbc120b9
|
[
"Apache-2.0"
] | 1
|
2021-03-03T11:01:43.000Z
|
2021-03-03T11:01:43.000Z
|
pytorch_lightning/core/datamodule.py
|
hobogalaxy/pytorch-lightning
|
46540ee260af5137ac38ff0ce1022290cbc120b9
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/core/datamodule.py
|
hobogalaxy/pytorch-lightning
|
46540ee260af5137ac38ff0ce1022290cbc120b9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LightningDataModule for loading DataLoaders with ease."""
import functools
import inspect
from abc import abstractmethod
from argparse import ArgumentParser, Namespace
from typing import Any, List, Mapping, Optional, Sequence, Tuple, Union
from torch.utils.data import DataLoader, Dataset
from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import str_to_bool, str_to_bool_or_str
class _DataModuleWrapper(type):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__has_added_checks = False
def __call__(cls, *args, **kwargs):
"""A wrapper for LightningDataModule that:
1. Runs user defined subclass's __init__
2. Assures prepare_data() runs on rank 0
3. Lets you check prepare_data and setup to see if they've been called
"""
if not cls.__has_added_checks:
cls.__has_added_checks = True
# Track prepare_data calls and make sure it runs on rank zero
cls.prepare_data = track_data_hook_calls(rank_zero_only(cls.prepare_data))
# Track setup calls
cls.setup = track_data_hook_calls(cls.setup)
# Get instance of LightningDataModule by mocking its __init__ via __call__
obj = type.__call__(cls, *args, **kwargs)
return obj
def track_data_hook_calls(fn):
"""A decorator that checks if prepare_data/setup have been called.
- When dm.prepare_data() is called, dm.has_prepared_data gets set to True
- When dm.setup('fit') is called, dm.has_setup_fit gets set to True
- When dm.setup('test') is called, dm.has_setup_test gets set to True
- When dm.setup() is called without stage arg, both dm.has_setup_fit and dm.has_setup_test get set to True
Args:
fn (function): Function that will be tracked to see if it has been called.
Returns:
function: Decorated function that tracks its call status and saves it to private attrs in its obj instance.
"""
@functools.wraps(fn)
def wrapped_fn(*args, **kwargs):
# The object instance from which setup or prepare_data was called
obj = args[0]
# If calling setup, we check the stage and assign stage-specific bool args
if fn.__name__ == "setup":
# Get stage either by grabbing from args or checking kwargs.
# If not provided, set call status of 'fit' and 'test' to True.
# We do this so __attach_datamodule in trainer.py doesn't mistakenly call setup('test') on trainer.test()
stage = args[1] if len(args) > 1 else kwargs.get("stage", None)
if stage == "fit" or stage is None:
obj._has_setup_fit = True
if stage == "test" or stage is None:
obj._has_setup_test = True
if fn.__name__ == "prepare_data":
obj._has_prepared_data = True
return fn(*args, **kwargs)
return wrapped_fn
class LightningDataModule(CheckpointHooks, DataHooks, metaclass=_DataModuleWrapper):
"""
A DataModule standardizes the training, val, test splits, data preparation and transforms.
The main advantage is consistent data splits, data preparation and transforms across models.
Example::
class MyDataModule(LightningDataModule):
def __init__(self):
super().__init__()
def prepare_data(self):
# download, split, etc...
# only called on 1 GPU/TPU in distributed
def setup(self):
# make assignments here (val/train/test split)
# called on every process in DDP
def train_dataloader(self):
train_split = Dataset(...)
return DataLoader(train_split)
def val_dataloader(self):
val_split = Dataset(...)
return DataLoader(val_split)
def test_dataloader(self):
test_split = Dataset(...)
return DataLoader(test_split)
A DataModule implements 5 key methods:
* **prepare_data** (things to do on 1 GPU/TPU not on every GPU/TPU in distributed mode).
* **setup** (things to do on every accelerator in distributed mode).
* **train_dataloader** the training dataloader.
* **val_dataloader** the val dataloader(s).
* **test_dataloader** the test dataloader(s).
This allows you to share a full dataset without explaining how to download,
split, transform and process the data
"""
name: str = ...
def __init__(
self,
train_transforms=None,
val_transforms=None,
test_transforms=None,
dims=None,
):
super().__init__()
self._train_transforms = train_transforms
self._val_transforms = val_transforms
self._test_transforms = test_transforms
self._dims = dims if dims is not None else ()
# Pointer to the trainer object
self.trainer = None
# Private attrs to keep track of whether or not data hooks have been called yet
self._has_prepared_data = False
self._has_setup_fit = False
self._has_setup_test = False
@property
def train_transforms(self):
"""
Optional transforms (or collection of transforms) you can apply to train dataset
"""
return self._train_transforms
@train_transforms.setter
def train_transforms(self, t):
self._train_transforms = t
@property
def val_transforms(self):
"""
Optional transforms (or collection of transforms) you can apply to validation dataset
"""
return self._val_transforms
@val_transforms.setter
def val_transforms(self, t):
self._val_transforms = t
@property
def test_transforms(self):
"""
Optional transforms (or collection of transforms) you can apply to test dataset
"""
return self._test_transforms
@test_transforms.setter
def test_transforms(self, t):
self._test_transforms = t
@property
def dims(self):
"""
A tuple describing the shape of your data. Extra functionality exposed in ``size``.
"""
return self._dims
@dims.setter
def dims(self, d):
self._dims = d
def size(self, dim=None) -> Union[Tuple, int]:
"""
Return the dimension of each input either as a tuple or list of tuples. You can index this
just as you would with a torch tensor.
"""
if dim is not None:
return self.dims[dim]
return self.dims
@property
def has_prepared_data(self):
"""Return bool letting you know if datamodule.prepare_data() has been called or not.
Returns:
bool: True if datamodule.prepare_data() has been called. False by default.
"""
return self._has_prepared_data
@property
def has_setup_fit(self):
"""Return bool letting you know if datamodule.setup('fit') has been called or not.
Returns:
bool: True if datamodule.setup('fit') has been called. False by default.
"""
return self._has_setup_fit
@property
def has_setup_test(self):
"""Return bool letting you know if datamodule.setup('test') has been called or not.
Returns:
bool: True if datamodule.setup('test') has been called. False by default.
"""
return self._has_setup_test
@abstractmethod
def prepare_data(self, *args, **kwargs):
pass
@abstractmethod
def setup(self, stage: Optional[str] = None):
pass
@classmethod
def add_argparse_args(cls, parent_parser: ArgumentParser) -> ArgumentParser:
r"""Extends existing argparse by default `LightningDataModule` attributes."""
parser = ArgumentParser(parents=[parent_parser], add_help=False)
added_args = [x.dest for x in parser._actions]
blacklist = ["kwargs"]
depr_arg_names = blacklist + added_args
depr_arg_names = set(depr_arg_names)
allowed_types = (str, int, float, bool)
# TODO: get "help" from docstring :)
for arg, arg_types, arg_default in (
at for at in cls.get_init_arguments_and_types() if at[0] not in depr_arg_names
):
arg_types = [at for at in allowed_types if at in arg_types]
if not arg_types:
# skip argument with not supported type
continue
arg_kwargs = {}
if bool in arg_types:
arg_kwargs.update(nargs="?", const=True)
# if the only arg type is bool
if len(arg_types) == 1:
use_type = str_to_bool
# if only two args (str, bool)
elif len(arg_types) == 2 and set(arg_types) == {str, bool}:
use_type = str_to_bool_or_str
else:
# filter out the bool as we need to use more general
use_type = [at for at in arg_types if at is not bool][0]
else:
use_type = arg_types[0]
if arg_default == inspect._empty:
arg_default = None
parser.add_argument(
f"--{arg}",
dest=arg,
default=arg_default,
type=use_type,
help=f"autogenerated by plb.{cls.__name__}",
**arg_kwargs,
)
return parser
@classmethod
def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs):
"""
Create an instance from CLI arguments.
Args:
args: The parser or namespace to take arguments from. Only known arguments will be
parsed and passed to the :class:`LightningDataModule`.
**kwargs: Additional keyword arguments that may override ones in the parser or namespace.
These must be valid DataModule arguments.
Example::
parser = ArgumentParser(add_help=False)
parser = LightningDataModule.add_argparse_args(parser)
module = LightningDataModule.from_argparse_args(args)
"""
if isinstance(args, ArgumentParser):
args = cls.parse_argparser(args)
params = vars(args)
# we only want to pass in valid DataModule args, the rest may be user specific
valid_kwargs = inspect.signature(cls.__init__).parameters
datamodule_kwargs = dict((name, params[name]) for name in valid_kwargs if name in params)
datamodule_kwargs.update(**kwargs)
return cls(**datamodule_kwargs)
@classmethod
def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:
r"""Scans the DataModule signature and returns argument names, types and default values.
Returns:
List with tuples of 3 values:
(argument name, set with argument types, argument default value).
"""
datamodule_default_params = inspect.signature(cls.__init__).parameters
name_type_default = []
for arg in datamodule_default_params:
arg_type = datamodule_default_params[arg].annotation
arg_default = datamodule_default_params[arg].default
try:
arg_types = tuple(arg_type.__args__)
except AttributeError:
arg_types = (arg_type, )
name_type_default.append((arg, arg_types, arg_default))
return name_type_default
@classmethod
def from_datasets(
cls,
train_dataset: Optional[Union[Dataset, Sequence[Dataset], Mapping[str, Dataset]]] = None,
val_dataset: Optional[Union[Dataset, Sequence[Dataset]]] = None,
test_dataset: Optional[Union[Dataset, Sequence[Dataset]]] = None,
batch_size: int = 1,
num_workers: int = 0,
):
r"""
Create an instance from torch.utils.data.Dataset.
Args:
train_dataset: (optional) Dataset to be used for train_dataloader()
val_dataset: (optional) Dataset or list of Dataset to be used for val_dataloader()
test_dataset: (optional) Dataset or list of Dataset to be used for test_dataloader()
batch_size: Batch size to use for each dataloader. Default is 1.
num_workers: Number of subprocesses to use for data loading. 0 means that the
data will be loaded in the main process; a sensible upper bound is the number of CPUs available.
"""
def dataloader(ds, shuffle=False):
return DataLoader(
ds,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
)
def train_dataloader():
if isinstance(train_dataset, Mapping):
return {key: dataloader(ds, shuffle=True) for key, ds in train_dataset.items()}
if isinstance(train_dataset, Sequence):
return [dataloader(ds, shuffle=True) for ds in train_dataset]
return dataloader(train_dataset, shuffle=True)
def val_dataloader():
if isinstance(val_dataset, Sequence):
return [dataloader(ds) for ds in val_dataset]
return dataloader(val_dataset)
def test_dataloader():
if isinstance(test_dataset, Sequence):
return [dataloader(ds) for ds in test_dataset]
return dataloader(test_dataset)
datamodule = cls()
if train_dataset is not None:
datamodule.train_dataloader = train_dataloader
if val_dataset is not None:
datamodule.val_dataloader = val_dataloader
if test_dataset is not None:
datamodule.test_dataloader = test_dataloader
return datamodule
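# Illustrative usage sketch (not part of the library API): builds a datamodule from
# dummy in-memory TensorDatasets via the ``from_datasets`` classmethod above.
def _example_from_datasets():
    import torch
    from torch.utils.data import TensorDataset
    train_ds = TensorDataset(torch.randn(64, 3), torch.randint(0, 2, (64,)))
    val_ds = TensorDataset(torch.randn(16, 3), torch.randint(0, 2, (16,)))
    dm = LightningDataModule.from_datasets(
        train_dataset=train_ds, val_dataset=val_ds, batch_size=8, num_workers=0
    )
    return dm.train_dataloader()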
| 35.830467
| 117
| 0.629774
|
7c859b95fc1a4bd627e1fb807d9af6870b062468
| 510
|
py
|
Python
|
hw6/main.py
|
go-may/goiteens-python3-abdaladze
|
5fb544a3696fcf57f11b6292b468fc7f533ff749
|
[
"MIT"
] | null | null | null |
hw6/main.py
|
go-may/goiteens-python3-abdaladze
|
5fb544a3696fcf57f11b6292b468fc7f533ff749
|
[
"MIT"
] | null | null | null |
hw6/main.py
|
go-may/goiteens-python3-abdaladze
|
5fb544a3696fcf57f11b6292b468fc7f533ff749
|
[
"MIT"
] | null | null | null |
print ("Hello this is our calendar application")
print ("Please choouse your option: ")
print ("1.Write new date")
print ("2.Read all dates")
print ("3.Exit")
messageFromUser = input()
if messageFromUser == "1":
print ("Enter your new date: ")
newDate = input()
calendar = open("calendar.txt", "a")
calendar.write(newDate + "\n")
calendar.close()
if messageFromUser == "2":
calendar = open("calendar.txt", "r")
print (calendar.read())
if messageFromUser == "3":
print ("GoodBye!")
| 25.5
| 49
| 0.647059
|
6dd82b859285c875d3e5efd5c068f105638088c3
| 9,743
|
py
|
Python
|
RebuttalAnalysis/predict_after_score.py
|
UKPLab/naacl2019-does-my-rebuttal-matter
|
ad76295863046e8d44995736598ca2643e47a3c9
|
[
"Apache-2.0"
] | 24
|
2019-03-11T13:53:41.000Z
|
2021-03-26T01:39:43.000Z
|
RebuttalAnalysis/predict_after_score.py
|
yg211/naacl2019-does-my-rebuttal-matter
|
69b5311e5f5c51755091c0db8cffa8f17f01615a
|
[
"Apache-2.0"
] | 2
|
2020-06-13T10:59:51.000Z
|
2020-08-06T09:03:27.000Z
|
RebuttalAnalysis/predict_after_score.py
|
yg211/naacl2019-does-my-rebuttal-matter
|
69b5311e5f5c51755091c0db8cffa8f17f01615a
|
[
"Apache-2.0"
] | 2
|
2019-09-17T09:25:27.000Z
|
2020-03-09T15:55:16.000Z
|
# SAMPLE USAGE:
# python3 predict_after_score.py
# before running, install all packages in requirement.txt
import sys
sys.path.append('../..')
import argparse
import numpy as np
import sklearn.linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.feature_selection import f_regression
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy.stats import spearmanr, pearsonr
import math
def printCorrelation(scores,features,feature_names):
print('\n\n===== Pearson Correlation Between Features and Scores =====')
for col_id in range(len(feature_names)):
(rho, pvalue) = pearsonr(scores,features[:,col_id])
print('{}: {}'.format(feature_names[col_id],rho))
print('\n\n===== Pearson Correlation Between Features=====')
for ii in range(len(feature_names)-1):
for jj in range(ii+1,len(feature_names)):
(rho, pvalue) = pearsonr(features[:,jj],features[:,ii])
print('{} and {}: {}'.format(feature_names[ii],feature_names[jj],rho))
print(' ')
def selectByREF(features,fnames,target_num=10):
model = sklearn.linear_model.LinearRegression()
rfe = RFE(model,target_num)
fit = rfe.fit(features,scores)
fi = np.array(fit.support_)
#print('ranking of features:')
#for cnt,ii in enumerate(fit.ranking_):
# print('no. {}: {}'.format(cnt,fnames[ii]))
return features[:,fi], fnames[fi]
def selectByPCA(features,fnames,target_num=10):
pca = PCA(target_num,copy=True)
fit = pca.fit_transform(features)
return fit, None
def selectByVIF(features, fnames, target_num=10, threshold=5.):
variables = np.array([i for i in range(features.shape[1])])
cnt = 0
for i in np.arange(0, len(variables)):
cnt += 1
vif = [variance_inflation_factor(features[:,variables], ix) for ix in range(len(variables))]
#print('round {}: {}'.format(cnt,vif))
maxloc = vif.index(max(vif))
if len(variables) <= target_num:# or max(vif) < threshold:
break
else:
variables = np.delete(variables,maxloc)
#print('Remaining variables ({}):'.format(len(variables)))
#for nn in variables:
#print(fnames[nn])
return features[:,variables], fnames[variables]
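# Illustrative sketch (not used by the pipeline below; shapes and names are made up):
# runs the VIF filter above on random data.
def _demo_select_by_vif():
    rng = np.random.RandomState(0)
    fake_features = rng.randn(50, 6)
    fake_names = np.array(['f%d' % i for i in range(6)])
    # the column with the highest variance inflation factor is dropped each round
    # until only target_num columns remain
    kept, kept_names = selectByVIF(fake_features, fake_names, target_num=3)
    return kept, kept_names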
def printBaselineResults(features,names,scores):
self_prevs = features[:,[nn=='self_prev' for nn in names]]
all_means = features[:,[nn=='all_mean' for nn in names]]
sp_error = mean_squared_error(self_prevs,scores)
am_error = mean_squared_error(all_means,scores)
print('\n=====BASELINES=====')
print('previous score baseline error: {}'.format(sp_error))
print('all previous mean baseline error: {}'.format(am_error))
print()
def addRebuttalScoreFeature(features,names):
self_prevs = features[:,[nn=='all_max' for nn in names]]
added = [max(ss[0],4.) for ss in self_prevs]
nfeatures = np.append(np.array(added).reshape(-1,1),features,axis=1)
nnames = np.append('rebuttal_score',names)
return nfeatures,nnames
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--feature_num',action="store",dest="fn",nargs='?',default=-1)
parser.add_argument("--classifier",action="store",dest="clf",default="lin-reg",nargs='?',help="gp|lin-reg|forest|tree")
parser.add_argument("--feature-selector",action="store",dest="fs",default="vif",nargs='?',help='pca or vif')
parser.add_argument("--feature-set",action="store",dest="fst",
default="opinion-politeness-specificity-length-convincingness-similarity",nargs='?')
#default="opinion",nargs='?')
parser.add_argument("--data",action="store",dest="data",
default="./AggregateFeatures/borderline_score_cleanedPlt_cleanedSpc_cleanedCvc_sim_respLogLen.csv",nargs='?')
#parser.set_defaults(conll=False)
parsed_args = parser.parse_args(sys.argv[1:])
print(":::",parsed_args.fn,parsed_args.clf,parsed_args.fs,parsed_args.fst)
cv = 50
feature_num = int(parsed_args.fn)
gp_type = parsed_args.clf
feature_selector = parsed_args.fs
feature_set_name = parsed_args.fst
data_path = parsed_args.data
### read features and scores
length_features = [1]
opinion_features = list(range(2,28))
#opinion_features = [2,4] ### only use self_prev and other_mean, all
#opinion_features = [2,18] ### only use self_prev and all_mean, borderline
specificity_features = list(range(28,33))
politeness_features = list(range(33,38))
convincingness_features = list(range(38,43))
similarity_features = [43]
out_feature = [44]
h = {"opinion":opinion_features,"politeness":politeness_features,"specificity":specificity_features,"length":length_features,'convincingness':convincingness_features,'similarity':similarity_features,'length':length_features}
feature_set = []
for x in feature_set_name.split("-"):
feature_set += h[x]
if feature_num==-1:
feature_num = len(feature_set)
print(len(feature_set))
data = pd.read_csv(data_path,usecols=feature_set+out_feature)
print("-->",data_path,data.columns.values)
data.fillna(0.)
### read features and scores
feature_names= np.array(list(data.columns.values)[:-1])
matrix = np.array(data.values)
features = matrix[:,:-1]
scores = matrix[:,-1]
#features, feature_names = addRebuttalScoreFeature(features,feature_names)
### simple model
#wanted_features = ['self_prev','all_mean','cvc_std','rev_resp_embd_sim','cvc_max','spec_max','plt_min','cvc_median', 'cvc_mean','plt_std','log_resp_length'] #all cases, significant features
#wanted_features = ['self_prev','all_mean','log_resp_length','cvc_max','plt_min','rev_resp_embd_sim','plt_std','plt_max','spec_std','cvc_min','cvc_std','spec_max'] #borderline cases, significant features
#wanted_features = ['self_prev','all_mean']
#features = features[:,np.array([feature_names[ii] in wanted_features for ii in range(len(feature_names))])]
#feature_names = np.array(wanted_features)
### simple model
print('feature matrix size: {}'.format(features.shape))
print(feature_names)
features = StandardScaler().fit_transform(features)
printCorrelation(scores,features,feature_names)
#printBaselineResults(features,feature_names,scores)
### shuffle the order of the features and training examples
indices = np.random.permutation(features.shape[0])
features = features[indices]
scores = scores[indices]
### shuffling the features
indices = np.random.permutation(features.shape[1])
feature_names = feature_names[indices]
features = features[:,indices]
#print(feature_names,features,":::")
#print(features.shape,feature_names,len(feature_names),feature_num); sys.exit(1)
print("-->",feature_num,"<--")
if feature_selector == 'vif' and features.shape[1] > 1:
features, feature_names = selectByVIF(features, feature_names, feature_num)
elif feature_selector == 'pca' and features.shape[1] > 1:
features, feature_names = selectByPCA(features,feature_names,feature_num)
#print(features.shape,scores.shape,"<--0")
F, pvalue = f_regression(features,scores)
#print('\np-values: {}\n'.format(pvalue))
regr = sklearn.linear_model.LinearRegression()
#print(features.shape,scores.shape,"<--1")
regr.fit(features,scores)
pred = regr.predict(features)
#print('coefficient: {}'.format(regr.coef_))
for i in range(len(regr.coef_)):
pval = pvalue[i]
if pval<0.01: star="***"
elif pval<0.05: star="**"
elif pval<0.1: star="*"
else: star=""
if feature_names is not None:
print(feature_names[i],"\t","%.3f"%regr.coef_[i],"\t","%.3f"%pvalue[i],star)
else:
print('feature {}'.format(i),"\t","%.3f"%regr.coef_[i],"\t","%.3f"%pvalue[i],star)
print('mean squared error : {}, variance score : {}'.format(mean_squared_error(pred,scores),r2_score(pred,scores)))
### remove insignificant features
#features = features[:,np.array([pp < 0.1 for pp in pvalue])]
#feature_names = feature_names[np.array([pp < 0.1 for pp in pvalue])]
if gp_type == 'gp':
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
elif gp_type == 'svr_rbf':
gp = SVR(kernel='rbf', C=1e3, gamma=0.1)
elif gp_type == 'svr_lin':
gp = SVR(kernel='linear', C=1e3)
elif gp_type == 'svr_poly':
gp = SVR(kernel='poly', C=1e3, degree=2)
elif gp_type == 'forest':
gp = RandomForestRegressor(max_depth=6, random_state=0)
elif gp_type == 'tree':
gp = DecisionTreeRegressor(random_state=0)
else:
gp = sklearn.linear_model.LinearRegression()
scores_cv = cross_val_score(gp,features,scores,cv=cv,scoring='neg_mean_squared_error')
print('feature num: {}'.format(features.shape[1]))
print('alg {}, {}-fold cv: mean {}, std {}'.format(gp_type,cv,scores_cv.mean(),scores_cv.std()))
| 41.109705
| 228
| 0.685826
|
687673dad4a1dca2e63146b033e042d2c2bd17d2
| 1,682
|
py
|
Python
|
nnef_tools/io/tensorflow/tflite_fb/SqueezeOptions.py
|
jnorwood/NNEF-Tools
|
5eb3755b5322040d42893e41b15093337abe04ce
|
[
"Apache-2.0"
] | 1
|
2019-09-06T19:37:01.000Z
|
2019-09-06T19:37:01.000Z
|
nnef_tools/io/tensorflow/tflite_fb/SqueezeOptions.py
|
Acidburn0zzz/NNEF-Tools
|
f9bcb3e043474d47f6a8a552abcc6d1069476072
|
[
"Apache-2.0"
] | null | null | null |
nnef_tools/io/tensorflow/tflite_fb/SqueezeOptions.py
|
Acidburn0zzz/NNEF-Tools
|
f9bcb3e043474d47f6a8a552abcc6d1069476072
|
[
"Apache-2.0"
] | null | null | null |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite_fb
import flatbuffers
class SqueezeOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsSqueezeOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SqueezeOptions()
x.Init(buf, n + offset)
return x
# SqueezeOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SqueezeOptions
def SqueezeDims(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# SqueezeOptions
def SqueezeDimsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# SqueezeOptions
def SqueezeDimsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
def SqueezeOptionsStart(builder): builder.StartObject(1)
def SqueezeOptionsAddSqueezeDims(builder, squeezeDims): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(squeezeDims), 0)
def SqueezeOptionsStartSqueezeDimsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SqueezeOptionsEnd(builder): return builder.EndObject()
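# --- Illustrative usage sketch (annotation; the generated code above is left unmodified) ---
# A minimal, hypothetical reader for the squeeze_dims vector, assuming `buf` is a
# bytes/bytearray whose root table is a serialized SqueezeOptions:
def read_squeeze_dims(buf):
    opts = SqueezeOptions.GetRootAsSqueezeOptions(buf, 0)
    return [opts.SqueezeDims(i) for i in range(opts.SqueezeDimsLength())]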
| 35.787234
| 158
| 0.701546
|
50bda1f204307fec9154b835c7c088c9552ff71f
| 2,175
|
py
|
Python
|
test/ml/test_breast_cancer.py
|
charmerDark/qiskit-aqua
|
c1564af8792c6664670807614a378147fd04d28f
|
[
"Apache-2.0"
] | 504
|
2018-12-15T16:34:03.000Z
|
2022-03-26T11:24:53.000Z
|
test/ml/test_breast_cancer.py
|
charmerDark/qiskit-aqua
|
c1564af8792c6664670807614a378147fd04d28f
|
[
"Apache-2.0"
] | 746
|
2018-12-16T16:44:42.000Z
|
2021-07-10T16:59:43.000Z
|
test/ml/test_breast_cancer.py
|
charmerDark/qiskit-aqua
|
c1564af8792c6664670807614a378147fd04d28f
|
[
"Apache-2.0"
] | 421
|
2018-12-22T14:49:00.000Z
|
2022-03-04T09:47:07.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Breast Cancer """
from test.ml import QiskitMLTestCase
import json
import numpy as np
from qiskit.ml.datasets import breast_cancer
class TestBreastCancer(QiskitMLTestCase):
""" Breast Cancer tests."""
def test_breast_cancer(self):
"""Breast Cancer test."""
input_file = self.get_resource_path('sample_train.breast_cancer')
with open(input_file, encoding="utf8") as file:
sample_train_ref = json.load(file)
input_file = self.get_resource_path('training_input.breast_cancer')
with open(input_file, encoding="utf8") as file:
training_input_ref = json.load(file)
input_file = self.get_resource_path('test_input.breast_cancer')
with open(input_file, encoding="utf8") as file:
test_input_ref = json.load(file)
sample_train, training_input, test_input, class_labels = breast_cancer(training_size=20,
test_size=10,
n=2,
plot_data=False)
np.testing.assert_allclose(sample_train.tolist(), sample_train_ref, rtol=1e-04)
for key, _ in training_input.items():
np.testing.assert_allclose(training_input[key].tolist(),
training_input_ref[key], rtol=1e-04)
for key, _ in test_input.items():
np.testing.assert_allclose(test_input[key].tolist(), test_input_ref[key], rtol=1e-04)
np.testing.assert_array_equal(class_labels, list(training_input.keys()))
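# --- Illustrative usage sketch (annotation, not part of the original test) ---
# Assuming qiskit-aqua is installed, the dataset helper exercised above can also be
# called directly; training_input/test_input map each class label to an array of
# two-dimensional samples:
if __name__ == '__main__':
    sample, train_in, test_in, labels = breast_cancer(training_size=20, test_size=10,
                                                      n=2, plot_data=False)
    print(labels, {label: data.shape for label, data in train_in.items()})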
| 43.5
| 97
| 0.624828
|
fd06f5a20877f580553abd42adf53a293df446f7
| 2,482
|
py
|
Python
|
titanicTest.py
|
Biubiupia/MachineLearningProject
|
37ddb20cef0da8f5cd43eaee21a5248af4a41584
|
[
"MIT"
] | null | null | null |
titanicTest.py
|
Biubiupia/MachineLearningProject
|
37ddb20cef0da8f5cd43eaee21a5248af4a41584
|
[
"MIT"
] | null | null | null |
titanicTest.py
|
Biubiupia/MachineLearningProject
|
37ddb20cef0da8f5cd43eaee21a5248af4a41584
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 31 09:30:24 2018
@author: Administrator
Random forests do not require data preprocessing
"""
# Import data-preprocessing utilities (standardization / regularization helpers)
from sklearn import preprocessing
# sklearn.preprocessing.Imputer has been removed; SimpleImputer is its replacement
from sklearn.impute import SimpleImputer
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
# Chinese font configuration for matplotlib labels
from matplotlib.font_manager import FontProperties
font=FontProperties(fname=r"c:\windows\fonts\simsun.ttc",size=14)
# Path to the Excel file with the feature variables
varibleFileName="E:/datas/titanic.xlsx"
# Path to the Excel file with the target labels
targetFileName="E:/datas/target.xlsx"
# Read both Excel files
data=pd.read_excel(varibleFileName)
target=pd.read_excel(targetFileName)
y=target.values
data_dummies=pd.get_dummies(data)
print('features after one-hot encoding:\n',list(data_dummies.columns))
# pandas removed .ix; use label-based slicing with .loc
features=data_dummies.loc[:,"Pclass":'Embarked_S']
x=features.values
# Data preprocessing: fill missing values with the most frequent value of each column
imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imp.fit(x)
x = imp.transform(x)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
trees=1000
max_depth=10
# n_estimators is the number of trees; in testing, 100 trees were already enough
forest=RandomForestClassifier(n_estimators=trees,random_state=0,max_depth=max_depth)
forest.fit(x_train,y_train)
print("random forest with %d trees:"%trees)
print("accuracy on the training subset:{:.3f}".format(forest.score(x_train,y_train)))
print("accuracy on the test subset:{:.3f}".format(forest.score(x_test,y_test)))
# print('Feature importances:{}'.format(forest.feature_importances_))
names=features.columns
importance=forest.feature_importances_
zipped = zip(importance,names)
list1=list(zipped)
list1.sort(reverse=True)
# print(list1)
# use the number of columns actually passed to the model, and label ticks with their names
n_features=features.shape[1]
plt.barh(range(n_features),forest.feature_importances_,align='center')
plt.yticks(np.arange(n_features),features.columns)
plt.title("random forest with %d trees,%dmax_depth:"%(trees,max_depth))
plt.xlabel('Feature Importance')
plt.ylabel('Feature')
plt.show()
'''
random forest with 1000 trees:
accuracy on the training subset:0.983
accuracy on the test subset:0.878
random forest with 1000 trees,max_depth=4:
accuracy on the training subset:0.854
accuracy on the test subset:0.884
random forest with 1000 trees,max_depth=5:
accuracy on the training subset:0.853
accuracy on the test subset:0.887
random forest with 1000 trees,max_depth=9
accuracy on the training subset:0.871
accuracy on the test subset:0.890
'''
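# --- Illustrative sketch (annotation, not part of the original script) ---
# One way to report the top-ranked features from the (importance, name) pairs
# collected in list1 above:
for importance_value, feature_name in list1[:10]:
    print("%-20s %.4f" % (feature_name, importance_value))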
| 28.528736
| 85
| 0.794923
|
49edeed5517761e1dd5030828827e08742195cf3
| 67,664
|
py
|
Python
|
nova/compute/resource_tracker.py
|
WeifanFu-bsn/nova
|
c7b54a80ac25f6a01d0a150c546532f5ae2592ce
|
[
"Apache-2.0"
] | null | null | null |
nova/compute/resource_tracker.py
|
WeifanFu-bsn/nova
|
c7b54a80ac25f6a01d0a150c546532f5ae2592ce
|
[
"Apache-2.0"
] | null | null | null |
nova/compute/resource_tracker.py
|
WeifanFu-bsn/nova
|
c7b54a80ac25f6a01d0a150c546532f5ae2592ce
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
"""Returns True if the instance is in one of the resizing states.
:param instance: `nova.objects.Instance` object
"""
vm = instance.vm_state
task = instance.task_state
if vm == vm_states.RESIZED:
return True
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH, task_states.REBUILDING]):
return True
return False
def _is_trackable_migration(migration):
# Only look at resize/migrate migration and evacuation records
# NOTE(danms): RT should probably examine live migration
# records as well and do something smart. However, ignore
# those for now to avoid them being included in below calculations.
return migration.migration_type in ('resize', 'migration',
'evacuation')
def _normalize_inventory_from_cn_obj(inv_data, cn):
"""Helper function that injects various information from a compute node
object into the inventory dict returned from the virt driver's
get_inventory() method. This function allows us to marry information like
*_allocation_ratio and reserved memory amounts that are in the
compute_nodes DB table and that the virt driver doesn't know about with the
information the virt driver *does* know about.
Note that if the supplied inv_data contains allocation_ratio, reserved or
other fields, we DO NOT override the value with that of the compute node.
This is to ensure that the virt driver is the single source of truth
regarding inventory information. For instance, the Ironic virt driver will
always return a very specific inventory with allocation_ratios pinned to
1.0.
:param inv_data: Dict, keyed by resource class, of inventory information
returned from virt driver's get_inventory() method
:param compute_node: `objects.ComputeNode` describing the compute node
"""
if fields.ResourceClass.VCPU in inv_data:
cpu_inv = inv_data[fields.ResourceClass.VCPU]
if 'allocation_ratio' not in cpu_inv:
cpu_inv['allocation_ratio'] = cn.cpu_allocation_ratio
if 'reserved' not in cpu_inv:
cpu_inv['reserved'] = CONF.reserved_host_cpus
if fields.ResourceClass.MEMORY_MB in inv_data:
mem_inv = inv_data[fields.ResourceClass.MEMORY_MB]
if 'allocation_ratio' not in mem_inv:
mem_inv['allocation_ratio'] = cn.ram_allocation_ratio
if 'reserved' not in mem_inv:
mem_inv['reserved'] = CONF.reserved_host_memory_mb
if fields.ResourceClass.DISK_GB in inv_data:
disk_inv = inv_data[fields.ResourceClass.DISK_GB]
if 'allocation_ratio' not in disk_inv:
disk_inv['allocation_ratio'] = cn.disk_allocation_ratio
if 'reserved' not in disk_inv:
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
# or start tracking DISK_MB.
reserved_mb = CONF.reserved_host_disk_mb
reserved_gb = compute_utils.convert_mb_to_ceil_gb(reserved_mb)
disk_inv['reserved'] = reserved_gb
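# Illustrative example (annotation, not part of the original module): given
#   inv_data = {fields.ResourceClass.VCPU: {'total': 8}}
# and a compute node with cpu_allocation_ratio=16.0 while CONF.reserved_host_cpus=1,
# the helper above fills in the missing keys so that
#   inv_data[fields.ResourceClass.VCPU] == {'total': 8,
#                                           'allocation_ratio': 16.0,
#                                           'reserved': 1}
# but it leaves any allocation_ratio/reserved values already supplied by the virt
# driver untouched.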
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver):
self.host = host
self.driver = driver
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
self.stats = stats.Stats()
self.tracked_instances = {}
self.tracked_migrations = {}
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.scheduler_client = scheduler_client.SchedulerClient()
self.reportclient = self.scheduler_client.reportclient
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
def get_node_uuid(self, nodename):
try:
return self.compute_nodes[nodename].uuid
except KeyError:
raise exception.ComputeHostNotFound(host=nodename)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance, nodename, limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance: instance to reserve resources for.
:type instance: nova.objects.instance.Instance object
:param nodename: The Ironic nodename selected by the scheduler
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled(nodename):
# instance_claim() was called before update_available_resource()
# (which ensures that a compute node exists for nodename). We
# shouldn't get here but in case we do, just set the instance's
# host and nodename attribute (probably incorrect) and return a
# NoopClaim.
# TODO(jaypipes): Remove all the disabled junk from the resource
# tracker. Servicegroup API-level active-checking belongs in the
# nova-compute manager.
self._set_instance_host_and_node(instance, nodename)
return claims.NopClaim()
# sanity checks:
if instance.host:
LOG.warning("Host field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
if instance.node:
LOG.warning("Node field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
# get the overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance.flavor.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
context, instance.uuid)
claim = claims.Claim(context, instance, nodename, self, cn,
pci_requests, overhead=overhead, limits=limits)
# self._set_instance_host_and_node() will save instance to the DB
# so set instance.numa_topology first. We need to make sure
# that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
# so that the resource audit knows about any cpus we've pinned.
instance_numa_topology = claim.claimed_numa_topology
instance.numa_topology = instance_numa_topology
self._set_instance_host_and_node(instance, nodename)
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
self.pci_tracker.claim_instance(context, pci_requests,
instance_numa_topology)
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance, nodename)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, cn)
return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
instance_type = instance.flavor
return self._move_claim(context, instance, instance_type, nodename,
migration, move_type='evacuation',
limits=limits, image_meta=image_meta)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
migration, image_meta=None, limits=None):
"""Create a claim for a resize or cold-migration move."""
return self._move_claim(context, instance, instance_type, nodename,
migration, image_meta=image_meta,
limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
migration, move_type=None, image_meta=None, limits=None):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
evacuate/rebuild operation.
:param context: security context
:param instance: instance object to reserve resources for
:param new_instance_type: new instance_type being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param image_meta: instance image metadata
:param move_type: move type - can be one of 'migration', 'resize',
'live-migration', 'evacuate'
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
:param migration: A migration object if one was already created
elsewhere for this operation (otherwise None)
:returns: A Claim ticket representing the reserved resources. This
                  should be used to finalize a resource claim or free
resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(context, instance,
new_instance_type,
nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
return claims.NopClaim(migration=migration)
# get memory overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(new_instance_type)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': new_instance_type.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
# TODO(moshele): we are recreating the pci requests even if
# there was no change on resize. This will cause allocating
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_instance_type)
new_pci_requests.instance_uuid = instance.uuid
# PCI requests come from two sources: instance flavor and
# SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
# On resize merge the SR-IOV ports pci_requests with the new
# instance flavor pci_requests.
if instance.pci_requests:
for request in instance.pci_requests.requests:
if request.alias_name is None:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_instance_type, image_meta, self, cn,
new_pci_requests, overhead=overhead,
limits=limits)
claim.migration = migration
claimed_pci_devices_objs = []
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
claimed_pci_devices_objs = self.pci_tracker.claim_instance(
context, new_pci_requests, claim.claimed_numa_topology)
claimed_pci_devices = objects.PciDeviceList(
objects=claimed_pci_devices_objs)
# TODO(jaypipes): Move claimed_numa_topology out of the Claim's
# constructor flow so the Claim constructor only tests whether
# resources can be claimed, not consume the resources directly.
mig_context = objects.MigrationContext(
context=context, instance_uuid=instance.uuid,
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
old_pci_devices=instance.pci_devices,
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests)
instance.migration_context = mig_context
instance.save()
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, migration,
nodename)
elevated = context.elevated()
self._update(elevated, cn)
return claim
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
"""Create a migration record for the upcoming resize. This should
be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
"""Make an existing migration record count for resource tracking.
If a migration record was created already before the request made
it to this compute host, only set up the migration so it's included in
resource tracking. This should be done while the
COMPUTE_RESOURCES_SEMAPHORE is held.
"""
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.status = 'pre-migrating'
migration.save()
def _set_instance_host_and_node(self, instance, nodename):
"""Tag the instance as belonging to this host. This should be done
while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
"""Untag the instance so it no longer belongs to the host.
This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held so
the resource claim will not be lost if the audit process starts.
"""
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
"""Remove usage from the given instance."""
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_move_claim(self, context, instance, nodename,
instance_type=None, prefix='new_'):
# Remove usage for an incoming/outgoing migration on the destination
# node.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
ctxt = context.elevated()
instance_type = self._get_instance_type(ctxt, instance, prefix,
migration)
if instance_type is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
instance_type, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
self._update_usage(usage, nodename, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
# included in both tracked_migrations and tracked_instances.
elif (instance['uuid'] in self.tracked_instances):
self.tracked_instances.pop(instance['uuid'])
self._drop_pci_devices(instance, nodename, prefix)
# TODO(lbeliveau): Validate if numa needs the same treatment.
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled(nodename):
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
def _check_for_nodes_rebalance(self, context, resources, nodename):
"""Check if nodes rebalance has happened.
The ironic driver maintains a hash ring mapping bare metal nodes
to compute nodes. If a compute dies, the hash ring is rebuilt, and
some of its bare metal nodes (more precisely, those not in ACTIVE
state) are assigned to other computes.
This method checks for this condition and adjusts the database
accordingly.
:param context: security context
:param resources: initial values
:param nodename: node name
:returns: True if a suitable compute node record was found, else False
"""
if not self.driver.rebalances_nodes:
return False
        # It's possible ironic just did a node re-balance, so let's
# check if there is a compute node that already has the correct
# hypervisor_hostname. We can re-use that rather than create a
# new one and have to move existing placement allocations
cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
context, nodename)
if len(cn_candidates) == 1:
cn = cn_candidates[0]
LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
{"name": nodename, "old": cn.host, "new": self.host})
cn.host = self.host
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return True
elif len(cn_candidates) > 1:
LOG.error(
"Found more than one ComputeNode for nodename %s. "
"Please clean up the orphaned ComputeNode records in your DB.",
nodename)
return False
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
The resource tracker will be inoperable if compute_node
is not defined. The compute_node will remain undefined if
we fail to create it or if there is no associated service
registered.
If this method has to create a compute node it needs initial
values - these come from resources.
:param context: security context
:param resources: initial values
"""
nodename = resources['hypervisor_hostname']
# if there is already a compute node just use resources
# to initialize
if nodename in self.compute_nodes:
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
cn = self._get_compute_node(context, nodename)
if cn:
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
if self._check_for_nodes_rebalance(context, resources, nodename):
return
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources)
self.compute_nodes[nodename] = cn
cn.create()
LOG.info('Compute node record created for '
'%(host)s:%(node)s with uuid: %(uuid)s',
{'host': self.host, 'node': nodename, 'uuid': cn.uuid})
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
n_id = compute_node.id
self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources):
"""Copy resource values to supplied compute_node."""
# purge old stats and init with anything passed in by the driver
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
compute_node.stats = copy.deepcopy(self.stats)
# update the allocation ratios for the related ComputeNode object
compute_node.ram_allocation_ratio = self.ram_allocation_ratio
compute_node.cpu_allocation_ratio = self.cpu_allocation_ratio
compute_node.disk_allocation_ratio = self.disk_allocation_ratio
# now copy rest to compute_node
compute_node.update_from_virt_driver(resources)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSONified string.
metrics = metrics.to_list()
if len(metrics):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metrics
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
return metrics
def update_available_resource(self, context, nodename):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
:param nodename: Temporary parameter representing the Ironic resource
node. This parameter will be removed once Ironic
baremetal resource nodes are handled like any other
resource in the system.
"""
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
# NOTE(jaypipes): The resources['hypervisor_hostname'] field now
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources)
def _pair_instances_to_migrations(self, migrations, instances):
instance_by_uuid = {inst.uuid: inst for inst in instances}
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# NOTE(danms): If this happens, we don't set it here, and
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
# NOTE(tdurakov) this situation is possible for resize/cold
                # migration when the migration is finished but not yet
                # confirmed/reverted; in that case the instance has already
                # changed host to the destination and no match happens
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources):
# initialize the compute node object, creating it
# if it does not already exist.
self._init_compute_node(context, resources)
nodename = resources['hypervisor_hostname']
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled(nodename):
return
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, nodename,
expected_attrs=['system_metadata',
'numa_topology',
'flavor', 'migration_context'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(context, instances, nodename)
# Grab all in-progress migrations:
migrations = objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, nodename)
self._pair_instances_to_migrations(migrations, instances)
self._update_usage_from_migrations(context, migrations, nodename)
self._remove_deleted_instances_allocations(
context, self.compute_nodes[nodename], migrations)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(orphans, nodename)
cn = self.compute_nodes[nodename]
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
# notified when instances are deleted, we need remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations, orphans)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
metrics = self._get_host_metrics(context, nodename)
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
cn.metrics = jsonutils.dumps(metrics)
# update the compute_node
self._update(context, cn)
LOG.debug('Compute_service record updated for %(host)s:%(node)s',
{'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
else:
free_vcpus = 'unknown'
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.info("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
"""Check to see if any resources have changed."""
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _update(self, context, compute_node):
"""Update partial stats locally and populate them to Scheduler."""
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB.
# NOTE(jianghuaw): Once we completely move to use get_inventory()
# for all resource provider's inv data. We can remove this check.
# At the moment we still need this check and save compute_node.
compute_node.save()
# NOTE(jianghuaw): Some resources(e.g. VGPU) are not saved in the
# object of compute_node; instead the inventory data for these
# resource is reported by driver's get_inventory(). So even there
# is no resource change for compute_node as above, we need proceed
# to get inventory and use scheduler_client interfaces to update
# inventory to placement. It's scheduler_client's responsibility to
# ensure the update request to placement only happens when inventory
# is changed.
nodename = compute_node.hypervisor_hostname
# Persist the stats to the Scheduler
try:
inv_data = self.driver.get_inventory(nodename)
_normalize_inventory_from_cn_obj(inv_data, compute_node)
self.scheduler_client.set_inventory_for_provider(
context,
compute_node.uuid,
compute_node.hypervisor_hostname,
inv_data,
)
except NotImplementedError:
# Eventually all virt drivers will return an inventory dict in the
# format that the placement API expects and we'll be able to remove
# this code branch
self.scheduler_client.update_compute_node(context, compute_node)
try:
traits = self.driver.get_traits(nodename)
except NotImplementedError:
pass
else:
# NOTE(mgoddard): set_traits_for_provider does not refresh the
# provider tree in the report client, so we rely on the above call
# to set_inventory_for_provider or update_compute_node to ensure
# that the resource provider exists in the tree and has had its
# cached traits refreshed.
self.reportclient.set_traits_for_provider(
context, compute_node.uuid, traits)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
disk_usage += overhead.get('disk_gb', 0)
vcpus_usage += overhead.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
cn.running_vms = self.stats.num_instances
# Calculate the numa usage
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
cn, usage, free)
cn.numa_topology = updated_numa_topology
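    # Illustrative note (annotation, not part of the original module): callers pass
    # sign=1 when an instance's footprint is added to this node and sign=-1 when it
    # is released, e.g.
    #   self._update_usage({'memory_mb': 1024, 'root_gb': 10, 'vcpus': 1}, nodename,
    #                      sign=-1)
    # subtracts 1024 MB of RAM, 10 GB of disk and 1 vCPU (plus the driver-estimated
    # overhead) from the tracked totals and recomputes free_ram_mb/free_disk_gb.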
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
nodename):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
if not _is_trackable_migration(migration):
return
uuid = migration.instance_uuid
LOG.info("Updating from migration %s", uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == nodename)
same_node = (incoming and outbound)
record = self.tracked_instances.get(uuid, None)
itype = None
numa_topology = None
sign = 0
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if (instance['instance_type_id'] ==
migration.old_instance_type_id):
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
else:
# The instance is already set to the new flavor (this is done
# by the compute manager on finish_resize()), hold space for a
# possible revert to the 'old_' resources.
# NOTE(lbeliveau): When the periodic audit timer gets
# triggered, the compute usage gets reset. The usage for an
# instance that is migrated to the new flavor but not yet
# confirmed/reverted will first get accounted for by
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not record:
# instance has not yet migrated here:
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
elif outbound and not record:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
if itype:
cn = self.compute_nodes[nodename]
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker and sign:
self.pci_tracker.update_pci_for_instance(
context, instance, sign=sign)
self._update_usage(usage, nodename)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
cn.pci_device_pools = obj
self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
filtered = {}
instances = {}
self.tracked_migrations.clear()
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
uuid = migration.instance_uuid
try:
if uuid not in instances:
instances[uuid] = migration.instance
except exception.InstanceNotFound as e:
# migration referencing deleted instance
LOG.debug('Migration instance not found: %s', e)
continue
# skip migration if instance isn't in a resize state:
if not _instance_in_resize_state(instances[uuid]):
LOG.warning("Instance not resizing, skipping migration.",
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
other_migration = filtered.get(uuid, None)
# NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
if other_migration:
om = other_migration
other_time = om.updated_at or om.created_at
migration_time = migration.updated_at or migration.created_at
if migration_time > other_time:
filtered[uuid] = migration
else:
filtered[uuid] = migration
for migration in filtered.values():
instance = instances[migration.instance_uuid]
# Skip migration (and mark it as error) if it doesn't match the
# instance migration id.
# This can happen if we have a stale migration record.
# We want to proceed if instance.migration_context is None
if (instance.migration_context is not None and
instance.migration_context.migration_id != migration.id):
LOG.info("Current instance migration %(im)s doesn't match "
"migration %(m)s, marking migration as error. "
"This can occur if a previous migration for this "
"instance did not complete.",
{'im': instance.migration_context.migration_id,
'm': migration.id})
migration.status = "error"
migration.save()
continue
try:
self._update_usage_from_migration(context, instance, migration,
nodename)
except exception.FlavorNotFound:
LOG.warning("Flavor could not be found, skipping migration.",
instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, nodename,
is_removed=False, require_allocation_refresh=False):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
# NOTE(sfinucan): Both brand new instances as well as instances that
# are being unshelved will have is_new_instance == True
is_removed_instance = not is_new_instance and (is_removed or
instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
if is_new_instance:
self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
sign = 1
if is_removed_instance:
self.tracked_instances.pop(uuid)
sign = -1
cn = self.compute_nodes[nodename]
self.stats.update_stats_for_instance(instance, is_removed_instance)
cn.stats = copy.deepcopy(self.stats)
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
sign=sign)
if require_allocation_refresh:
LOG.debug("Auto-correcting allocations.")
self.reportclient.update_instance_allocation(context, cn,
instance, sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance), nodename,
sign=sign)
cn.current_workload = self.stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
cn = self.compute_nodes[nodename]
# set some initial values, reserve room for host/hypervisor:
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
cn.memory_mb_used = CONF.reserved_host_memory_mb
cn.vcpus_used = CONF.reserved_host_cpus
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
cn.current_workload = 0
cn.running_vms = 0
# NOTE(jaypipes): In Pike, we need to be tolerant of Ocata compute
# nodes that overwrite placement allocations to look like what the
# resource tracker *thinks* is correct. When an instance is
# migrated from an Ocata compute node to a Pike compute node, the
# Pike scheduler will have created a "doubled-up" allocation that
# contains allocated resources against both the source and
# destination hosts. The Ocata source compute host, during its
# update_available_resource() periodic call will find the instance
# in its list of known instances and will call
# update_instance_allocation() in the report client. That call will
# pull the allocations for the instance UUID which will contain
# both the source and destination host providers in the allocation
# set. Seeing that this is different from what the Ocata source
# host thinks it should be and will overwrite the allocation to
# only be an allocation against itself.
#
# And therefore, here we need to have Pike compute hosts
# "correct" the improper healing that the Ocata source host did
# during its periodic interval. When the instance is fully migrated
# to the Pike compute host, the Ocata compute host will find an
# allocation that refers to itself for an instance it no longer
# controls and will *delete* all allocations that refer to that
# instance UUID, assuming that the instance has been deleted. We
# need the destination Pike compute host to recreate that
# allocation to refer to its own resource provider UUID.
#
# For Pike compute nodes that migrate to either a Pike compute host
# or a Queens compute host, we do NOT want the Pike compute host to
# be "healing" allocation information. Instead, we rely on the Pike
# scheduler to properly create allocations during scheduling.
#
# Pike compute hosts may still rework an
# allocation for an instance in a move operation during
# confirm_resize() on the source host which will remove the
# source resource provider from any allocation for an
# instance.
#
# In Queens and beyond, the scheduler will understand when
# a move operation has been requested and instead of
# creating a doubled-up allocation that contains both the
# source and destination host, the scheduler will take the
# original allocation (against the source host) and change
# the consumer ID of that allocation to be the migration
# UUID and not the instance UUID. The scheduler will
# allocate the resources for the destination host to the
# instance UUID.
compute_version = objects.Service.get_minimum_version(
context, 'nova-compute')
has_ocata_computes = compute_version < 22
# Some drivers (ironic) still need the allocations to be
# fixed up, as they transition the way their inventory is reported.
require_allocation_refresh = (
has_ocata_computes or
self.driver.requires_allocation_refresh)
msg_allocation_refresh = (
"Compute driver doesn't require allocation refresh and we're on a "
"compute host in a deployment that only has compute hosts with "
"Nova versions >=16 (Pike). Skipping auto-correction of "
"allocations. ")
if require_allocation_refresh:
if self.driver.requires_allocation_refresh:
msg_allocation_refresh = (
"Compute driver requires allocation refresh. ")
elif has_ocata_computes:
msg_allocation_refresh = (
"We're on a compute host from Nova version >=16 (Pike or "
"later) in a deployment with at least one compute host "
"version <16 (Ocata or earlier). ")
msg_allocation_refresh += (
"Will auto-correct allocations to handle "
"Ocata-style assumptions.")
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
if msg_allocation_refresh:
LOG.debug(msg_allocation_refresh)
msg_allocation_refresh = False
self._update_usage_from_instance(context, instance, nodename,
require_allocation_refresh=require_allocation_refresh)
def _remove_deleted_instances_allocations(self, context, cn,
migrations):
migration_uuids = [migration.uuid for migration in migrations
if 'uuid' in migration]
# NOTE(jaypipes): All of this code sucks. It's basically dealing with
# all the corner cases in move, local delete, unshelve and rebuild
# operations for when allocations should be deleted when things didn't
# happen according to the normal flow of events where the scheduler
# always creates allocations for an instance
known_instances = set(self.tracked_instances.keys())
allocations = self.reportclient.get_allocations_for_resource_provider(
context, cn.uuid) or {}
read_deleted_context = context.elevated(read_deleted='yes')
for consumer_uuid, alloc in allocations.items():
if consumer_uuid in known_instances:
LOG.debug("Instance %s actively managed on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
if consumer_uuid in migration_uuids:
LOG.debug("Migration %s is active on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
# We know these are instances now, so proceed
instance_uuid = consumer_uuid
try:
instance = objects.Instance.get_by_uuid(read_deleted_context,
instance_uuid,
expected_attrs=[])
except exception.InstanceNotFound:
# The instance isn't even in the database. Either the scheduler
# _just_ created an allocation for it and we're racing with the
# creation in the cell database, or the instance was deleted
# and fully archived before we got a chance to run this. The
# former is far more likely than the latter. Avoid deleting
# allocations for a building instance here.
LOG.info("Instance %(uuid)s has allocations against this "
"compute host but is not found in the database.",
{'uuid': instance_uuid},
exc_info=False)
continue
if instance.deleted:
# The instance is gone, so we definitely want to remove
# allocations associated with it.
# NOTE(jaypipes): This will not be true if/when we support
# cross-cell migrations...
LOG.debug("Instance %s has been deleted (perhaps locally). "
"Deleting allocations that remained for this "
"instance against this compute host: %s.",
instance_uuid, alloc)
self.reportclient.delete_allocation_for_instance(context,
instance_uuid)
continue
if not instance.host:
# Allocations related to instances being scheduled should not
# be deleted if we already wrote the allocation previously.
LOG.debug("Instance %s has been scheduled to this compute "
"host, the scheduler has made an allocation "
"against this compute node but the instance has "
"yet to start. Skipping heal of allocation: %s.",
instance_uuid, alloc)
continue
if (instance.host == cn.host and
instance.node == cn.hypervisor_hostname):
# The instance is supposed to be on this compute host but is
# not in the list of actively managed instances.
LOG.warning("Instance %s is not being actively managed by "
"this compute host but has allocations "
"referencing this compute host: %s. Skipping "
"heal of allocation because we do not know "
"what to do.", instance_uuid, alloc)
continue
if instance.host != cn.host:
# The instance has been moved to another host either via a
# migration, evacuation or unshelve in between the time when we
# ran InstanceList.get_by_host_and_node(), added those
# instances to RT.tracked_instances and the above
# Instance.get_by_uuid() call. We SHOULD attempt to remove any
# allocations that reference this compute host if the VM is in
# a stable terminal state (i.e. it isn't in a state of waiting
# for resize to confirm/revert), however if the destination
# host is an Ocata compute host, it will delete the allocation
# that contains this source compute host information anyway and
# recreate an allocation that only refers to itself. So we
# don't need to do anything in that case. Just log the
# situation here for debugging information but don't attempt to
# delete or change the allocation.
LOG.debug("Instance %s has been moved to another host %s(%s). "
"There are allocations remaining against the source "
"host that might need to be removed: %s.",
instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
self._delete_allocation_for_moved_instance(
context, instance, node, 'evacuated', node_type)
def delete_allocation_for_migrated_instance(self, context, instance, node):
self._delete_allocation_for_moved_instance(context, instance, node,
'migrated')
def _delete_allocation_for_moved_instance(
self, context, instance, node, move_type, node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not scheduler_utils.remove_allocation_from_compute(
context, instance, cn_uuid, self.reportclient):
LOG.error("Failed to clean allocation of %s "
"instance on the %s node %s",
move_type, node_type, cn_uuid, instance=instance)
def delete_allocation_for_failed_resize(self, context, instance, node,
flavor):
"""Delete instance allocations for the node during a failed resize
:param context: The request context.
:param instance: The instance being resized/migrated.
:param node: The node provider on which the instance should have
allocations to remove. If this is a resize to the same host, then
the new_flavor resources are subtracted from the single allocation.
:param flavor: This is the new_flavor during a resize.
"""
cn = self.compute_nodes[node]
if not scheduler_utils.remove_allocation_from_compute(
context, instance, cn.uuid, self.reportclient, flavor):
if instance.instance_type_id == flavor.id:
operation = 'migration'
else:
operation = 'resize'
LOG.error('Failed to clean allocation after a failed '
'%(operation)s on node %(node)s',
{'operation': operation, 'node': cn.uuid},
instance=instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)",
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def delete_allocation_for_shelve_offloaded_instance(self, context,
instance):
self.reportclient.delete_allocation_for_instance(context,
instance.uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, context, instance, prefix, migration):
"""Get the instance type from instance."""
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
usage = {}
if isinstance(object_or_dict, objects.Instance):
usage = {'memory_mb': object_or_dict.flavor.memory_mb,
'vcpus': object_or_dict.flavor.vcpus,
'root_gb': object_or_dict.flavor.root_gb,
'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
'numa_topology': object_or_dict.numa_topology}
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
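    # Illustrative sketch (not part of the original module): roughly what
    # _get_usage_dict returns for a hypothetical flavor-like dict when only a
    # 'numa_topology' override is passed via **updates.
    #
    #   usage = self._get_usage_dict(
    #       {'memory_mb': 2048, 'vcpus': 2, 'root_gb': 20},
    #       numa_topology=None)
    #   # -> {'memory_mb': 2048, 'vcpus': 2, 'root_gb': 20, 'numa_topology': None}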
| 46.632667
| 79
| 0.622872
|
932a16f38bd3e371e894a271b9481ab24379cc18
| 1,095
|
py
|
Python
|
permabots/models/environment_vars.py
|
eafanasev/permabots
|
24de0376e8c482800f4214c021c133d81b9de69f
|
[
"BSD-3-Clause"
] | null | null | null |
permabots/models/environment_vars.py
|
eafanasev/permabots
|
24de0376e8c482800f4214c021c133d81b9de69f
|
[
"BSD-3-Clause"
] | null | null | null |
permabots/models/environment_vars.py
|
eafanasev/permabots
|
24de0376e8c482800f4214c021c133d81b9de69f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from permabots.models.base import PermabotsModel
from permabots.models import Bot
import logging
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
class EnvironmentVar(PermabotsModel):
"""
Environment Variable associated to a Bot.
Use it in contexts as {{ env.variable_key }}.
"""
    bot = models.ForeignKey(Bot, verbose_name=_('Bot'), related_name="env_vars", help_text=_("Bot this variable is attached to."), on_delete=models.CASCADE)
key = models.CharField(_('Key'), max_length=255, help_text=_("Name of the variable"))
value = models.CharField(_('Value'), max_length=255, help_text=_("Value of the variable"))
class Meta:
verbose_name = _('Environment Var')
verbose_name_plural = _('Environment Vars')
def __str__(self):
return "(%s, %s)" % (self.key, self.value)
def as_json(self):
return {self.key: self.value}
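    # Illustrative sketch (not part of the original module): for a hypothetical
    # variable with key "API_TOKEN" and value "secret", as_json() returns
    # {"API_TOKEN": "secret"}, which is the mapping that lets templates resolve
    # {{ env.API_TOKEN }} as described in the class docstring.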
| 35.322581
| 154
| 0.702283
|
1056290896a217342ab163dfeec84bbe5f3c803a
| 7,224
|
py
|
Python
|
refparse/control.py
|
MattWellie/refparse
|
068721329b70cd727301c1c1c08f6989ce31c17b
|
[
"MIT"
] | null | null | null |
refparse/control.py
|
MattWellie/refparse
|
068721329b70cd727301c1c1c08f6989ce31c17b
|
[
"MIT"
] | null | null | null |
refparse/control.py
|
MattWellie/refparse
|
068721329b70cd727301c1c1c08f6989ce31c17b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
import configparser
from reader import Reader
from latex_writer import LatexWriter
from GBParser import GBParser
from LRGParser import LRGParser
from primer_module import Primer
import os
import logging
from utilities import check_file_type
__author__ = "mwelland"
__version__ = 2.0
__version_date__ = "06/08/2020"
"""
- The input file type is checked and the file_type variable is set
- If the input is LRG, an LRG_Parser instance is created
- If the input is GenBank, a GbkParser instance is created
- The appropriate Parser instance is used to read the input file
contents into a dictionary object which is returned
- The dictionary has the following structure:
Dict { pad
filename
genename
refseqname
transcripts { transcript { protein_seq
cds_offset
exons { exon_number { genomic_start
genomic_stop
transcript_start
transcript_stop
sequence (with pad)
- Use of this dictionary structure allows for use of absolute references
to access each required part of the processed input, and allows for
the extension of the format to include any features required later
- The returned dictionary is passed through a Reader instance, which scans
through the created dictionary, and creates a list of Strings which
represent the typesetting which will be used for the final output.
- The Reader instance has been chosen to write out in a generic format, to
allow the dictionary contents to be used as a text output or for LaTex.
Use of a Boolean write_as_latex variable can be used to decide whether
the output will include LaTex headers and footers
- The list output from the Reader instance is written to an output file using
a writer object. Currently this is a LatexWriter instance, using standard
  printing to file. This could be replaced with a print to .txt for inspection
- The LatexWriter Class creates an output directory which contains a reference
to the input file name, intronic padding, the date and time. This is done
to ensure that the output directory is unique and identifies the exact point
in time when the output file was created
- The LatexWriter also creates the full PDF output using a Python facilitated
command line call. The output '.tex' file is created in the new output
directory and is processed using pdflatex
"""
def about():
"""
─────────▄──────────────▄
────────▌▒█───────────▄▀▒▌
────────▌▒▒▀▄───────▄▀▒▒▒▐
───────▐▄▀▒▒▀▀▀▀▄▄▄▀▒▒▒▒▒▐
─────▄▄▀▒▒▒▒▒▒▒▒▒▒▒█▒▒▄█▒▐
───▄▀▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▀██▀▒▌
──▐▒▒▒▄▄▄▒▒▒▒▒▒▒▒▒▒▒▒▒▀▄▒▒▌
──▌▒▒▐▄█▀▒▒▒▒▄▀█▄▒▒▒▒▒▒▒█▒▐
─▐▒▒▒▒▒▒▒▒▒▒▒▌██▀▒▒▒▒▒▒▒▒▀▄▌
─▌▒▀▄██▄▒▒▒▒▒▒▒▒▒▒▒░░░░▒▒▒▒▌
─▌▀▐▄█▄█▌▄▒▀▒▒▒▒▒▒░░░░░░▒▒▒▐
▐▒▀▐▀▐▀▒▒▄▄▒▄▒▒▒▒▒░░░░░░▒▒▒▒▌
▐▒▒▒▀▀▄▄▒▒▒▄▒▒▒▒▒▒░░░░░░▒▒▒▐
─▌▒▒▒▒▒▒▀▀▀▒▒▒▒▒▒▒▒░░░░▒▒▒▒▌
─▐▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▐
──▀▄▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▄▒▒▒▒▌
────▀▄▒▒▒▒▒▒▒▒▒▒▄▄▄▀▒▒▒▒▄▀
───▐▀▒▀▄▄▄▄▄▄▀▀▀▒▒▒▒▒▄▄▀
--──▐▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▀▀
So gene. Such reference. Wow.
"""
return
def find_filename(gene):
"""
checks within the 'primers' folder for a matching file name
returns the file with extension if a match is found
Args:
gene: name of the gene we are trying to match to a primer file
"""
try:
gene_name_files = {
os.path.splitext(fname)[0]: fname for fname in os.listdir("primers")
}
if gene in gene_name_files.keys():
return gene_name_files[gene]
except:
return False
return False
def run_parser():
file_type = check_file_type(args.input_file)
if file_type == "gbk":
gbk_reader = GBParser(args, app_settings)
dictionary = gbk_reader.run()
parser_details = gbk_reader.get_version
elif file_type == "lrg":
lrg_reader = LRGParser(args, app_settings)
dictionary = lrg_reader.run()
parser_details = lrg_reader.get_version
else:
raise Exception("Unrecognised file format: {}".format(file_type))
# check for a strict filename match and run the primer annotation if appropriate
filename_or_false = find_filename(dictionary["genename"])
if filename_or_false:
logging.info(
"Primer {} identified, running annotation".format(filename_or_false)
)
primer_label = Primer(dictionary, filename=filename_or_false)
dictionary = primer_label.run()
parser_details = "{} Parser: {}".format(file_type.upper(), parser_details)
for transcript in dictionary["transcripts"]:
version_details = "ReferenceTypeSetter: Version: {0}, Version Date: {1}".format(
__version__, __version_date__
)
list_of_versions = [parser_details, version_details]
lrg_num = "{}t{}".format(
args.input_file.split(".")[0].split("/")[1], transcript
)
input_reader = Reader(
args,
dictionary,
transcript,
list_of_versions,
file_type,
lrg_num,
app_settings,
)
input_list, nm = input_reader.run()
if file_type == "gbk":
filename = "{}_{}".format(dictionary["genename"], nm)
else:
filename = "{}_{}".format(dictionary["genename"], lrg_num)
writer = LatexWriter(input_list, filename, args.write_as_latex)
logging.info("Generated file {}".format(writer.run()))
def move_files(latex_file):
os.rename(latex_file, os.path.join("tex_files", latex_file))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
arg_parser = argparse.ArgumentParser(
description="Customise reference sequence settings"
)
arg_parser.add_argument("-i", dest="input_file", required=True)
arg_parser.add_argument(
"--trim", dest="trim_flanking", action="store_false", default=True
)
arg_parser.add_argument(
"--clashes", dest="print_clashes", action="store_false", default=True
)
arg_parser.add_argument(
"--text",
dest="write_as_latex",
action="store_false",
default=True,
help="use the argument --text if you want to output only a text document (not conversion to PDF) - this prevents primer annotation",
)
arg_parser.add_argument(
"--config",
dest="settings",
default="settings/default_settings.ini",
help="location of a custom configparser configuration file",
)
arg_parser.add_argument("--author", default="mwelland")
args = arg_parser.parse_args()
app_settings = configparser.ConfigParser()
app_settings.read(args.settings)
run_parser()
logging.info("Process has completed successfully")
| 36.301508
| 140
| 0.586379
|
84b9452c6cb88f5d6c5c527ff606610120ed0e4c
| 1,123
|
py
|
Python
|
Week 1/id_700/88_mergeTwoSortArray.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | 1
|
2019-10-12T06:48:45.000Z
|
2019-10-12T06:48:45.000Z
|
Week 1/id_700/88_mergeTwoSortArray.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | 1
|
2019-12-01T10:02:03.000Z
|
2019-12-01T10:02:03.000Z
|
Week 1/id_700/88_mergeTwoSortArray.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | null | null | null |
# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 so
# that nums1 becomes a single sorted array.
#
# Notes:
#
# The numbers of initialized elements in nums1 and nums2 are m and n respectively.
# You may assume that nums1 has enough space (size greater than or equal to m + n)
# to hold the elements from nums2.
#
#
# Source: LeetCode
# Link: https://leetcode-cn.com/problems/merge-sorted-array
# Copyright belongs to LeetCode China (领扣网络). Contact them for authorization
# before commercial reprints; cite the source for non-commercial reprints.
##### Approach 1: brute force. Copy nums1 out, reset nums1 to an empty list, then
##### walk the copy and nums2 in step, comparing elements and appending the
##### smaller one to nums1.
from typing import List
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
        nums3 = nums1[:m] #### copy nums1 out
        nums1[:] = [] ##### reset nums1 to an empty list
i,j = 0,0
while i < m and j <n:
if nums3[i] <= nums2[j]:
                nums1.append(nums3[i]) ####### on the first attempt this was mistakenly written as nums1.append()=nums3[i]
i += 1
else:
nums1.append(nums2[j])
j += 1
        if i < m: ######### nums1 and nums2 have different lengths, so don't forget to append the remaining already-sorted values that need no further comparison
nums1[i+j:] = nums3[i :]
if j < n:
nums1[i+j:] = nums2[j :]
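        # Illustrative usage (not part of the original solution), assuming the
        # standard LeetCode example input:
        #   nums1 = [1, 2, 3, 0, 0, 0]; nums2 = [2, 5, 6]
        #   Solution().merge(nums1, 3, nums2, 3)
        #   # nums1 is now [1, 2, 2, 3, 5, 6]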
| 32.085714
| 102
| 0.535174
|
227d3761d33f00daeaf174555f849152337706da
| 4,449
|
py
|
Python
|
scripts/msgf2seq.py
|
marcottelab/infer_complexes
|
45ee1fb948f48fcf7fa97289c67d098e533116e4
|
[
"MIT"
] | 1
|
2017-10-04T05:17:22.000Z
|
2017-10-04T05:17:22.000Z
|
scripts/msgf2seq.py
|
marcottelab/infer_complexes
|
45ee1fb948f48fcf7fa97289c67d098e533116e4
|
[
"MIT"
] | null | null | null |
scripts/msgf2seq.py
|
marcottelab/infer_complexes
|
45ee1fb948f48fcf7fa97289c67d098e533116e4
|
[
"MIT"
] | null | null | null |
from __future__ import division
import itertools as it
import numpy as np
import subprocess
from os.path import abspath
import os
import sys
sys.path.append(os.path.dirname(abspath(__file__))+'/../')
import utils as ut
import seqs
PROTON = 1.00727646677
WATER = 18.01048 # mass of a water due to no peptide bonds on the end
AAS = dict(
A = 71.03711,
R = 156.10111,
N = 114.04293,
D = 115.02694,
C = 160.030654, # iodoacetamide treatment
#C = 103.00919,
E = 129.04259,
Q = 128.05858,
G = 57.02146,
H = 137.05891,
I = 113.08406,
L = 113.08406,
K = 128.09496,
M = 131.04049,
F = 147.06841,
P = 97.05276,
S = 87.03203,
T = 101.04768,
W = 186.07931,
Y = 163.06333,
V = 99.06841
)
DECOY = 'rv_'
SEQ_DECOY = 'rm_'
searches = {
'logSpecProb_hit_list_best': 'msgfdb',
'MQscore_hit_list_best': 'inspect',
'xcorr_hit_list_best': 'tide'
}
def msgfbest2sequest_line(r, prots2genes, genes2prots, search):
fname_ind_scan_charge = r[0]
charge = float(r[1])
msgf_mass = float(r[2])
msgf_diff = float(r[3])
peptide = r[4]
protein = r[5]
protid = protein[3:] if protein.startswith(DECOY) else protein
if protid in prots2genes:
geneid = prots2genes[protid]
elif protid in genes2prots:
geneid = protid
else:
print "WARNING: %s not gene or protein in fasta; assumed gene" % protid
geneid = protid
seq_protein = SEQ_DECOY + geneid if protein.startswith(DECOY) else geneid
seq_mass = msgf2seq_mass(msgf_mass, charge, search)
seq_diff = calculate_mass(peptide) - seq_mass
seq_diff_temp = seq_diff
for i in range(5):
if abs(seq_diff_temp) < .15:
seq_diff = seq_diff_temp
break
else:
seq_diff_temp += -np.sign(seq_diff_temp)*PROTON
line_out = ' '.join([str(i) for i in
[
1, # placeholder
fname_ind_scan_charge,
seq_mass,
'({0:+.05f})'.format(seq_diff),
5,5,5,1,"12/345",1234.5,0, # placeholders
seq_protein,
'Z.%s.Z' % peptide
]])
return line_out
def msgf2seq_mass(m, z, search):
# msgfdb and inspect use observed m/z.
# tide uses something weird.
# sequest uses an estimate of the molecular mass of the observed peptide
if search in ['msgfdb','inspect']:
return m * z - z * PROTON
elif search == 'tide':
return m * z
def calculate_mass(peptide):
return sum([AAS[aa] for aa in peptide]) + WATER
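# Illustrative sketch (not part of the original script): for an observed m/z of
# 500.0 at charge 2, msgf2seq_mass(500.0, 2, 'msgfdb') gives
# 500.0 * 2 - 2 * PROTON, i.e. roughly 997.985.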
def msgf2seq_file(filepath, fasta_file, msb_psms):
"""
msb_psms: set of spectid_peptidesequence
"""
def parse_spec_pep_row(r):
# get spec_pep from _best file format
parsed = '_'.join(r[0].split('.')[:2] + [r[4]])
#print parsed
return parsed
usedir,fin = os.path.split(filepath)
# Get the sample filename from the first item of the third line
fout = next(it.islice(ut.load_tab_file(filepath),2,3))[0].split('.')[0]
in_gen = ut.load_tab_file(filepath)
in_gen.next(); in_gen.next() # skip 2 lines
p2g = seqs.prots2genes(fasta_file)
g2p = ut.dict_inverse(p2g)
fout = os.path.join(usedir, '.'.join([fout, fin.split('.')[-1] ,
'sequestformat']))
search = searches[filepath.split('.')[-1]]
print "Converting/filtering; Search:", search
output = (msgfbest2sequest_line(r,p2g, g2p, search) for r in in_gen
if parse_spec_pep_row(r) in msb_psms)
print "Writing", fout
ut.write_tab_file(output, fout)
return fout
def parse_msb_psms(fname):
item1s = (line[0] for line in ut.load_tab_file(fname))
# ex: WAN110811_HCW_HEK293NE_P1D08.01387.2.SGNLTEDDKHNNAK
item1s.next() # skip 1 line
spect_pep = ('_'.join([sample,spect,pep]) for sample,spect,_,pep in
(i1.split('.') for i1 in item1s))
return set(spect_pep)
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit("usage: python script_msgf2seq.py fasta_file msb_psm_file filename(s)")
fasta_file = sys.argv[1]
msb_psm_file = sys.argv[2]
filenames = sys.argv[3:]
print "Loading msblender output", msb_psm_file
msb_psms = parse_msb_psms(msb_psm_file)
#print "msb psms 0:", list(msb_psms)[0]
for f in filenames:
print "Loading search output", f
fout = msgf2seq_file(f, fasta_file, msb_psms)
| 30.682759
| 89
| 0.621938
|
031fef7b47575ab63fa043e9aeab3ec837eefa01
| 1,092
|
py
|
Python
|
lesson10/exercise1a.py
|
mfeindt0705/pynetmf
|
02fc092fd42ce5be5e160fa88b65c63d23408a6a
|
[
"Apache-2.0"
] | null | null | null |
lesson10/exercise1a.py
|
mfeindt0705/pynetmf
|
02fc092fd42ce5be5e160fa88b65c63d23408a6a
|
[
"Apache-2.0"
] | 7
|
2021-03-18T21:28:13.000Z
|
2022-02-10T10:39:10.000Z
|
lesson10/exercise1a.py
|
mfeindt0705/pynetmf
|
02fc092fd42ce5be5e160fa88b65c63d23408a6a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Create a Python script that executes "show version" on each of the network devices defined in my_devices.py.
This script should execute serially
"""
from datetime import datetime
from netmiko import ConnectHandler
from my_devices_mf import device_list as devices
import warnings
warnings.filterwarnings(action='ignore', module='.*paramiko.*')
def show_version(device):
net_connect = ConnectHandler(**device)
# send show version and wait for prompt
output = net_connect.send_command_expect("show version")
net_connect.disconnect()
return output
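# Illustrative sketch (not part of the original exercise): each entry in
# device_list is expected to be a Netmiko-style dict; the keys below are a
# hypothetical example, not the actual contents of my_devices_mf.py.
#
#   example_device = {
#       "device_type": "cisco_ios",
#       "host": "198.51.100.10",
#       "username": "admin",
#       "password": "password",
#   }
#   print(show_version(example_device))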
def main():
"""
Use Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this
"""
start_time = datetime.now()
for device in devices:
print()
print('#' * 40)
output = show_version(device)
print(output)
print()
print('#' * 40)
print("\nBenoetigte Zeit: " + str(datetime.now() - start_time))
return None
if __name__ == "__main__":
main()
| 24.818182
| 109
| 0.682234
|
bc655ea1c883d5fc12bf63902c1b97a84eed42fd
| 2,764
|
py
|
Python
|
jdcloud_sdk/services/monitor/models/Alarm.py
|
lidaobing/jdcloud-sdk-python
|
f305e8ddd74ab4ad445477744534e7299d4d93fb
|
[
"Apache-2.0"
] | null | null | null |
jdcloud_sdk/services/monitor/models/Alarm.py
|
lidaobing/jdcloud-sdk-python
|
f305e8ddd74ab4ad445477744534e7299d4d93fb
|
[
"Apache-2.0"
] | null | null | null |
jdcloud_sdk/services/monitor/models/Alarm.py
|
lidaobing/jdcloud-sdk-python
|
f305e8ddd74ab4ad445477744534e7299d4d93fb
|
[
"Apache-2.0"
] | 1
|
2019-03-01T08:44:37.000Z
|
2019-03-01T08:44:37.000Z
|
# coding=utf8
# Copyright 2018-2025 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Alarm(object):
def __init__(self, calculation=None, contactGroups=None, contactPersons=None, createTime=None, enabled=None, id=None, metric=None, metricName=None, noticePeriod=None, noticeTime=None, operation=None, period=None, region=None, resourceId=None, serviceCode=None, status=None, tag=None, threshold=None, times=None, value=None):
"""
        :param calculation: (Optional) statistic method: average=avg, maximum=max, minimum=min
        :param contactGroups: (Optional) contact groups to notify, e.g. ["group1", "group2"]
        :param contactPersons: (Optional) contact persons to notify, e.g. ["person1", "person2"]
        :param createTime: (Optional) creation time
        :param enabled: (Optional) enabled flag: 1 enabled, 0 disabled
        :param id: (Optional) rule id
        :param metric: (Optional) monitored metric
        :param metricName: (Optional) name of the monitored metric for this rule
        :param noticePeriod: (Optional) notification period, in hours
        :param noticeTime: (Optional) time of the alarm; only valid when querying rules that are currently alarming
        :param operation: (Optional) >=, >, <, <=, ==, !=
        :param period: (Optional) statistic period (in minutes)
        :param region: (Optional) region information
        :param resourceId: (Optional) id of the resource this rule applies to
        :param serviceCode: (Optional) product the alarm rule belongs to
        :param status: (Optional) metric status: 1 normal, 2 alarming, 4 insufficient data
        :param tag: (Optional) additional metric information
        :param threshold: (Optional) threshold
        :param times: (Optional) number of consecutive occurrences before alarming
        :param value: (Optional) alarm value; only valid when querying rules that are currently alarming
"""
self.calculation = calculation
self.contactGroups = contactGroups
self.contactPersons = contactPersons
self.createTime = createTime
self.enabled = enabled
self.id = id
self.metric = metric
self.metricName = metricName
self.noticePeriod = noticePeriod
self.noticeTime = noticeTime
self.operation = operation
self.period = period
self.region = region
self.resourceId = resourceId
self.serviceCode = serviceCode
self.status = status
self.tag = tag
self.threshold = threshold
self.times = times
self.value = value
| 41.878788
| 328
| 0.668234
|
6314414d0f7dfac608fc979495a757a1f0acd0e6
| 1,418
|
py
|
Python
|
migrations/versions/177a65486a0_.py
|
PyGotham/pygotham
|
310508c16dabf2ce9aaf0c2624132d725f71143b
|
[
"BSD-3-Clause"
] | 19
|
2015-03-03T19:50:55.000Z
|
2019-11-19T17:55:33.000Z
|
migrations/versions/177a65486a0_.py
|
PyGotham/pygotham
|
310508c16dabf2ce9aaf0c2624132d725f71143b
|
[
"BSD-3-Clause"
] | 124
|
2015-01-23T06:28:44.000Z
|
2020-10-03T12:59:30.000Z
|
migrations/versions/177a65486a0_.py
|
PyGotham/pygotham
|
310508c16dabf2ce9aaf0c2624132d725f71143b
|
[
"BSD-3-Clause"
] | 21
|
2015-04-15T03:01:07.000Z
|
2020-10-03T12:55:38.000Z
|
"""Add slugs to events.
Revision ID: 177a65486a0
Revises: 32dc1be3b22
Create Date: 2015-04-27 04:40:15.594924
"""
# revision identifiers, used by Alembic.
revision = '177a65486a0'
down_revision = '32dc1be3b22'
from alembic import op
from slugify import slugify
import sqlalchemy as sa
from sqlalchemy import Integer, String
from sqlalchemy.sql import table, column
events_table = table(
'events',
column('id', Integer),
column('name', String),
column('slug', String),
)
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('events', sa.Column('slug', sa.String(length=75), nullable=True))
op.create_unique_constraint('events_slug_key', 'events', ['slug'])
### end Alembic commands ###
conn = op.get_bind()
events = conn.execute(events_table.select())
for event in events:
if not event.slug:
op.execute(
events_table.update().where(
events_table.c.id == event.id
).values(
slug=slugify(event.name)
)
)
op.alter_column('events', sa.Column('slug', sa.String(length=75), nullable=False))
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('events_slug_key', 'events', type_='unique')
op.drop_column('events', 'slug')
### end Alembic commands ###
| 26.754717
| 86
| 0.636812
|
9da09aa1e942010041c160ee8cf11d403506ab60
| 482
|
py
|
Python
|
test_actionlib/curlpp/catkin_generated/pkg.installspace.context.pc.py
|
ChendiDotLin/FlexBE-Trial
|
a6b3ef005b6c4ac751377a2737a3efae2b439da6
|
[
"BSD-3-Clause"
] | null | null | null |
test_actionlib/curlpp/catkin_generated/pkg.installspace.context.pc.py
|
ChendiDotLin/FlexBE-Trial
|
a6b3ef005b6c4ac751377a2737a3efae2b439da6
|
[
"BSD-3-Clause"
] | null | null | null |
test_actionlib/curlpp/catkin_generated/pkg.installspace.context.pc.py
|
ChendiDotLin/FlexBE-Trial
|
a6b3ef005b6c4ac751377a2737a3efae2b439da6
|
[
"BSD-3-Clause"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/usr/local/include".split(';') if "/usr/local/include" != "" else []
PROJECT_CATKIN_DEPENDS = "actionlib;actionlib_msgs;message_generation;roscpp;rospy".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ltest_actionlib".split(';') if "-ltest_actionlib" != "" else []
PROJECT_NAME = "test_actionlib"
PROJECT_SPACE_DIR = "/usr/local"
PROJECT_VERSION = "0.0.0"
| 53.555556
| 103
| 0.751037
|
c82514b68037cfed7ba552386499961a2d9332e2
| 3,321
|
py
|
Python
|
example/train_R_net.py
|
zuoqing1988/train-mtcnn-head
|
a19a0d494809035eff04b489962c7112a312aca3
|
[
"MIT"
] | 91
|
2019-01-03T01:58:56.000Z
|
2022-03-04T06:28:06.000Z
|
example/train_R_net.py
|
joey2014/train-mtcnn-head
|
10c120e7bfa5519bd81634138db63c97de034197
|
[
"MIT"
] | 7
|
2019-02-14T07:53:32.000Z
|
2020-03-06T06:50:56.000Z
|
example/train_R_net.py
|
joey2014/train-mtcnn-head
|
10c120e7bfa5519bd81634138db63c97de034197
|
[
"MIT"
] | 20
|
2019-01-20T08:37:47.000Z
|
2020-09-18T23:20:54.000Z
|
import argparse
import mxnet as mx
import sys,os
sys.path.append(os.getcwd())
from core.imdb import IMDB
from train import train_net
from core.symbol import R_Net
def train_R_net(image_set, root_path, dataset_path, prefix, ctx,
pretrained, epoch, begin_epoch, end_epoch, batch_size, thread_num,
frequent, lr, lr_epoch, resume):
imdb = IMDB("mtcnn", image_set, root_path, dataset_path, 'train')
gt_imdb = imdb.get_annotations()
sym = R_Net()
train_net(sym, prefix, ctx, pretrained, epoch, begin_epoch, end_epoch, gt_imdb, batch_size, thread_num,
24, True, True, frequent, not resume, lr, lr_epoch)
def parse_args():
parser = argparse.ArgumentParser(description='Train refine net(24-net)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--image_set', dest='image_set', help='training set',
default='train_24', type=str)
parser.add_argument('--root_path', dest='root_path', help='output data folder',
default='data', type=str)
parser.add_argument('--dataset_path', dest='dataset_path', help='dataset folder',
default='data/mtcnn', type=str)
parser.add_argument('--prefix', dest='prefix', help='new model prefix',
default='model/rnet', type=str)
parser.add_argument('--gpus', dest='gpu_ids', help='GPU device to train with',
default='0', type=str)
parser.add_argument('--pretrained', dest='pretrained', help='pretrained prefix',
default='model/rnet', type=str)
parser.add_argument('--epoch', dest='epoch', help='load epoch',
default=0, type=int)
parser.add_argument('--begin_epoch', dest='begin_epoch', help='begin epoch of training',
default=0, type=int)
parser.add_argument('--end_epoch', dest='end_epoch', help='end epoch of training',
default=16, type=int)
parser.add_argument('--batch_size', dest='batch_size', help='batch_size of training',
default=512, type=int)
parser.add_argument('--thread_num', dest='thread_num', help='thread num of training',
default=4, type=int)
parser.add_argument('--frequent', dest='frequent', help='frequency of logging',
default=100, type=int)
parser.add_argument('--lr', dest='lr', help='learning rate',
default=0.01, type=float)
parser.add_argument('--lr_epoch', dest='lr_epoch', help='learning rate epoch',
default='8,14', type=str)
parser.add_argument('--resume', dest='resume', help='continue training', action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print 'Called with argument:'
print args
ctx = [mx.gpu(int(i)) for i in args.gpu_ids.split(',')]
lr_epoch = [int(i) for i in args.lr_epoch.split(',')]
train_R_net(args.image_set, args.root_path, args.dataset_path, args.prefix, ctx,
args.pretrained, args.epoch,
args.begin_epoch, args.end_epoch, args.batch_size, args.thread_num, args.frequent, args.lr, lr_epoch, args.resume)
| 52.714286
| 130
| 0.624511
|
6e7897ba5f12da26ab9ead63bd682875dcf76723
| 1,110
|
py
|
Python
|
stock_analysis/testing/bitcoin.py
|
RyanArnasonML/stock-analysis
|
a5c79d9c438f095dc370f2db4e4780356cdc5d01
|
[
"MIT"
] | null | null | null |
stock_analysis/testing/bitcoin.py
|
RyanArnasonML/stock-analysis
|
a5c79d9c438f095dc370f2db4e4780356cdc5d01
|
[
"MIT"
] | null | null | null |
stock_analysis/testing/bitcoin.py
|
RyanArnasonML/stock-analysis
|
a5c79d9c438f095dc370f2db4e4780356cdc5d01
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 10:24:21 2020
@author: ryanar
"""
import matplotlib.pyplot as plt
from stock_analysis import StockReader, StockVisualizer, Technical, AssetGroupVisualizer, StockAnalyzer, AssetGroupAnalyzer, StockModeler
from stock_analysis.utils import group_stocks, describe_group, make_portfolio
reader = StockReader('2019-01-01','2020-11-21')
bitcoin = reader.get_bitcoin_data()
sma_bitcoin = Technical(bitcoin).SimpleMovingAverage()
atr_bitcoin = Technical(bitcoin).AverageTrueRange()
obv_bitcoin = Technical(bitcoin).KalmanAverage()
bitcoin_viz = StockVisualizer(bitcoin)
ax = bitcoin_viz.evolution_over_time(
'close', figsize=(10, 4), legend=False,
title='Bitcoin closing price over time'
)
bitcoin_viz.add_reference_line(
ax, x=bitcoin.high.idxmax(), color='k', linestyle=':', alpha=0.5,
label=f'highest value ({bitcoin.high.idxmax():%b %d})')
ax.set_ylabel('price ($)')
ay = bitcoin_viz.trade_volume()
#az = bitcoin_viz.candle_stick()
#aa = bitcoin_viz.renko()
#ab = bitcoin_viz.qqplot()
ac = bitcoin_viz.histogram(column='close')
| 27.073171
| 138
| 0.743243
|
7642e7a20320678a4c783e6fe188aaae6f3f6841
| 1,124
|
py
|
Python
|
src/tutorials/3-Movan/2-Session/main.py
|
yungshenglu/tensorflow-practice
|
3ec162c64531b20e143937c97b6bb56a54a20a40
|
[
"Apache-2.0"
] | 1
|
2018-12-16T12:59:57.000Z
|
2018-12-16T12:59:57.000Z
|
src/tutorials/3-Movan/2-Session/main.py
|
yungshenglu/tensorflow-practice
|
3ec162c64531b20e143937c97b6bb56a54a20a40
|
[
"Apache-2.0"
] | 1
|
2019-01-01T10:17:33.000Z
|
2019-01-01T10:17:33.000Z
|
src/tutorials/3-Movan/2-Session/main.py
|
yungshenglu/tensorflow-practice
|
3ec162c64531b20e143937c97b6bb56a54a20a40
|
[
"Apache-2.0"
] | 1
|
2018-12-31T10:38:06.000Z
|
2018-12-31T10:38:06.000Z
|
#!/usr/bin/env python3
import sys
import tensorflow as tf
def main(arg):
''' Create TensorFlow model '''
# Define two matrices as constant
matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2],[2]])
# Product two matrices (same as np.dot(matrix1, matrix2))
product = tf.matmul(matrix1, matrix2)
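    # Worked example (added note): [[3, 3]] is 1x2 and [[2], [2]] is 2x1, so the
    # matrix product is [[3*2 + 3*2]] = [[12]]; both session variants below
    # should print [[12]].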
''' Start training '''
if arg == '1':
# Method 1 - Without using with
sess = tf.Session()
result1 = sess.run(product)
print(result1)
sess.close()
elif arg == '2':
# Method 2 - Session will be closed in with
with tf.Session() as sess:
result2 = sess.run(product)
print(result2)
''' ENTRY POINT '''
if __name__ == "__main__":
if len(sys.argv) != 2:
print('[ERROR] No argument')
print('[INFO] FORMAT: "python3 main.py 1" or "python3 main.py 2"')
sys.exit()
else:
if sys.argv[1] == '1':
            print('[INFO] Running TensorFlow session without using with')
        elif sys.argv[1] == '2':
            print('[INFO] Running TensorFlow session using with')
else:
print('[ERROR] Invalid argument')
print('[INFO] FORMAT: "python3 main.py 1" or "python3 main.py 2"')
sys.exit()
main(sys.argv[1])
| 24.977778
| 69
| 0.645018
|
c6da50f994fb6dec8552e2cb6d5017c97cbec004
| 208
|
py
|
Python
|
chapter6/6.2/decorator_test.py
|
yifengyou/crazy-python
|
28099bd5011de6981a7c5412783952cc7601ae0c
|
[
"Unlicense"
] | null | null | null |
chapter6/6.2/decorator_test.py
|
yifengyou/crazy-python
|
28099bd5011de6981a7c5412783952cc7601ae0c
|
[
"Unlicense"
] | null | null | null |
chapter6/6.2/decorator_test.py
|
yifengyou/crazy-python
|
28099bd5011de6981a7c5412783952cc7601ae0c
|
[
"Unlicense"
] | null | null | null |
# coding:utf-8
# File Name: decorator_test
# Author : yifengyou
# Date : 2021/07/18
def funA(fn):
print("A")
fn()
return "hello"
@funA
def funB():
print("B")
print(funB)
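# Added note: decorating funB with funA runs funA(funB) at definition time, so
# executing this module prints "A", then "B", and funB is rebound to the string
# "hello", which is what the final print shows.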
| 12.235294
| 31
| 0.543269
|
b4b7cf1a2e8ba7903274e4aec953e6e8e421b934
| 17,545
|
py
|
Python
|
utilities/math_utils.py
|
amiravni/DataHack2018
|
3e8b7003f418186a4c351390c67453d8f6d90f4b
|
[
"BSD-3-Clause"
] | 10
|
2018-09-26T21:37:20.000Z
|
2021-02-02T22:28:28.000Z
|
utilities/math_utils.py
|
amiravni/DataHack2018
|
3e8b7003f418186a4c351390c67453d8f6d90f4b
|
[
"BSD-3-Clause"
] | null | null | null |
utilities/math_utils.py
|
amiravni/DataHack2018
|
3e8b7003f418186a4c351390c67453d8f6d90f4b
|
[
"BSD-3-Clause"
] | 2
|
2018-10-06T06:49:12.000Z
|
2019-06-18T21:20:07.000Z
|
# Copyright (C) 2018 Innoviz Technologies
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD 3-Clause license. See the LICENSE file for details.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as plt_patches
import shapely.geometry
import shapely.affinity
from shapely.prepared import prep
from descartes import PolygonPatch
class RotationTranslationData(object):
"""
    Abstraction class that represents a rigid body transformation and can accept
    rotations as quaternions or as rotation angles around the axes
"""
def __init__(self, mat=None, vecs=None, rt=None):
"""
:param mat: 4x4 transformation matrix. translation is at the last column.
:param vecs: tuple of two vectors (rotation, translation). If rotation has 3 elements it is assumed to be
        rotation angles around the axes (x, y, z); if it has 4 elements it is assumed to be a quaternion
:param rt: tuple of a 3x3 matrix and a 3 element vector of rotation and translation respectively
"""
self._rotation_angles = None
self._translation = None
self._axis_angle = None
self._q = None
self._mat = None
assert sum((mat is not None, vecs is not None, rt is not None)) == 1
# received 4x4 matrix as input
if mat is not None:
assert (mat.shape == (4, 4))
self._mat = mat
self._rotation = self._mat[:3, :3]
self._translation = self._mat[:3, 3]
# received rotation vector and translation vector
elif vecs is not None:
assert all(isinstance(vec, np.ndarray) for vec in vecs)
assert len(vecs) == 2 and vecs[1].shape == (3,)
if vecs[0].shape == (3,):
# rotation angles
self._rotation_angles = vecs[0]
self._rotation = rotation_angles_to_rotation(vecs[0])
self._translation = vecs[1]
else:
# quaternion
assert vecs[0].shape == (4,)
self._q = vecs[0]
self._rotation = quaternion_to_rotation(vecs[0])
self._translation = vecs[1]
# received rotation matrix and translation vector
else:
assert len(rt) == 2 and all(isinstance(i, np.ndarray) for i in rt)
assert rt[0].shape == (3, 3) and rt[1].shape == (3,)
self._translation = rt[1]
self._rotation = rt[0]
@property
def to_matrix(self):
return rotation_translation_to_4d(self.rotation, self.translation)
@property
def rotation_angles(self):
if self._rotation_angles is None:
self._rotation_angles = extract_rotation(self.rotation)
return self._rotation_angles
@property
def axis_angle(self):
if self._axis_angle is None:
self._axis_angle = extract_axis_angle(self.rotation)
return self._axis_angle
@property
def quaternion(self):
if self._q is None:
self._q = extract_quaternion(self.rotation)
return self._q
@property
def to_rt(self):
return self.rotation_angles, self.translation
@property
def to_axis_angle_translation(self):
return self.axis_angle, self.translation
@property
def to_quaternion_translation(self):
return self.quaternion, self.translation
@property
def translation(self):
if self._translation is None:
self._translation = extract_translation(self._mat)
return self._translation
@property
def rotation(self):
return self._rotation
def apply_transform(self, points):
assert points.ndim == 2 or points.ndim == 1
assert points.shape[-1] == 3
return np.matmul(points, self._rotation.T) + self._translation[None, :]
def apply_transform_complete_pc(self, points):
assert points.ndim == 2 or points.ndim == 1
assert points.shape[-1] == 3 or points.shape[-1] == 4 or points.shape[-1] == 5
pc = points[:, :3]
trans_pc = np.matmul(pc, self._rotation.T) + self._translation[None, :]
return np.concatenate((trans_pc, points[:, 3:]), axis=-1)
def inverse(self):
'''
:return: The inverse transformation
'''
_R = self.rotation.T
_t = -np.dot(self.rotation.T, self.translation)
return RotationTranslationData(rt=(_R, _t))
def compose(self, transform):
"""
Compose this above another transform. Assumes all matrices multiply vectors from the left (i.e. A*x).
:param transform: transform to compose
:return: composed transform _matrix * transform
"""
assert isinstance(transform, RotationTranslationData)
R_new = self.rotation.dot(transform.rotation)
t_new = self.rotation.dot(transform.translation) + self.translation
return RotationTranslationData(rt=(R_new, t_new))
@classmethod
def from_axis_angle_translation(cls, axis_angle, translation):
assert isinstance(axis_angle, np.ndarray) and axis_angle.shape == (4,)
assert isinstance(translation, np.ndarray) and translation.shape == (3,)
R = axis_angle_to_rotation(axis_angle[1:], axis_angle[0])
return RotationTranslationData(rt=(R, translation))
@classmethod
def align_plane_to_ground(cls, u, d):
"""
Generates a rotation translation object that rotates such that u coincides with (0, 0, -1) and d=0
        :param u: the plane normal vector, rotated onto (0, 0, -1)
        :param d: the plane offset, applied as a translation along z
:return: RotationTranslationData
"""
u = u.ravel()
c = np.sqrt(1-u[0]**2)
cinv = 1 / c
R = np.array([[c, -u[0]*u[1]*cinv, -u[0]*u[2]*cinv],
[0, -u[2] * cinv, u[1] * cinv],
[-u[0], -u[1], -u[2]]])
return RotationTranslationData(rt=(R, np.array([0., 0., d])))
@staticmethod
def identity():
return RotationTranslationData(rt=(np.eye(3), np.zeros(3)))
def __str__(self):
return "<RotationTranslationData: Rotation=({:.2f}, {:.2f}, {:.2f}), Translation=({:.2f}, {:.2f}, {:.2f})>"\
.format(self.rotation_angles[0], self.rotation_angles[1], self.rotation_angles[2],
self.translation[0], self.translation[1], self.translation[2])
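# Illustrative sketch (not part of the original module): a minimal round trip
# with the class above, using hypothetical values.
#
#   rt = RotationTranslationData(vecs=(np.array([0., 0., np.pi / 2]),
#                                      np.array([1., 0., 0.])))
#   moved = rt.apply_transform(np.array([[1., 0., 0.]]))
#   # rotates the point 90 degrees about z, then translates by (1, 0, 0)
#   back = rt.inverse().apply_transform(moved)   # recovers the original point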
def euler_matrix(yaw, pitch, roll):
# angles in radians
# negating pitch for easier computation
pi=np.pi+1e-7
assert -pi <= yaw <= pi and -np.pi <= roll <= np.pi and -np.pi/2 <= pitch <= np.pi/2, \
"Erroneous yaw, pitch, roll={},{},{}".format(yaw, pitch, roll)
rotX = np.eye(3)
rotY = np.eye(3)
rotZ = np.eye(3)
rotX[1:, 1:] = rot_mat_2d(roll)
rotY[::2, ::2] = rot_mat_2d(pitch)
rotZ[:2, :2] = rot_mat_2d(yaw)
return rotZ.dot(rotY.dot(rotX))
def yaw(points):
# calculate yaw of points given in nx3 format. yaw in [-pi, pi]
assert isinstance(points, np.ndarray)
assert points.ndim == 2
assert points.shape[1] == 3
return np.arctan2(points[:, 1], points[:, 0])
def pitch(points):
# calculate pitch of points given in nx3 format. pitch in [-pi/2, pi/2]
assert isinstance(points, np.ndarray)
assert points.ndim == 2
assert points.shape[1] == 3
return np.arctan2(points[:, 2], np.linalg.norm(points[:, :2], axis=1))
def rot_mat_2d(angle):
# angle in radians
return np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
def extract_rotation(mat):
# According to element [1,1] in table in https://en.wikipedia.org/wiki/Euler_angles (X1Y2Z3)
# returns (roll, pitch, yaw) or (X angle, Y angle, Z angle)
return np.array([np.arctan2(-mat[1, 2], mat[2, 2]), np.arcsin(mat[0, 2]), np.arctan2(-mat[0, 1], mat[0, 0])])
def extract_translation(mat):
return np.array((mat[0, 3], mat[1, 3], mat[2, 3]))
def rotation_translation_to_4d(rotation, translation):
""" returns 4x4 rotation translation matrix given a 3x3 rotation matrix and 3 translation vector"""
return np.vstack((np.hstack((rotation,translation.reshape((-1, 1)))), np.array((0, 0, 0, 1))))
def pose_from_rt(rotation, translation):
# rotation is received as roll, pitch, yaw
# Convert to rotation translation. Roll and yaw angles are in [-pi, pi] while pitch is [-pi/2, pi/2]
return rotation_translation_to_4d(rotation_angles_to_rotation(rotation), translation)
def pose_from_axis_angle(axis, angle, translation):
return rotation_translation_to_4d(axis_angle_to_rotation(axis, angle), translation)
def pose_from_quaternion(q, translation):
return rotation_translation_to_4d(quaternion_to_rotation(q), translation)
def rotation_angles_to_rotation(rotation):
return euler_matrix(rotation[2], rotation[1], rotation[0])
def axis_angle_to_rotation(axis, angle):
# converts axis angle notation to a rotation matrix. angle is assumed in radians and in [0, 2pi],
# axis should be normalized.
# formula from https://en.wikipedia.org/wiki/Rotation_matrix#Conversion_from_and_to_axis-angle
assert isinstance(axis, np.ndarray) and axis.shape == (3,)
assert np.abs(np.linalg.norm(axis) - 1.) < 1e-6
assert 0 <= angle <= np.pi * 2
rotation_matrix = np.cos(angle) * np.eye(3) + np.sin(angle) * cross_product_matrix(axis) + \
(1 - np.cos(angle)) * np.tensordot(axis, axis, axes=0)
return rotation_matrix
def cross_product_matrix(vector):
assert isinstance(vector, np.ndarray) and vector.shape == (3,)
matrix = np.array([[0, -vector[2], vector[1]],
[vector[2], 0, -vector[0]],
[-vector[1], vector[0], 0]])
return matrix
def extract_axis_angle(rot_mat):
# Convert from rotation matrix to axis angle. This conversion is good for angles in [0, pi], angles in [-pi, 0] will
# be mapped to [pi, 0] (pi downto 0) with the negative axis. To handle this issue we can use quaternions.
assert isinstance(rot_mat, np.ndarray) and rot_mat.shape == (3, 3,)
u = np.array([rot_mat[2, 1] - rot_mat[1, 2],
rot_mat[0, 2] - rot_mat[2, 0],
rot_mat[1, 0] - rot_mat[0, 1]])
angle = np.arccos(np.trace(rot_mat[:3, :3]) / 2 - 0.5)
if np.linalg.norm(u) == 0.:
return np.array([0., 0., 0., 1.])
else:
u = u / np.linalg.norm(u)
return np.array([angle, u[0], u[1], u[2]])
def extract_quaternion(R):
d = np.diagonal(R)
t = np.sum(d)
if t + 1 < 0.25:
symmetric_mat = R + R.T
asymmetric_mat = R - R.T
symmetric_diag = np.diagonal(symmetric_mat)
i_max = np.argmax(symmetric_diag)
q = np.empty(4)
if i_max == 0:
q[1] = np.sqrt(symmetric_diag[0] - t + 1) / 2
normalizer = 1 / q[1]
q[2] = symmetric_mat[1, 0] / 4 * normalizer
q[3] = symmetric_mat[2, 0] / 4 * normalizer
q[0] = asymmetric_mat[2, 1] / 4 * normalizer
elif i_max == 1:
q[2] = np.sqrt(symmetric_diag[1] - t + 1) / 2
normalizer = 1 / q[2]
q[1] = symmetric_mat[1, 0] / 4 * normalizer
q[3] = symmetric_mat[2, 1] / 4 * normalizer
q[0] = asymmetric_mat[0, 2] / 4 * normalizer
elif i_max == 2:
q[3] = np.sqrt(symmetric_diag[2] - t + 1) / 2
normalizer = 1 / q[3]
q[1] = symmetric_mat[2, 0] / 4 * normalizer
q[2] = symmetric_mat[1, 2] / 4 * normalizer
q[0] = asymmetric_mat[1, 0] / 4 * normalizer
else:
r = np.sqrt(1+t)
s = 0.5 / r
q = np.array([0.5*r, (R[2, 1] - R[1, 2])*s, (R[0, 2] - R[2, 0])*s, (R[1, 0] - R[0, 1])*s])
return q
def quaternion_to_rotation(q):
"""
Conversion from quaternion vector (w,x,y,z) to a rotation matrix.
Based on https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion
"""
w, x, y, z = tuple(q)
n = np.dot(q, q)
s = 0. if n == 0. else 2./n
wx, wy, wz = s*w*x, s*w*y, s*w*z
xx, xy, xz = s*x*x, s*x*y, s*x*z
yy, yz, zz = s*y*y, s*y*z, s*z*z
R = np.array([[1-yy-zz, xy-wz, xz+wy],
[xy+wz, 1-xx-zz, yz-wx],
[xz-wy, yz+wx, 1-xx-yy]])
return R
class Box(object):
def __init__(self, x, y, wx, wy, rotation, h):
self._center = np.array([x, y])
self._width = np.array([wx, wy])
self._rotation = rotation
self._height = h
c = shapely.geometry.box(-wx / 2.0, -wy / 2.0, wx / 2.0, wy / 2.0)
rc = shapely.affinity.rotate(c, -rotation, use_radians=True)
trc = shapely.affinity.translate(rc, x, y)
self._contour = trc
@property
def center(self):
return self._center
@property
def width(self):
return self._width
@property
def rotation(self):
return self._rotation
@property
def contour(self):
return self._contour
def intersects(self, other):
assert isinstance(other, Box)
return self.contour.intersects(other.contour)
def _intersection(self, other):
return self.contour.intersection(other.contour)
def intersection_many(self, others):
if len(others) > 1:
prepped_contour = prep(self.contour)
else:
prepped_contour = self.contour
intersections = [0.] * len(others)
for idx, other in enumerate(others):
if prepped_contour.intersects(other.contour):
intersections[idx] = self._intersection(other).area
return intersections
def union(self, other):
return self.contour.union(other.contour)
def iou(self, other):
intersection_area = self._intersection(other).area
return intersection_area / (self.area + other.area - intersection_area + 1e-9)
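    # Illustrative sketch (not part of the original module): two axis-aligned
    # 2x2 boxes whose centers are offset by (1, 1) overlap in a 1x1 square, so
    # Box(0, 0, 2, 2, 0, 0).iou(Box(1, 1, 2, 2, 0, 0)) is roughly
    # 1 / (4 + 4 - 1) ~= 0.143.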
def iou_many(self, others):
if len(others) > 1:
prepped_contour = prep(self.contour)
else:
prepped_contour = self.contour
ious = [0.] * len(others)
for idx, other in enumerate(others):
if prepped_contour.intersects(other.contour):
ious[idx] = self.iou(other)
return ious
@classmethod
def boxes_iou(cls, box1, box2):
return box1.iou(box2)
@property
def area(self):
return self.contour.area
def draw(self, ax, color, line_width=1, fillcolor=None, name=None, arrow=True, alpha=0.2, scale=50):
ax.add_patch(PolygonPatch(self.contour, alpha=alpha, fc=fillcolor, ec=color, linewidth=line_width))
vertices = np.array(self.contour.exterior.coords)[1:]
if arrow:
arrow_center = np.mean(vertices, axis=0)
arrow_direction = (vertices[2] - vertices[1]) / 1.5
arrow_tail = arrow_center - arrow_direction / 2
arrow_head = arrow_center + arrow_direction / 2
style = plt_patches.ArrowStyle.Simple(head_length=.4, head_width=.6, tail_width=.1)
x = np.array(ax.axis())
scale_factor = np.sqrt(np.prod(np.abs(x[::2] - x[1::2])) / (60 * 60))
arrow_patch = plt_patches.FancyArrowPatch(posA=arrow_tail, posB=arrow_head, arrowstyle=style,
color='w', mutation_scale= scale / scale_factor, alpha=0.4)
ax.add_patch(arrow_patch)
elif name is None:
name = 'front'
if name is not None:
text_location = np.mean(vertices[[0, -1]], axis=0)
ax.text(text_location[0], text_location[1], name, ha='center', va='top', color='w')
@classmethod
def from_numpy(cls, numpy_arr):
# assumes input as (x,y,wx,wy,h,angle)
assert isinstance(numpy_arr, np.ndarray)
assert (numpy_arr.ndim == 1 and numpy_arr.size == 6) or (numpy_arr.ndim == 2 and numpy_arr.shape[1] == 6)
if numpy_arr.ndim == 1:
return Box(numpy_arr[0], numpy_arr[1], numpy_arr[2], numpy_arr[3], numpy_arr[5], numpy_arr[4])
else:
return [Box(numpy_arr[i, 0], numpy_arr[i, 1], numpy_arr[i, 2],
numpy_arr[i, 3], numpy_arr[i, 5], numpy_arr[i, 4]) for i in range(numpy_arr.shape[0])]
@classmethod
def from_xyxy(cls, box):
center = 0.5*(box[:2] + box[2:])
width = (box[2:] - box[:2])
return Box(center[0], center[1], width[0], width[1], 0., 0.)
def draw_point(point, axes=None, color='k'):
circle = plt.Circle((point[0], point[1]), 0.005, color='k')
if axes is None:
axes = plt
axes.add_artist(circle)
def box2dtobox3d(boxes2d, z_translation=0.0, z_size=0.0, z_angle=0.0):
"""
tranforms 2d boxes to 3d boxes
:param boxes2d: np array shaped N,4. box = [x1,y1,x2,xy] (1-bottom left, 2 upper right)
:return: boxes3d np array shaped N,7. box = [t1,t2,t3,s1,s2,s3,z_angle]
"""
ctr_x = np.mean(boxes2d[:, [0, 2]], axis=-1, keepdims=True)
ctr_y = np.mean(boxes2d[:, [1, 3]], axis=-1, keepdims=True)
ctr_z = np.full([boxes2d.shape[0], 1], z_translation)
ctr = np.concatenate((ctr_x, ctr_y, ctr_z), -1)
size_x = boxes2d[:, 2:3] - boxes2d[:, 0:1]
size_y = boxes2d[:, 3:4] - boxes2d[:, 1:2]
size_z = np.full([boxes2d.shape[0], 1], z_size)
size = np.concatenate((size_x, size_y, size_z), -1)
z_angle = np.full([boxes2d.shape[0], 1], z_angle)
return np.concatenate((ctr, size, z_angle), -1)
| 37.093023
| 120
| 0.603705
|
4e93c87af525c99da6c24c264f5bb3e8590a3022
| 93
|
py
|
Python
|
query.py
|
MatthewRobertDunn/tetrominos
|
f864f6da44e50e2ee435ad8fabef2679c1e21a8b
|
[
"MIT"
] | null | null | null |
query.py
|
MatthewRobertDunn/tetrominos
|
f864f6da44e50e2ee435ad8fabef2679c1e21a8b
|
[
"MIT"
] | null | null | null |
query.py
|
MatthewRobertDunn/tetrominos
|
f864f6da44e50e2ee435ad8fabef2679c1e21a8b
|
[
"MIT"
] | null | null | null |
def find_first(items, predicate):
return next(item for item in items if predicate(item))
| 31
| 58
| 0.752688
|
3ffbf649f78915f7ef1c775100f743d8f37becf4
| 10,966
|
py
|
Python
|
ari_backup/lvm.py
|
jpwoodbu/ari-backup
|
7c0c87e6ff7e10853f0f1478685c1250e176a9f7
|
[
"BSD-3-Clause"
] | 1
|
2018-05-02T13:33:51.000Z
|
2018-05-02T13:33:51.000Z
|
ari_backup/lvm.py
|
jpwoodbu/ari-backup
|
7c0c87e6ff7e10853f0f1478685c1250e176a9f7
|
[
"BSD-3-Clause"
] | 23
|
2015-01-04T08:03:03.000Z
|
2017-11-04T16:25:11.000Z
|
ari_backup/lvm.py
|
jpwoodbu/ari-backup
|
7c0c87e6ff7e10853f0f1478685c1250e176a9f7
|
[
"BSD-3-Clause"
] | 1
|
2015-09-20T04:38:18.000Z
|
2015-09-20T04:38:18.000Z
|
"""LVM based backup workflows and MixIn classes."""
import copy
import os
import gflags
import rdiff_backup_wrapper
FLAGS = gflags.FLAGS
gflags.DEFINE_string(
'snapshot_mount_root', '/tmp',
'root path for creating temporary directories for mounting LVM snapshots')
gflags.DEFINE_string('snapshot_suffix', '-ari_backup',
'suffix for LVM snapshots')
class LVMSourceMixIn(object):
"""MixIn class to work with LVM based backup sources.
This class registers pre-job and post-job hooks to create and mount LVM
snapshots before and after a backup job.
This class depends on the source_hostname instance variable which should be
defined by any subclass of workflow.BaseWorkFlow that also uses this mixin.
"""
def __init__(self, *args, **kwargs):
super(LVMSourceMixIn, self).__init__(*args, **kwargs)
# Assign flags to instance vars so they might be easily overridden in
# workflow configs.
self.snapshot_mount_root = FLAGS.snapshot_mount_root
self.snapshot_suffix = FLAGS.snapshot_suffix
# This is a list of 3-tuples, where each inner 3-tuple expresses the LV
# to back up, the mount point for that LV, and any mount options
# necessary. For example: [('hostname/root, '/', 'noatime'),]
# TODO(jpwoodbu) I wonder if noatime being used all the time makes
# sense to improve read performance and reduce writes to the snapshots.
self._logical_volumes = list()
# A list of dicts with the snapshot paths and where they should be
# mounted.
self._lv_snapshots = list()
# Mount the snapshots in a directory named for this job's label.
self._snapshot_mount_point_base_path = os.path.join(
self.snapshot_mount_root, self.label)
# Set up pre and post job hooks to manage snapshot workflow.
self.add_pre_hook(self._create_snapshots)
self.add_pre_hook(self._mount_snapshots)
self.add_post_hook(self._umount_snapshots)
self.add_post_hook(self._delete_snapshots)
# Maintain backward compatibility with old hooks interface.
@property
def lv_list(self):
self.logger.warning(
'lv_list is deprecated. Please use add_volume() instead.')
return self._logical_volumes
@lv_list.setter
def lv_list(self, value):
self.logger.warning(
'lv_list is deprecated. Please use add_volume() instead.')
self._logical_volumes = value
def add_volume(self, name, mount_point, mount_options=None):
"""Adds logical volume to list of volumes to be backed up.
Args:
name: str, full logical volume path (with volume group) in
group/volume_name format.
mount_point: str, path where the volume should be mounted during
the backup. This is normally the same path where the volume is
normally mounted. For example, if the volume is normally
mounted at /var/www, the value passed here should be /var/www
if you want this data to be in the /var/www directory in the
backup.
mount_options: str or None, mount options to be applied when
mounting the snapshot. For example, "noatime,ro". Defaults to
None which applies no mount options.
"""
volume = (name, mount_point, mount_options)
self._logical_volumes.append(volume)
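    # Illustrative usage (not part of the original module), assuming a backup
    # job object that mixes this class in; the volume group, volume name and
    # mount options below are hypothetical:
    #
    #   backup.add_volume('vg0/root', '/', mount_options='noatime,ro')
    #   backup.add_volume('vg0/var', '/var')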
def _create_snapshots(self):
"""Creates snapshots of all the volumns added with add_volume()."""
self.logger.info('Creating LVM snapshots...')
for volume in self._logical_volumes:
            # TODO(jpwoodbu) This try/except won't be necessary when the
# deprecated interface to the self.lv_list is removed.
try:
lv_path, src_mount_path, mount_options = volume
except ValueError:
lv_path, src_mount_path = volume
mount_options = None
vg_name, lv_name = lv_path.split('/')
new_lv_name = lv_name + self.snapshot_suffix
mount_path = (
'{snapshot_mp_bp}{src_mount_path}'.format(
snapshot_mp_bp=self._snapshot_mount_point_base_path,
src_mount_path=src_mount_path))
# TODO(jpwoodbu) Is it really OK to always make a 1GB exception
# table?
command = ['lvcreate', '-s', '-L', '1G', lv_path, '-n',
new_lv_name]
self.run_command(command, self.source_hostname)
self._lv_snapshots.append({
'lv_path': vg_name + '/' + new_lv_name,
'mount_path': mount_path,
'mount_options': mount_options,
'created': True,
'mount_point_created': False,
'mounted': False,
})
def _delete_snapshots(self, error_case=None):
"""Deletes tracked snapshots.
Args:
error_case: bool or None, whether an error has occurred during the
backup. Default is None. This method does not use this arg but
must accept it as part of the post hook API.
"""
self.logger.info('Deleting LVM snapshots...')
for snapshot in self._lv_snapshots:
if snapshot['created']:
lv_path = snapshot['lv_path']
# -f makes lvremove not interactive
command = ['lvremove', '-f', lv_path]
self.run_command_with_retries(command, self.source_hostname)
snapshot['created'] = False
def _mount_snapshots(self):
"""Creates mountpoints as well as mounts the snapshots.
If the mountpoint directory already has a file system mounted then we
raise Exception. Metadata is updated whenever a snapshot is
successfully mounted so that _umount_snapshots() knows which
snapshots to try to umount.
TODO(jpwoodbu) Add mount_options to documentation for backup config
files.
"""
self.logger.info('Mounting LVM snapshots...')
for snapshot in self._lv_snapshots:
lv_path = snapshot['lv_path']
device_path = '/dev/' + lv_path
mount_path = snapshot['mount_path']
mount_options = snapshot['mount_options']
# mkdir the mount point
command = ['mkdir', '-p', mount_path]
self.run_command(command, self.source_hostname)
snapshot['mount_point_created'] = True
# If where we want to mount our LV is already a mount point then
# let's back out.
if os.path.ismount(mount_path):
raise Exception(
'{mount_path} is already a mount point.'.format(
mount_path=mount_path))
# mount the LV, possibly with mount options
if mount_options:
command = ['mount', '-o', mount_options, device_path,
mount_path]
else:
command = ['mount', device_path, mount_path]
self.run_command(command, self.source_hostname)
snapshot['mounted'] = True
def _umount_snapshots(self, error_case=None):
"""Umounts mounted snapshots in self._lv_snapshots.
Args:
error_case: bool or None, whether an error has occurred during the
backup. Default is None. This method does not use this arg but
must accept it as part of the post hook API.
"""
# TODO(jpwoodbu) If the user doesn't put '/' in their _includes, then
# we'll end up with directories around where the snapshots are mounted
# that will not get cleaned up. We should probably add functionality to
# make sure the "label" directory is recursively removed. Check out
# shutil.rmtree() to help resolve this issue.
self.logger.info('Umounting LVM snapshots...')
# We need a local copy of the _lv_snapshots list to muck with in this
# method.
local_lv_snapshots = copy.copy(self._lv_snapshots)
# We want to umount these logical volumes in reverse order as this
# should ensure that we umount the deepest paths first.
local_lv_snapshots.reverse()
for snapshot in local_lv_snapshots:
mount_path = snapshot['mount_path']
if snapshot['mounted']:
command = ['umount', mount_path]
self.run_command_with_retries(command, self.source_hostname)
snapshot['mounted'] = False
if snapshot['mount_point_created']:
command = ['rmdir', mount_path]
self.run_command_with_retries(command, self.source_hostname)
snapshot['mount_point_created'] = False
class RdiffLVMBackup(LVMSourceMixIn, rdiff_backup_wrapper.RdiffBackup):
"""Subclass to add LVM snapshot management to RdiffBackup."""
def __init__(self, *args, **kwargs):
super(RdiffLVMBackup, self).__init__(*args, **kwargs)
def _prefix_mount_point_to_paths(self, paths):
"""Prefixes the snapshot_mount_point_base_path to each path in paths.
Args:
paths: list, list of strings representing paths for the backup
config.
Returns:
List of strings with the given paths prefixed with the base path
where the snapshots are mounted.
"""
new_paths = list()
for path in paths:
new_path = '{snapshot_mp_bp}{path}'.format(
snapshot_mp_bp=self._snapshot_mount_point_base_path,
path=path)
new_paths.append(new_path)
return new_paths
def _run_custom_workflow(self):
"""Run backup of LVM snapshots.
This method overrides the base class's _run_custom_workflow() so that
we can modify the includes and excludes to have the
_snapshot_mount_point_base_path prefixed to their paths. This allows
the user to configure what to backup from the perspective of the file
system on the snapshot itself.
"""
self.logger.debug('RdiffLVMBackup._run_custom_workflow started.')
# Cook the self._includes and self._excludes so that the src paths
# include the mount path for the logical volumes.
self._includes = self._prefix_mount_point_to_paths(self._includes)
self._excludes = self._prefix_mount_point_to_paths(self._excludes)
# After changing the top-level src dir to where the snapshots are
# mounted, have the base class perform an rdiff-backup.
self.top_level_src_dir = self._snapshot_mount_point_base_path
super(RdiffLVMBackup, self)._run_custom_workflow()
self.logger.debug('RdiffLVMBackup._run_custom_workflow completed.')
| 42.835938
| 79
| 0.632683
|
49315e17e8fd41ca242e660e776a43be45073835
| 255
|
py
|
Python
|
docs_src/subcommands/tutorial003/users.py
|
madkinsz/typer
|
a1520dcda685220a9a796288f5eaaebd00d68845
|
[
"MIT"
] | 7,615
|
2019-12-24T13:08:20.000Z
|
2022-03-31T22:07:53.000Z
|
docs_src/subcommands/tutorial003/users.py
|
madkinsz/typer
|
a1520dcda685220a9a796288f5eaaebd00d68845
|
[
"MIT"
] | 351
|
2019-12-24T22:17:54.000Z
|
2022-03-31T15:35:08.000Z
|
docs_src/subcommands/tutorial003/users.py
|
jina-ai/typer
|
8b5e14b25ddf0dd777403015883301b17bedcee0
|
[
"MIT"
] | 360
|
2019-12-24T15:29:59.000Z
|
2022-03-30T20:33:10.000Z
|
import typer
app = typer.Typer()
@app.command()
def create(user_name: str):
typer.echo(f"Creating user: {user_name}")
@app.command()
def delete(user_name: str):
typer.echo(f"Deleting user: {user_name}")
if __name__ == "__main__":
app()
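# Illustrative usage (added note): run as a standalone script this exposes the
# two commands defined above, e.g.
#   python users.py create Camila   -> "Creating user: Camila"
#   python users.py delete Camila   -> "Deleting user: Camila"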
| 14.166667
| 45
| 0.662745
|
a3aebb53c08744dbe62aa01a1184de198bc0d59a
| 879
|
bzl
|
Python
|
tools/markdown.bzl
|
depp/terrestrial-collection-machine
|
54b4f2906bd733d2f5b62622cfcbb7508674bc98
|
[
"MIT"
] | null | null | null |
tools/markdown.bzl
|
depp/terrestrial-collection-machine
|
54b4f2906bd733d2f5b62622cfcbb7508674bc98
|
[
"MIT"
] | null | null | null |
tools/markdown.bzl
|
depp/terrestrial-collection-machine
|
54b4f2906bd733d2f5b62622cfcbb7508674bc98
|
[
"MIT"
] | null | null | null |
def _impl(ctx):
outputs = []
for src in ctx.files.srcs:
name = src.basename
idx = name.find(".")
if idx >= 0:
name = name[:idx]
out = ctx.actions.declare_file(name + ".html")
outputs.append(out)
ctx.actions.run_shell(
inputs = [src],
outputs = [out],
progress_message = "Converting %s to HTML" % src.short_path,
command = ("pandoc --from=commonmark --to=html5 --standalone " +
"--metadata=\"title=$(head -n1 <%s | cut -c3-)\" " +
"--output=%s %s") % (src.path, out.path, src.path),
)
return [DefaultInfo(files = depset(outputs))]
markdown = rule(
implementation = _impl,
attrs = {
"srcs": attr.label_list(
allow_files = True,
mandatory = True,
),
},
)
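# A hedged usage sketch (package path and target name are assumptions, not part
# of this file): from a BUILD file the rule above could be invoked as
#
#     load("//tools:markdown.bzl", "markdown")
#
#     markdown(
#         name = "docs",
#         srcs = ["README.md"],
#     )
#
# which declares README.html and produces it by running pandoc over README.md.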
| 30.310345
| 76
| 0.488055
|
b639ac3d74d2c78cbdfc88ee1218fe981fa96e27
| 12,643
|
py
|
Python
|
test/unit_test/test_rolling_windows_model.py
|
WheatonCS/Lexos
|
994be4e403053ebbef18e5758a100af616195706
|
[
"MIT"
] | 107
|
2015-03-19T09:10:31.000Z
|
2022-01-29T01:33:48.000Z
|
test/unit_test/test_rolling_windows_model.py
|
WheatonCS/Lexos
|
994be4e403053ebbef18e5758a100af616195706
|
[
"MIT"
] | 864
|
2015-05-19T19:27:00.000Z
|
2022-01-28T18:48:52.000Z
|
test/unit_test/test_rolling_windows_model.py
|
WheatonCS/Lexos
|
994be4e403053ebbef18e5758a100af616195706
|
[
"MIT"
] | 25
|
2015-06-02T23:03:06.000Z
|
2020-08-06T04:27:49.000Z
|
import numpy as np
import pandas as pd
from lexos.models.rolling_window_model import RollingWindowsModel, \
RWATestOptions
from lexos.receivers.rolling_window_receiver import RWAFrontEndOptions, \
WindowUnitType, RWATokenType, RWARatioTokenOptions, RWAWindowOptions, \
RWAAverageTokenOptions, RWAPlotOptions
# -------------------------- test by ratio count ------------------------------
# noinspection PyProtectedMember
class TestRatioCountOne:
test_ratio_count = RWATestOptions(
file_id_content_map={0: "ha ha ha ha la ta ha",
1: "la la ta ta da da ha"},
rolling_windows_options=RWAFrontEndOptions(
ratio_token_options=RWARatioTokenOptions(
token_type=RWATokenType.string,
token_frame=pd.DataFrame(
data={
"numerator": ["t"],
"denominator": ["a"]
}
)
),
average_token_options=None,
passage_file_id=0,
window_options=RWAWindowOptions(
window_size=3,
window_unit=WindowUnitType.letter
),
plot_options=RWAPlotOptions(
individual_points=False,
black_white=False
),
milestone="ta",
text_color="#000000"
)
)
# Get the rolling window model and other test components
rw_ratio_model = RollingWindowsModel(test_option=test_ratio_count)
rw_ratio_windows = rw_ratio_model._get_windows()
rw_ratio_graph = rw_ratio_model._generate_rwa_graph()
rw_ratio_csv_frame = rw_ratio_model._get_rwa_csv_frame()
rw_ratio_milestone = \
rw_ratio_model._find_mile_stone_windows_indexes_in_all_windows()
def test_get_windows(self):
np.testing.assert_array_equal(
self.rw_ratio_windows,
['ha ', 'a h', ' ha', 'ha ', 'a h', ' ha', 'ha ', 'a h', ' ha',
'ha ', 'a l', ' la', 'la ', 'a t', ' ta', 'ta ', 'a h', ' ha'])
def test_token_ratio_windows(self):
pd.testing.assert_series_equal(
left=self.rw_ratio_model._find_token_ratio_in_windows(
numerator_token="t",
denominator_token="a",
windows=self.rw_ratio_windows
),
right=pd.Series(
data=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0],
),
check_names=False)
def test_generate_rwa_graph(self):
assert self.rw_ratio_graph['data'][0]['type'] == 'scattergl'
np.testing.assert_array_equal(
self.rw_ratio_graph['data'][0]['x'],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
)
np.testing.assert_array_equal(
self.rw_ratio_graph['data'][0]['y'],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.5, 0.5, 0.5, 0.0, 0.0]
)
def test_find_milestone(self):
assert self.rw_ratio_milestone == {'t': [15],
'a': [1, 4, 7, 10, 13, 16]}
def test_csv_frame(self):
pd.testing.assert_frame_equal(
self.rw_ratio_csv_frame,
pd.DataFrame(
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17],
columns=["t / (t + a)"],
data=[[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.],
[0.], [0.], [0.], [0.], [0.5], [0.5], [0.5], [0.], [0.]]
)
)
# -----------------------------------------------------------------------------
# noinspection PyProtectedMember
class TestRatioCountTwo:
test_ratio_count = RWATestOptions(
file_id_content_map={0: "ha ha ha ha la ta ha \n ha ha \n ta ha",
1: "la la ta ta da da ha"},
rolling_windows_options=RWAFrontEndOptions(
ratio_token_options=RWARatioTokenOptions(
token_type=RWATokenType.word,
token_frame=pd.DataFrame(
data={
"numerator": ["ha"],
"denominator": ["la"]
}
)
),
average_token_options=None,
passage_file_id=0,
window_options=RWAWindowOptions(
window_size=2,
window_unit=WindowUnitType.word
),
plot_options=RWAPlotOptions(
individual_points=False,
black_white=False
),
milestone="ta",
text_color="#000000"
)
)
# Get the rolling window model and other testing components.
rw_ratio_model = RollingWindowsModel(test_option=test_ratio_count)
rw_ratio_windows = rw_ratio_model._get_windows()
rw_ratio_graph = rw_ratio_model._generate_rwa_graph()
rw_ratio_milestone = \
rw_ratio_model._find_mile_stone_windows_indexes_in_all_windows()
def test_get_windows(self):
np.testing.assert_array_equal(
self.rw_ratio_windows,
['ha ha ', 'ha ha ', 'ha ha ', 'ha la ', 'la ta ', 'ta ha \n ',
'ha \n ha ', 'ha ha \n ', 'ha \n ta ', 'ta ha'])
def test_generate_rwa_graph(self):
assert self.rw_ratio_graph['data'][0]['type'] == 'scattergl'
np.testing.assert_array_equal(
self.rw_ratio_graph['data'][0]['x'],
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]
)
np.testing.assert_array_equal(
self.rw_ratio_graph['data'][0]['y'],
[1., 1., 1., 0.5, 0., 1., 1., 1., 1., 1.]
)
def test_find_milestone(self):
assert self.rw_ratio_milestone == {'t': [5, 9],
'a': []}
# -----------------------------------------------------------------------------
# -------------------------- test by average count ----------------------------
# noinspection PyProtectedMember
class TestAverageCountOne:
test_average_count = RWATestOptions(
file_id_content_map={
0: "ha ha \n ha ha \n la ta \n ha \n ta ta \n la la"},
rolling_windows_options=RWAFrontEndOptions(
ratio_token_options=None,
average_token_options=RWAAverageTokenOptions(
token_type=RWATokenType.string,
tokens=["ta", "ha"]),
passage_file_id=0,
window_options=RWAWindowOptions(
window_size=2,
window_unit=WindowUnitType.line
),
plot_options=RWAPlotOptions(
individual_points=False,
black_white=False
),
milestone=None,
text_color="#000000"
)
)
# Get the rolling window model and other testing components.
rw_average_model = RollingWindowsModel(test_option=test_average_count)
rw_average_windows = rw_average_model._get_windows()
rw_average_graph = rw_average_model._generate_rwa_graph()
rw_average_csv_frame = rw_average_model._get_rwa_csv_frame()
def test_get_windows(self):
np.testing.assert_array_equal(
self.rw_average_windows,
['ha ha \n ha ha \n', ' ha ha \n la ta \n', ' la ta \n ha \n',
' ha \n ta ta \n', ' ta ta \n la la']
)
def test_generate_rwa_graph(self):
assert self.rw_average_graph['data'][0]['type'] == 'scattergl'
np.testing.assert_array_equal(
self.rw_average_graph['data'][0]['x'],
[0., 1., 2., 3., 4.]
)
np.testing.assert_array_equal(
self.rw_average_graph['data'][0]['y'],
[0., 0.5, 0.5, 1., 1.]
)
assert self.rw_average_graph['data'][1]['mode'] == 'lines'
assert self.rw_average_graph['data'][1]['name'] == 'ha'
def test_csv_frame(self):
pd.testing.assert_frame_equal(
self.rw_average_csv_frame,
pd.DataFrame(
index=[0, 1, 2, 3, 4],
columns=["ta", "ha"],
data=[[0., 2.], [0.5, 1.], [0.5, 0.5], [1., 0.5], [1., 0.]]
)
)
# noinspection PyProtectedMember
class TestAverageCountTwo:
test_average_count = RWATestOptions(
file_id_content_map={
0: "ha ha \n ha ha \n la ta \n ha \n ta ta \n la la"},
rolling_windows_options=RWAFrontEndOptions(
ratio_token_options=None,
average_token_options=RWAAverageTokenOptions(
token_type=RWATokenType.word,
tokens=["ta", "ha"]),
passage_file_id=0,
window_options=RWAWindowOptions(
window_size=2,
window_unit=WindowUnitType.word
),
plot_options=RWAPlotOptions(
individual_points=False,
black_white=False
),
milestone=None,
text_color="#000000"
)
)
# Get the rolling window model and other testing components.
rw_average_model = RollingWindowsModel(test_option=test_average_count)
rw_average_windows = rw_average_model._get_windows()
rw_average_graph = rw_average_model._generate_rwa_graph()
rw_average_csv_frame = rw_average_model._get_rwa_csv_frame()
def test_get_windows(self):
np.testing.assert_array_equal(
self.rw_average_windows,
['ha ha \n ', 'ha \n ha ', 'ha ha \n ', 'ha \n la ', 'la ta \n ',
'ta \n ha \n ', 'ha \n ta ', 'ta ta \n ', 'ta \n la ', 'la la']
)
def test_generate_rwa_graph(self):
assert self.rw_average_graph['data'][0]['type'] == 'scattergl'
np.testing.assert_array_equal(
self.rw_average_graph['data'][0]['x'],
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]
)
np.testing.assert_array_equal(
self.rw_average_graph['data'][0]['y'],
[0., 0., 0., 0., 0.5, 0.5, 0.5, 1., 0.5, 0.]
)
assert self.rw_average_graph['data'][1]['mode'] == 'lines'
assert self.rw_average_graph['data'][1]['name'] == 'ha'
def test_csv_frame(self):
pd.testing.assert_frame_equal(
self.rw_average_csv_frame,
pd.DataFrame(
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
columns=["ta", "ha"],
data=[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [0.0, 0.5],
[0.5, 0.0], [0.5, 0.5], [0.5, 0.5], [1.0, 0.0],
[0.5, 0.0], [0.0, 0.0]]
)
)
# -----------------------------------------------------------------------------
# -------------------------- test static method -------------------------------
# noinspection PyProtectedMember
rw_test_letters = RollingWindowsModel._get_letters_windows(
passage="hello good", windows_size=2)
# noinspection PyProtectedMember
rw_test_words = RollingWindowsModel._get_word_windows(
passage="hello goodbye dog", window_size=1)
# noinspection PyProtectedMember
rw_test_lines = RollingWindowsModel._get_line_windows(
passage="hello goodbye dog hi \n this is a test \n this is another test",
window_size=1)
# noinspection PyProtectedMember
rw_test_find_regex = RollingWindowsModel._find_regex_in_window(
window="hello this the test", regex="^h")
# noinspection PyProtectedMember
rw_test_find_word = RollingWindowsModel._find_word_in_window(
window="hello this the test", word="the")
# noinspection PyProtectedMember
rw_test_find_string = RollingWindowsModel._find_string_in_window(
window="hello this the test the test", string="the test")
class TestStaticMethods:
def test_get_letters_window(self):
np.testing.assert_array_equal(
rw_test_letters[0:9],
['he', 'el', 'll', 'lo', 'o ', ' g', 'go', 'oo', 'od']
)
def test_get_words_window(self):
np.testing.assert_array_equal(rw_test_words[0:3],
['hello ', 'goodbye ', 'dog'])
def test_get_lines_window(self):
np.testing.assert_array_equal(rw_test_lines[0:3],
["hello goodbye dog hi \n",
" this is a test \n",
" this is another test"])
def test_find_regex(self):
assert rw_test_find_regex == 1
def test_find_word(self):
assert rw_test_find_word == 1
def test_find_string(self):
assert rw_test_find_string == 2
| 36.646377
| 79
| 0.534525
|
5bce5a16af16f4a5a2ab428150dfb06af2a7f66d
| 1,811
|
py
|
Python
|
tools/Polygraphy/setup.py
|
spradius/TensorRT
|
eb5de99b523c76c2f3ae997855ad86d3a1e86a31
|
[
"Apache-2.0"
] | 1
|
2021-08-23T01:15:16.000Z
|
2021-08-23T01:15:16.000Z
|
tools/Polygraphy/setup.py
|
spradius/TensorRT
|
eb5de99b523c76c2f3ae997855ad86d3a1e86a31
|
[
"Apache-2.0"
] | null | null | null |
tools/Polygraphy/setup.py
|
spradius/TensorRT
|
eb5de99b523c76c2f3ae997855ad86d3a1e86a31
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import polygraphy
from setuptools import setup, find_packages
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
BIN_DIR = os.path.join(ROOT_DIR, "bin")
def no_publish():
blacklist = ["register"]
for cmd in blacklist:
if cmd in sys.argv:
raise RuntimeError('Command "{}" blacklisted'.format(cmd))
REQUIRED_PACKAGES = []
def main():
no_publish()
setup(
name="polygraphy",
version=polygraphy.__version__,
description="Polygraphy: A Deep Learning Inference Prototyping and Debugging Toolkit",
long_description=open("README.md", "r", encoding="utf-8").read(),
url="https://github.com/NVIDIA/TensorRT/tree/master/tools/Polygraphy",
author="NVIDIA",
author_email="svc_tensorrt@nvidia.com",
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
],
license="Apache 2.0",
install_requires=REQUIRED_PACKAGES,
packages=find_packages(exclude=("tests", "tests.*")),
scripts=[os.path.join(BIN_DIR, "polygraphy")],
zip_safe=True,
)
if __name__ == "__main__":
main()
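# A brief usage note (the commands shown are generic setuptools invocations and
# an assumption, not part of this file): because of no_publish() above,
# `python setup.py register` raises a RuntimeError, while an ordinary build
# such as
#
#     python setup.py sdist bdist_wheel
#
# reaches the setup() call unchanged.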
| 30.694915
| 94
| 0.677526
|
074b21e06e2f433b6399b07848bb11d144683d48
| 461
|
py
|
Python
|
src/test_bell_state.py
|
raviksharma/quantum
|
b76e9eaff59a59dada645f2da81bc5b8361c6722
|
[
"MIT"
] | null | null | null |
src/test_bell_state.py
|
raviksharma/quantum
|
b76e9eaff59a59dada645f2da81bc5b8361c6722
|
[
"MIT"
] | null | null | null |
src/test_bell_state.py
|
raviksharma/quantum
|
b76e9eaff59a59dada645f2da81bc5b8361c6722
|
[
"MIT"
] | null | null | null |
from math import sqrt
import numpy as np
from base import zero, one, \
zz, zo, oz, oo
from gates import I, H, K, CNOT
# entangled quantum state
#
# +---+
# |0> ----+ H +-----.--- |00> + |11>
# +---+ | } -----------
# | sqrt(2)
# |0> --------------O---
#
def test_bell_state():
assert(np.allclose(CNOT(K(zz)), (zz + oo)/sqrt(2)))
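# A hedged worked expansion of the assertion above (K is assumed to apply the
# Hadamard gate H to the first qubit while leaving the second unchanged, as the
# circuit comment suggests):
#
#     K(zz)   -> (|00> + |10>) / sqrt(2)
#     CNOT(.) -> (|00> + |11>) / sqrt(2)
#
# i.e. the Bell state the test checks against.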
| 24.263158
| 55
| 0.364425
|
6cdcfc0a9097347ee2d2363cc6618ec9a7ecae44
| 15,237
|
py
|
Python
|
tests/query_test/test_udfs.py
|
andybab/Impala
|
d520a9cdea2fc97e8d5da9fbb0244e60ee416bfa
|
[
"Apache-2.0"
] | null | null | null |
tests/query_test/test_udfs.py
|
andybab/Impala
|
d520a9cdea2fc97e8d5da9fbb0244e60ee416bfa
|
[
"Apache-2.0"
] | null | null | null |
tests/query_test/test_udfs.py
|
andybab/Impala
|
d520a9cdea2fc97e8d5da9fbb0244e60ee416bfa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
from tests.common.impala_cluster import ImpalaCluster
from subprocess import call
class TestUdfs(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestUdfs, cls).add_test_dimensions()
# Without limiting the test suite to a single exec option, the tests will fail
# because the same test case may be executed in parallel with different exec option
# values leading to conflicting DDL ops.
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
# There is no reason to run these tests using all dimensions.
cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload()))
def test_native_functions(self, vector):
database = 'native_function_test'
self.__load_functions(
self.create_udfs_template, vector, database, '/test-warehouse/libTestUdfs.so')
self.__load_functions(
self.create_udas_template, vector, database, '/test-warehouse/libudasample.so')
self.run_test_case('QueryTest/udf', vector, use_db=database)
self.run_test_case('QueryTest/udf-init-close', vector, use_db=database)
self.run_test_case('QueryTest/uda', vector, use_db=database)
def test_ir_functions(self, vector):
database = 'ir_function_test'
self.__load_functions(
self.create_udfs_template, vector, database, '/test-warehouse/test-udfs.ll')
self.run_test_case('QueryTest/udf', vector, use_db=database)
self.run_test_case('QueryTest/udf-init-close', vector, use_db=database)
def test_udf_errors(self, vector):
self.run_test_case('QueryTest/udf-errors', vector)
def test_hive_udfs(self, vector):
self.client.execute('create database if not exists udf_test')
self.client.execute('create database if not exists uda_test')
self.run_test_case('QueryTest/load-hive-udfs', vector)
self.run_test_case('QueryTest/hive-udf', vector)
def test_libs_with_same_filenames(self, vector):
self.run_test_case('QueryTest/libs_with_same_filenames', vector)
def test_udf_update_via_drop(self, vector):
"""Test updating the UDF binary without restarting Impala. Dropping
the function should remove the binary from the local cache."""
# Run with sync_ddl to guarantee the drop is processed by all impalads.
exec_options = vector.get_value('exec_option')
exec_options['sync_ddl'] = 1
old_udf = os.path.join(os.environ['IMPALA_HOME'],
'testdata/udfs/impala-hive-udfs.jar')
new_udf = os.path.join(os.environ['IMPALA_HOME'],
'tests/test-hive-udfs/target/test-hive-udfs-1.0.jar')
udf_dst = '/test-warehouse/impala-hive-udfs2.jar'
drop_fn_stmt = 'drop function if exists default.udf_update_test_drop()'
create_fn_stmt = "create function default.udf_update_test_drop() returns string "\
"LOCATION '" + udf_dst + "' SYMBOL='com.cloudera.impala.TestUpdateUdf'"
query_stmt = "select default.udf_update_test_drop()"
# Put the old UDF binary on HDFS, make the UDF in Impala and run it.
call(["hadoop", "fs", "-put", "-f", old_udf, udf_dst])
self.execute_query_expect_success(self.client, drop_fn_stmt, exec_options)
self.execute_query_expect_success(self.client, create_fn_stmt, exec_options)
self.__run_query_all_impalads(exec_options, query_stmt, ["Old UDF"])
# Update the binary, drop and create the function again. The new binary should
# be running.
call(["hadoop", "fs", "-put", "-f", new_udf, udf_dst])
self.execute_query_expect_success(self.client, drop_fn_stmt, exec_options)
self.execute_query_expect_success(self.client, create_fn_stmt, exec_options)
self.__run_query_all_impalads(exec_options, query_stmt, ["New UDF"])
def test_udf_update_via_create(self, vector):
"""Test updating the UDF binary without restarting Impala. Creating a new function
from the library should refresh the cache."""
# Run with sync_ddl to guarantee the create is processed by all impalads.
exec_options = vector.get_value('exec_option')
exec_options['sync_ddl'] = 1
old_udf = os.path.join(os.environ['IMPALA_HOME'],
'testdata/udfs/impala-hive-udfs.jar')
new_udf = os.path.join(os.environ['IMPALA_HOME'],
'tests/test-hive-udfs/target/test-hive-udfs-1.0.jar')
udf_dst = '/test-warehouse/impala-hive-udfs3.jar'
old_function_name = "udf_update_test_create1"
new_function_name = "udf_update_test_create2"
drop_fn_template = 'drop function if exists default.%s()'
self.execute_query_expect_success(
self.client, drop_fn_template % old_function_name, exec_options)
self.execute_query_expect_success(
self.client, drop_fn_template % new_function_name, exec_options)
create_fn_template = "create function default.%s() returns string "\
"LOCATION '" + udf_dst + "' SYMBOL='com.cloudera.impala.TestUpdateUdf'"
query_template = "select default.%s()"
# Put the old UDF binary on HDFS, make the UDF in Impala and run it.
call(["hadoop", "fs", "-put", "-f", old_udf, udf_dst])
self.execute_query_expect_success(
self.client, create_fn_template % old_function_name, exec_options)
self.__run_query_all_impalads(
exec_options, query_template % old_function_name, ["Old UDF"])
# Update the binary, and create a new function using the binary. The new binary
# should be running.
call(["hadoop", "fs", "-put", "-f", new_udf, udf_dst])
self.execute_query_expect_success(
self.client, create_fn_template % new_function_name, exec_options)
self.__run_query_all_impalads(
exec_options, query_template % new_function_name, ["New UDF"])
# The old function should use the new library now
self.__run_query_all_impalads(
exec_options, query_template % old_function_name, ["New UDF"])
def test_drop_function_while_running(self, vector):
self.client.execute("drop function if exists default.drop_while_running(BIGINT)")
self.client.execute("create function default.drop_while_running(BIGINT) returns "\
"BIGINT LOCATION '/test-warehouse/libTestUdfs.so' SYMBOL='Identity'")
query = \
"select default.drop_while_running(l_orderkey) from tpch.lineitem limit 10000";
# Run this query asynchronously.
handle = self.execute_query_async(query, vector.get_value('exec_option'),
table_format=vector.get_value('table_format'))
# Fetch some rows from the async query to make sure the UDF is being used
results = self.client.fetch(query, handle, 1)
assert results.success
assert len(results.data) == 1
# Drop the function while the original query is running.
self.client.execute("drop function default.drop_while_running(BIGINT)")
# Fetch the rest of the rows, this should still be able to run the UDF
results = self.client.fetch(query, handle, -1)
assert results.success
assert len(results.data) == 9999
# Run serially because this will blow the process limit, potentially causing other
# queries to fail
@pytest.mark.execute_serially
def test_mem_limits(self, vector):
# Set the mem limit high enough that a simple scan can run
mem_limit = 1024 * 1024
vector.get_value('exec_option')['mem_limit'] = mem_limit
try:
self.run_test_case('QueryTest/udf-mem-limit', vector)
assert False, "Query was expected to fail"
except ImpalaBeeswaxException, e:
self.__check_exception(e)
try:
self.run_test_case('QueryTest/uda-mem-limit', vector)
assert False, "Query was expected to fail"
except ImpalaBeeswaxException, e:
self.__check_exception(e)
def __check_exception(self, e):
# The interesting exception message may be in 'e' or in its inner_exception
# depending on the point of query failure.
if 'Memory limit exceeded' in str(e) or 'Cancelled' in str(e):
return
if e.inner_exception is not None\
and ('Memory limit exceeded' in e.inner_exception.message
             or 'Cancelled' in e.inner_exception.message):
return
raise e
def __run_query_all_impalads(self, exec_options, query, expected):
impala_cluster = ImpalaCluster()
for impalad in impala_cluster.impalads:
client = impalad.service.create_beeswax_client()
result = self.execute_query_expect_success(client, query, exec_options)
assert result.data == expected
def __load_functions(self, template, vector, database, location):
queries = template.format(database=database, location=location)
# Split queries and remove empty lines
queries = [q for q in queries.split(';') if q.strip()]
exec_options = vector.get_value('exec_option')
for query in queries:
if query.strip() == '': continue
result = self.execute_query_expect_success(self.client, query, exec_options)
assert result is not None
# Create test UDA functions in {database} from library {location}
create_udas_template = """
drop function if exists {database}.test_count(int);
drop function if exists {database}.hll(int);
drop function if exists {database}.sum_small_decimal(decimal(9,2));
create database if not exists {database};
create aggregate function {database}.test_count(int) returns bigint
location '{location}' update_fn='CountUpdate';
create aggregate function {database}.hll(int) returns string
location '{location}' update_fn='HllUpdate';
create aggregate function {database}.sum_small_decimal(decimal(9,2))
returns decimal(9,2) location '{location}' update_fn='SumSmallDecimalUpdate';
"""
# Create test UDF functions in {database} from library {location}
create_udfs_template = """
drop function if exists {database}.identity(boolean);
drop function if exists {database}.identity(tinyint);
drop function if exists {database}.identity(smallint);
drop function if exists {database}.identity(int);
drop function if exists {database}.identity(bigint);
drop function if exists {database}.identity(float);
drop function if exists {database}.identity(double);
drop function if exists {database}.identity(string);
drop function if exists {database}.identity(timestamp);
drop function if exists {database}.identity(decimal(9,0));
drop function if exists {database}.identity(decimal(18,1));
drop function if exists {database}.identity(decimal(38,10));
drop function if exists {database}.all_types_fn(
string, boolean, tinyint, smallint, int, bigint, float, double, decimal(2,0));
drop function if exists {database}.no_args();
drop function if exists {database}.var_and(boolean...);
drop function if exists {database}.var_sum(int...);
drop function if exists {database}.var_sum(double...);
drop function if exists {database}.var_sum(string...);
drop function if exists {database}.var_sum(decimal(4,2)...);
drop function if exists {database}.var_sum_multiply(double, int...);
drop function if exists {database}.constant_timestamp();
drop function if exists {database}.validate_arg_type(string);
drop function if exists {database}.count_rows();
drop function if exists {database}.constant_arg(int);
drop function if exists {database}.validate_open(int);
drop function if exists {database}.mem_test(bigint);
drop function if exists {database}.mem_test_leaks(bigint);
drop function if exists {database}.unmangled_symbol();
create database if not exists {database};
create function {database}.identity(boolean) returns boolean
location '{location}' symbol='Identity';
create function {database}.identity(tinyint) returns tinyint
location '{location}' symbol='Identity';
create function {database}.identity(smallint) returns smallint
location '{location}' symbol='Identity';
create function {database}.identity(int) returns int
location '{location}' symbol='Identity';
create function {database}.identity(bigint) returns bigint
location '{location}' symbol='Identity';
create function {database}.identity(float) returns float
location '{location}' symbol='Identity';
create function {database}.identity(double) returns double
location '{location}' symbol='Identity';
create function {database}.identity(string) returns string
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_9StringValE';
create function {database}.identity(timestamp) returns timestamp
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_12TimestampValE';
create function {database}.identity(decimal(9,0)) returns decimal(9,0)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.identity(decimal(18,1)) returns decimal(18,1)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.identity(decimal(38,10)) returns decimal(38,10)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.all_types_fn(
string, boolean, tinyint, smallint, int, bigint, float, double, decimal(2,0))
returns int
location '{location}' symbol='AllTypes';
create function {database}.no_args() returns string
location '{location}'
symbol='_Z6NoArgsPN10impala_udf15FunctionContextE';
create function {database}.var_and(boolean...) returns boolean
location '{location}' symbol='VarAnd';
create function {database}.var_sum(int...) returns int
location '{location}' symbol='VarSum';
create function {database}.var_sum(double...) returns double
location '{location}' symbol='VarSum';
create function {database}.var_sum(string...) returns int
location '{location}' symbol='VarSum';
create function {database}.var_sum(decimal(4,2)...) returns decimal(18,2)
location '{location}' symbol='VarSum';
create function {database}.var_sum_multiply(double, int...) returns double
location '{location}'
symbol='_Z14VarSumMultiplyPN10impala_udf15FunctionContextERKNS_9DoubleValEiPKNS_6IntValE';
create function {database}.constant_timestamp() returns timestamp
location '{location}' symbol='ConstantTimestamp';
create function {database}.validate_arg_type(string) returns boolean
location '{location}' symbol='ValidateArgType';
create function {database}.count_rows() returns bigint
location '{location}' symbol='Count' prepare_fn='CountPrepare' close_fn='CountClose';
create function {database}.constant_arg(int) returns int
location '{location}' symbol='ConstantArg' prepare_fn='ConstantArgPrepare' close_fn='ConstantArgClose';
create function {database}.validate_open(int) returns boolean
location '{location}' symbol='ValidateOpen'
prepare_fn='ValidateOpenPrepare' close_fn='ValidateOpenClose';
create function {database}.mem_test(bigint) returns bigint
location '{location}' symbol='MemTest'
prepare_fn='MemTestPrepare' close_fn='MemTestClose';
create function {database}.mem_test_leaks(bigint) returns bigint
location '{location}' symbol='MemTest'
prepare_fn='MemTestPrepare';
-- Regression test for IMPALA-1475
create function {database}.unmangled_symbol() returns bigint
location '{location}' symbol='UnmangledSymbol';
"""
| 43.410256
| 103
| 0.754282
|
fb55d8d53ff329b361e22c59af97231cd25e717e
| 4,673
|
py
|
Python
|
purity_fb/purity_fb_1dot8/models/linkaggregationgroup.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 5
|
2017-09-08T20:47:22.000Z
|
2021-06-29T02:11:05.000Z
|
purity_fb/purity_fb_1dot8/models/linkaggregationgroup.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 16
|
2017-11-27T20:57:48.000Z
|
2021-11-23T18:46:43.000Z
|
purity_fb/purity_fb_1dot8/models/linkaggregationgroup.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 22
|
2017-10-13T15:33:05.000Z
|
2021-11-08T19:56:21.000Z
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.8 Python SDK
    Pure Storage FlashBlade REST 1.8 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Linkaggregationgroup(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'ports': 'list[Reference]',
'add_ports': 'list[Reference]',
'remove_ports': 'list[Reference]'
}
attribute_map = {
'ports': 'ports',
'add_ports': 'add_ports',
'remove_ports': 'remove_ports'
}
def __init__(self, ports=None, add_ports=None, remove_ports=None):
"""
Linkaggregationgroup - a model defined in Swagger
"""
self._ports = None
self._add_ports = None
self._remove_ports = None
if ports is not None:
self.ports = ports
if add_ports is not None:
self.add_ports = add_ports
if remove_ports is not None:
self.remove_ports = remove_ports
@property
def ports(self):
"""
Gets the ports of this Linkaggregationgroup.
:return: The ports of this Linkaggregationgroup.
:rtype: list[Reference]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""
Sets the ports of this Linkaggregationgroup.
:param ports: The ports of this Linkaggregationgroup.
:type: list[Reference]
"""
self._ports = ports
@property
def add_ports(self):
"""
Gets the add_ports of this Linkaggregationgroup.
:return: The add_ports of this Linkaggregationgroup.
:rtype: list[Reference]
"""
return self._add_ports
@add_ports.setter
def add_ports(self, add_ports):
"""
Sets the add_ports of this Linkaggregationgroup.
:param add_ports: The add_ports of this Linkaggregationgroup.
:type: list[Reference]
"""
self._add_ports = add_ports
@property
def remove_ports(self):
"""
Gets the remove_ports of this Linkaggregationgroup.
:return: The remove_ports of this Linkaggregationgroup.
:rtype: list[Reference]
"""
return self._remove_ports
@remove_ports.setter
def remove_ports(self, remove_ports):
"""
Sets the remove_ports of this Linkaggregationgroup.
:param remove_ports: The remove_ports of this Linkaggregationgroup.
:type: list[Reference]
"""
self._remove_ports = remove_ports
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Linkaggregationgroup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
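# A minimal usage sketch (the Reference value is assumed to come from the same
# generated models package; it is not defined in this file):
#
#     lag = Linkaggregationgroup(add_ports=[some_port_reference])
#     lag.to_dict()  # {'ports': None, 'add_ports': [...], 'remove_ports': None}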
| 26.551136
| 204
| 0.572651
|
77df47a3b5af7388aad738253a41e6be324659e9
| 4,213
|
py
|
Python
|
labml_nn/transformers/fast_weights/token_wise.py
|
BioGeek/annotated_deep_learning_paper_implementations
|
e2516cc3063cdfdf11cda05f22a10082297aa33e
|
[
"MIT"
] | 3,714
|
2021-05-26T03:42:15.000Z
|
2022-03-31T16:45:20.000Z
|
labml_nn/transformers/fast_weights/token_wise.py
|
BioGeek/annotated_deep_learning_paper_implementations
|
e2516cc3063cdfdf11cda05f22a10082297aa33e
|
[
"MIT"
] | 43
|
2021-05-26T05:26:42.000Z
|
2022-03-23T11:50:56.000Z
|
labml_nn/transformers/fast_weights/token_wise.py
|
BioGeek/annotated_deep_learning_paper_implementations
|
e2516cc3063cdfdf11cda05f22a10082297aa33e
|
[
"MIT"
] | 349
|
2021-05-26T21:07:09.000Z
|
2022-03-31T07:52:00.000Z
|
"""
---
title: Fast Weight Systems
summary: >
This is an annotated implementation/tutorial of
Linear Transformers Are Secretly Fast Weight Memory Systems in PyTorch.
---
"""
from typing import Optional
import torch
from torch import nn
from labml_helpers.module import Module
from labml_nn.transformers.fast_weights import DPFP
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.mha import PrepareForMultiHeadAttention
from labml_nn.utils import clone_module_list
class FastWeightsAttention(Module):
def __init__(self, heads: int, d_model: int, dropout_prob: float, phi: DPFP):
super().__init__()
# Number of features per head
self.d_k = d_model // heads
#
self.heads = heads
        # This transforms the `query` for multi-headed attention.
self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
# These transform the `key` and `value` for multi-headed attention.
self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
self.gate = nn.Sequential(PrepareForMultiHeadAttention(d_model, heads, 1, bias=False),
nn.Sigmoid())
self.phi = phi
# Output layer
self.output = nn.Linear(d_model, d_model)
# Dropout
self.dropout = nn.Dropout(dropout_prob)
def forward(self, x: torch.Tensor, weights: Optional[torch.Tensor]):
query = self.phi(self.query(x))
key = self.phi(self.key(x))
value = self.value(x)
if weights is None:
weights = key.new_zeros((key.shape[0], key.shape[1], value.shape[2], key.shape[2]))
value_existing = torch.einsum('bhvk,bhk->bhv', weights, key)
beta = self.gate(x)
weights = weights + torch.einsum('bhv,bhk->bhvk', beta * (value - value_existing), key)
x = torch.einsum('bhvk,bhk->bhv', weights, query)
# Concatenate multiple heads
x = x.reshape(x.shape[0], -1)
# Output layer
return self.output(x), weights
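# A hedged summary of the update performed in FastWeightsAttention.forward
# (reading the einsums above as a fast weight memory; the mapping onto the
# paper's notation is an assumption): with feature-mapped key k = phi(key),
# query q = phi(query), value v and write gate beta,
#
#     v_bar = W k                         # value the memory currently stores for k
#     W     = W + beta * (v - v_bar) k^T  # delta-rule write (outer product)
#     y     = W q                         # read-out that becomes the output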
class FastWeightsAttentionTransformerLayer(Module):
def __init__(self, *,
d_model: int,
attn: FastWeightsAttention,
feed_forward: FeedForward,
dropout_prob: float):
super().__init__()
# Transformer size $d_{model}$
self.size = d_model
#
self.attn = attn
self.feed_forward = feed_forward
self.dropout = nn.Dropout(dropout_prob)
# Normalization layers
self.norm_self_attn = nn.LayerNorm([d_model])
self.norm_ff = nn.LayerNorm([d_model])
def forward(self, x: torch.Tensor, weights: Optional[torch.Tensor]):
attn, weights = self.attn(x, weights)
# Add the self attention results
x = x + self.dropout(attn)
# Normalize for feed-forward
z = self.norm_ff(x)
# Pass through the feed-forward network
ff = self.feed_forward(z)
# Add the feed-forward results back
x = x + self.dropout(ff)
#
return x, weights
class FastWeightsAttentionTransformer(Module):
def __init__(self, layer: FastWeightsAttentionTransformerLayer, n_layers: int):
super().__init__()
# Make copies of the transformer layer
self.layers = clone_module_list(layer, n_layers)
# Final normalization layer
self.norm = nn.LayerNorm([layer.size])
def forward(self, x_seq: torch.Tensor):
        # Split the input into a list along the sequence axis
x_seq = torch.unbind(x_seq, dim=0)
# List to store the outputs
res = []
# For each input step
weights = [None for _ in range(len(self.layers))]
for x in x_seq:
# Run through each layer
for i, layer in enumerate(self.layers):
# Get layer output
x, weights[i] = layer(x, weights[i])
res.append(x)
# Stack the output tensors
res = torch.stack(res)
# Normalize the output
return self.norm(res)
| 32.160305
| 95
| 0.626869
|